disabling docker events (#359)
* disabling docker events

Signed-off-by: Shripad Nadgowda <[email protected]>

nadgowdas authored and sahilsuneja1 committed Feb 13, 2018
1 parent a0ec321 commit 35e6dd7
Showing 2 changed files with 16 additions and 13 deletions.
23 changes: 13 additions & 10 deletions crawler/dockercontainer.py
@@ -6,6 +6,7 @@
 import logging
 import os
 import shutil
+import time
 
 from requests.exceptions import HTTPError
 
@@ -21,8 +22,7 @@
 from utils.dockerutils import (exec_dockerps,
                                get_docker_container_json_logs_path,
                                get_docker_container_rootfs_path,
-                               exec_dockerinspect,
-                               poll_container_create_events)
+                               exec_dockerinspect)
 
 try:
     basestring  # Python 2
@@ -83,14 +83,17 @@ def poll_docker_containers(timeout, user_list=None, host_namespace=''):
         return None
 
     try:
-        cEvent = poll_container_create_events(timeout)
-
-        if not cEvent:
-            return None
-        c = DockerContainer(cEvent.get_containerid(), inspect=None,
-                            host_namespace=host_namespace)
-        if c.namespace:
-            return c
+        # We are currently throttling docker events
+        # so instead of polling we will just sleep for timeout interval
+        time.sleep(timeout)
+        # cEvent = poll_container_create_events(timeout)
+
+        # if not cEvent:
+        #     return None
+        # c = DockerContainer(cEvent.get_containerid(), inspect=None,
+        #                     host_namespace=host_namespace)
+        # if c.namespace:
+        #     return c
     except ContainerInvalidEnvironment as e:
         logger.exception(e)
 
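For context, the sketch below shows roughly how the patched poll_docker_containers path behaves after this commit: it sleeps for the poll interval instead of blocking on docker create events. The ContainerInvalidEnvironment stand-in, the logging setup, and the __main__ usage are assumptions added to make the snippet self-contained; the crawler's real module layout differs.

import logging
import time

logger = logging.getLogger(__name__)


class ContainerInvalidEnvironment(Exception):
    """Stand-in for the crawler's exception type (assumed for this sketch)."""


def poll_docker_containers(timeout, user_list=None, host_namespace=''):
    """Wait out the poll interval instead of listening for docker create events."""
    try:
        # Docker events are throttled in this version, so rather than blocking
        # on poll_container_create_events() we just sleep for the timeout
        # interval; new containers are picked up by the next full crawl.
        time.sleep(timeout)
    except ContainerInvalidEnvironment as e:
        # Kept to mirror the original structure; the sleep-only body never
        # raises it, and callers get None either way.
        logger.exception(e)
    return None


if __name__ == '__main__':
    # Usage: a 2-second poll interval blocks for 2 seconds and returns None,
    # meaning "no event-driven container to crawl in this cycle".
    print(poll_docker_containers(2))
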
6 changes: 3 additions & 3 deletions tests/functional/test_functional_dockerevents.py
@@ -71,7 +71,7 @@ def __exec_kill_crawlers(self):
     In this case, crawler would miss the create event, but it should be able to
     discover already running containers and snapshot them
     '''
-    def testCrawlContainer0(self):
+    def _noexec_testCrawlContainer0(self):
         env = os.environ.copy()
         mypath = os.path.dirname(os.path.realpath(__file__))
         os.makedirs(self.tempd + '/out')
@@ -114,7 +114,7 @@ def testCrawlContainer0(self):
     crawler should get interrupted and start snapshotting container immediately.
     '''
-    def testCrawlContainer1(self):
+    def _noexec_testCrawlContainer1(self):
         env = os.environ.copy()
         mypath = os.path.dirname(os.path.realpath(__file__))
         os.makedirs(self.tempd + '/out')
@@ -169,7 +169,7 @@ def testCrawlContainer1(self):
     And then we will wait for crawler's next iteration to ensure, w/o docker event,
     crawler will timeout and snapshot container periodically
     '''
-    def testCrawlContainer2(self):
+    def _noexec_testCrawlContainer2(self):
         env = os.environ.copy()
         mypath = os.path.dirname(os.path.realpath(__file__))
         os.makedirs(self.tempd + '/out')
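The test-side change relies on unittest's default discovery rule: the loader only collects methods whose names start with "test", so renaming a method with a "_noexec_" prefix parks it without deleting the code. A minimal self-contained illustration of that effect (the class and method names here are hypothetical, not taken from the crawler's test suite):

import unittest


class CrawlerEventTests(unittest.TestCase):

    def test_collected(self):
        # Starts with "test", so unittest's default loader runs it.
        self.assertTrue(True)

    def _noexec_test_parked(self):
        # Does not start with "test", so the default loader never collects it;
        # this is the effect the renames in this commit have on the docker-event tests.
        raise AssertionError("should never execute")


if __name__ == '__main__':
    unittest.main()  # runs test_collected only; _noexec_test_parked is skipped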
