repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
glitchassassin/lackey
lackey/RegionMatching.py
Region.setBottomRight
def setBottomRight(self, loc): """ Move this region so its bottom right corner is on ``loc`` """ offset = self.getBottomRight().getOffset(loc) # Calculate offset from current bottom right return self.setLocation(self.getTopLeft().offset(offset)) # Move top left corner by the same offset
python
def setBottomRight(self, loc): """ Move this region so its bottom right corner is on ``loc`` """ offset = self.getBottomRight().getOffset(loc) # Calculate offset from current bottom right return self.setLocation(self.getTopLeft().offset(offset)) # Move top left corner by the same offset
[ "def", "setBottomRight", "(", "self", ",", "loc", ")", ":", "offset", "=", "self", ".", "getBottomRight", "(", ")", ".", "getOffset", "(", "loc", ")", "# Calculate offset from current bottom right", "return", "self", ".", "setLocation", "(", "self", ".", "getTopLeft", "(", ")", ".", "offset", "(", "offset", ")", ")", "# Move top left corner by the same offset" ]
Move this region so its bottom right corner is on ``loc``
[ "Move", "this", "region", "so", "its", "bottom", "right", "corner", "is", "on", "loc" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1241-L1244
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.setSize
def setSize(self, w, h): """ Sets the new size of the region """ self.setW(w) self.setH(h) return self
python
def setSize(self, w, h): """ Sets the new size of the region """ self.setW(w) self.setH(h) return self
[ "def", "setSize", "(", "self", ",", "w", ",", "h", ")", ":", "self", ".", "setW", "(", "w", ")", "self", ".", "setH", "(", "h", ")", "return", "self" ]
Sets the new size of the region
[ "Sets", "the", "new", "size", "of", "the", "region" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1245-L1249
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.saveScreenCapture
def saveScreenCapture(self, path=None, name=None): """ Saves the region's bitmap """ bitmap = self.getBitmap() target_file = None if path is None and name is None: _, target_file = tempfile.mkstemp(".png") elif name is None: _, tpath = tempfile.mkstemp(".png") target_file = os.path.join(path, tpath) else: target_file = os.path.join(path, name+".png") cv2.imwrite(target_file, bitmap) return target_file
python
def saveScreenCapture(self, path=None, name=None): """ Saves the region's bitmap """ bitmap = self.getBitmap() target_file = None if path is None and name is None: _, target_file = tempfile.mkstemp(".png") elif name is None: _, tpath = tempfile.mkstemp(".png") target_file = os.path.join(path, tpath) else: target_file = os.path.join(path, name+".png") cv2.imwrite(target_file, bitmap) return target_file
[ "def", "saveScreenCapture", "(", "self", ",", "path", "=", "None", ",", "name", "=", "None", ")", ":", "bitmap", "=", "self", ".", "getBitmap", "(", ")", "target_file", "=", "None", "if", "path", "is", "None", "and", "name", "is", "None", ":", "_", ",", "target_file", "=", "tempfile", ".", "mkstemp", "(", "\".png\"", ")", "elif", "name", "is", "None", ":", "_", ",", "tpath", "=", "tempfile", ".", "mkstemp", "(", "\".png\"", ")", "target_file", "=", "os", ".", "path", ".", "join", "(", "path", ",", "tpath", ")", "else", ":", "target_file", "=", "os", ".", "path", ".", "join", "(", "path", ",", "name", "+", "\".png\"", ")", "cv2", ".", "imwrite", "(", "target_file", ",", "bitmap", ")", "return", "target_file" ]
Saves the region's bitmap
[ "Saves", "the", "region", "s", "bitmap" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1277-L1289
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.saveLastScreenImage
def saveLastScreenImage(self): """ Saves the last image taken on this region's screen to a temporary file """ bitmap = self.getLastScreenImage() _, target_file = tempfile.mkstemp(".png") cv2.imwrite(target_file, bitmap)
python
def saveLastScreenImage(self): """ Saves the last image taken on this region's screen to a temporary file """ bitmap = self.getLastScreenImage() _, target_file = tempfile.mkstemp(".png") cv2.imwrite(target_file, bitmap)
[ "def", "saveLastScreenImage", "(", "self", ")", ":", "bitmap", "=", "self", ".", "getLastScreenImage", "(", ")", "_", ",", "target_file", "=", "tempfile", ".", "mkstemp", "(", "\".png\"", ")", "cv2", ".", "imwrite", "(", "target_file", ",", "bitmap", ")" ]
Saves the last image taken on this region's screen to a temporary file
[ "Saves", "the", "last", "image", "taken", "on", "this", "region", "s", "screen", "to", "a", "temporary", "file" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1293-L1297
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.onChange
def onChange(self, min_changed_pixels=None, handler=None): """ Registers an event to call ``handler`` when at least ``min_changed_pixels`` change in this region. (Default for min_changed_pixels is set in Settings.ObserveMinChangedPixels) The ``handler`` function should take one parameter, an ObserveEvent object (see below). This event is ignored in the future unless the handler calls the repeat() method on the provided ObserveEvent object. Returns the event's ID as a string. """ if isinstance(min_changed_pixels, int) and (callable(handler) or handler is None): return self._observer.register_event( "CHANGE", pattern=(min_changed_pixels, self.getBitmap()), handler=handler) elif (callable(min_changed_pixels) or min_changed_pixels is None) and (callable(handler) or handler is None): handler = min_changed_pixels or handler return self._observer.register_event( "CHANGE", pattern=(Settings.ObserveMinChangedPixels, self.getBitmap()), handler=handler) else: raise ValueError("Unsupported arguments for onChange method")
python
def onChange(self, min_changed_pixels=None, handler=None): """ Registers an event to call ``handler`` when at least ``min_changed_pixels`` change in this region. (Default for min_changed_pixels is set in Settings.ObserveMinChangedPixels) The ``handler`` function should take one parameter, an ObserveEvent object (see below). This event is ignored in the future unless the handler calls the repeat() method on the provided ObserveEvent object. Returns the event's ID as a string. """ if isinstance(min_changed_pixels, int) and (callable(handler) or handler is None): return self._observer.register_event( "CHANGE", pattern=(min_changed_pixels, self.getBitmap()), handler=handler) elif (callable(min_changed_pixels) or min_changed_pixels is None) and (callable(handler) or handler is None): handler = min_changed_pixels or handler return self._observer.register_event( "CHANGE", pattern=(Settings.ObserveMinChangedPixels, self.getBitmap()), handler=handler) else: raise ValueError("Unsupported arguments for onChange method")
[ "def", "onChange", "(", "self", ",", "min_changed_pixels", "=", "None", ",", "handler", "=", "None", ")", ":", "if", "isinstance", "(", "min_changed_pixels", ",", "int", ")", "and", "(", "callable", "(", "handler", ")", "or", "handler", "is", "None", ")", ":", "return", "self", ".", "_observer", ".", "register_event", "(", "\"CHANGE\"", ",", "pattern", "=", "(", "min_changed_pixels", ",", "self", ".", "getBitmap", "(", ")", ")", ",", "handler", "=", "handler", ")", "elif", "(", "callable", "(", "min_changed_pixels", ")", "or", "min_changed_pixels", "is", "None", ")", "and", "(", "callable", "(", "handler", ")", "or", "handler", "is", "None", ")", ":", "handler", "=", "min_changed_pixels", "or", "handler", "return", "self", ".", "_observer", ".", "register_event", "(", "\"CHANGE\"", ",", "pattern", "=", "(", "Settings", ".", "ObserveMinChangedPixels", ",", "self", ".", "getBitmap", "(", ")", ")", ",", "handler", "=", "handler", ")", "else", ":", "raise", "ValueError", "(", "\"Unsupported arguments for onChange method\"", ")" ]
Registers an event to call ``handler`` when at least ``min_changed_pixels`` change in this region. (Default for min_changed_pixels is set in Settings.ObserveMinChangedPixels) The ``handler`` function should take one parameter, an ObserveEvent object (see below). This event is ignored in the future unless the handler calls the repeat() method on the provided ObserveEvent object. Returns the event's ID as a string.
[ "Registers", "an", "event", "to", "call", "handler", "when", "at", "least", "min_changed_pixels", "change", "in", "this", "region", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1400-L1424
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.isChanged
def isChanged(self, min_changed_pixels, screen_state): """ Returns true if at least ``min_changed_pixels`` are different between ``screen_state`` and the current state. """ r = self.clipRegionToScreen() current_state = r.getBitmap() diff = numpy.subtract(current_state, screen_state) return (numpy.count_nonzero(diff) >= min_changed_pixels)
python
def isChanged(self, min_changed_pixels, screen_state): """ Returns true if at least ``min_changed_pixels`` are different between ``screen_state`` and the current state. """ r = self.clipRegionToScreen() current_state = r.getBitmap() diff = numpy.subtract(current_state, screen_state) return (numpy.count_nonzero(diff) >= min_changed_pixels)
[ "def", "isChanged", "(", "self", ",", "min_changed_pixels", ",", "screen_state", ")", ":", "r", "=", "self", ".", "clipRegionToScreen", "(", ")", "current_state", "=", "r", ".", "getBitmap", "(", ")", "diff", "=", "numpy", ".", "subtract", "(", "current_state", ",", "screen_state", ")", "return", "(", "numpy", ".", "count_nonzero", "(", "diff", ")", ">=", "min_changed_pixels", ")" ]
Returns true if at least ``min_changed_pixels`` are different between ``screen_state`` and the current state.
[ "Returns", "true", "if", "at", "least", "min_changed_pixels", "are", "different", "between", "screen_state", "and", "the", "current", "state", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1425-L1432
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.stopObserver
def stopObserver(self): """ Stops this region's observer loop. If this is running in a subprocess, the subprocess will end automatically. """ self._observer.isStopped = True self._observer.isRunning = False
python
def stopObserver(self): """ Stops this region's observer loop. If this is running in a subprocess, the subprocess will end automatically. """ self._observer.isStopped = True self._observer.isRunning = False
[ "def", "stopObserver", "(", "self", ")", ":", "self", ".", "_observer", ".", "isStopped", "=", "True", "self", ".", "_observer", ".", "isRunning", "=", "False" ]
Stops this region's observer loop. If this is running in a subprocess, the subprocess will end automatically.
[ "Stops", "this", "region", "s", "observer", "loop", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1486-L1492
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.getEvents
def getEvents(self): """ Returns a list of all events that have occurred. Empties the internal queue. """ caught_events = self._observer.caught_events self._observer.caught_events = [] for event in caught_events: self._observer.activate_event(event["name"]) return caught_events
python
def getEvents(self): """ Returns a list of all events that have occurred. Empties the internal queue. """ caught_events = self._observer.caught_events self._observer.caught_events = [] for event in caught_events: self._observer.activate_event(event["name"]) return caught_events
[ "def", "getEvents", "(", "self", ")", ":", "caught_events", "=", "self", ".", "_observer", ".", "caught_events", "self", ".", "_observer", ".", "caught_events", "=", "[", "]", "for", "event", "in", "caught_events", ":", "self", ".", "_observer", ".", "activate_event", "(", "event", "[", "\"name\"", "]", ")", "return", "caught_events" ]
Returns a list of all events that have occurred. Empties the internal queue.
[ "Returns", "a", "list", "of", "all", "events", "that", "have", "occurred", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1506-L1515
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.getEvent
def getEvent(self, name): """ Returns the named event. Removes it from the internal queue. """ to_return = None for event in self._observer.caught_events: if event["name"] == name: to_return = event break if to_return: self._observer.caught_events.remove(to_return) self._observer.activate_event(to_return["name"]) return to_return
python
def getEvent(self, name): """ Returns the named event. Removes it from the internal queue. """ to_return = None for event in self._observer.caught_events: if event["name"] == name: to_return = event break if to_return: self._observer.caught_events.remove(to_return) self._observer.activate_event(to_return["name"]) return to_return
[ "def", "getEvent", "(", "self", ",", "name", ")", ":", "to_return", "=", "None", "for", "event", "in", "self", ".", "_observer", ".", "caught_events", ":", "if", "event", "[", "\"name\"", "]", "==", "name", ":", "to_return", "=", "event", "break", "if", "to_return", ":", "self", ".", "_observer", ".", "caught_events", ".", "remove", "(", "to_return", ")", "self", ".", "_observer", ".", "activate_event", "(", "to_return", "[", "\"name\"", "]", ")", "return", "to_return" ]
Returns the named event. Removes it from the internal queue.
[ "Returns", "the", "named", "event", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1516-L1529
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.setFindFailedResponse
def setFindFailedResponse(self, response): """ Set the response to a FindFailed exception in this region. Can be ABORT, SKIP, PROMPT, or RETRY. """ valid_responses = ("ABORT", "SKIP", "PROMPT", "RETRY") if response not in valid_responses: raise ValueError("Invalid response - expected one of ({})".format(", ".join(valid_responses))) self._findFailedResponse = response
python
def setFindFailedResponse(self, response): """ Set the response to a FindFailed exception in this region. Can be ABORT, SKIP, PROMPT, or RETRY. """ valid_responses = ("ABORT", "SKIP", "PROMPT", "RETRY") if response not in valid_responses: raise ValueError("Invalid response - expected one of ({})".format(", ".join(valid_responses))) self._findFailedResponse = response
[ "def", "setFindFailedResponse", "(", "self", ",", "response", ")", ":", "valid_responses", "=", "(", "\"ABORT\"", ",", "\"SKIP\"", ",", "\"PROMPT\"", ",", "\"RETRY\"", ")", "if", "response", "not", "in", "valid_responses", ":", "raise", "ValueError", "(", "\"Invalid response - expected one of ({})\"", ".", "format", "(", "\", \"", ".", "join", "(", "valid_responses", ")", ")", ")", "self", ".", "_findFailedResponse", "=", "response" ]
Set the response to a FindFailed exception in this region. Can be ABORT, SKIP, PROMPT, or RETRY.
[ "Set", "the", "response", "to", "a", "FindFailed", "exception", "in", "this", "region", ".", "Can", "be", "ABORT", "SKIP", "PROMPT", "or", "RETRY", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1569-L1576
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.setThrowException
def setThrowException(self, setting): """ Defines whether an exception should be thrown for FindFailed operations. ``setting`` should be True or False. """ if setting: self._throwException = True self._findFailedResponse = "ABORT" else: self._throwException = False self._findFailedResponse = "SKIP"
python
def setThrowException(self, setting): """ Defines whether an exception should be thrown for FindFailed operations. ``setting`` should be True or False. """ if setting: self._throwException = True self._findFailedResponse = "ABORT" else: self._throwException = False self._findFailedResponse = "SKIP"
[ "def", "setThrowException", "(", "self", ",", "setting", ")", ":", "if", "setting", ":", "self", ".", "_throwException", "=", "True", "self", ".", "_findFailedResponse", "=", "\"ABORT\"", "else", ":", "self", ".", "_throwException", "=", "False", "self", ".", "_findFailedResponse", "=", "\"SKIP\"" ]
Defines whether an exception should be thrown for FindFailed operations. ``setting`` should be True or False.
[ "Defines", "whether", "an", "exception", "should", "be", "thrown", "for", "FindFailed", "operations", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1586-L1595
train
glitchassassin/lackey
lackey/RegionMatching.py
Observer.register_event
def register_event(self, event_type, pattern, handler): """ When ``event_type`` is observed for ``pattern``, triggers ``handler``. For "CHANGE" events, ``pattern`` should be a tuple of ``min_changed_pixels`` and the base screen state. """ if event_type not in self._supported_events: raise ValueError("Unsupported event type {}".format(event_type)) if event_type != "CHANGE" and not isinstance(pattern, Pattern) and not isinstance(pattern, basestring): raise ValueError("Expected pattern to be a Pattern or string") if event_type == "CHANGE" and not (len(pattern)==2 and isinstance(pattern[0], int) and isinstance(pattern[1], numpy.ndarray)): raise ValueError("For \"CHANGE\" events, ``pattern`` should be a tuple of ``min_changed_pixels`` and the base screen state.") # Create event object event = { "pattern": pattern, "event_type": event_type, "count": 0, "handler": handler, "name": uuid.uuid4(), "active": True } self._events[event["name"]] = event return event["name"]
python
def register_event(self, event_type, pattern, handler): """ When ``event_type`` is observed for ``pattern``, triggers ``handler``. For "CHANGE" events, ``pattern`` should be a tuple of ``min_changed_pixels`` and the base screen state. """ if event_type not in self._supported_events: raise ValueError("Unsupported event type {}".format(event_type)) if event_type != "CHANGE" and not isinstance(pattern, Pattern) and not isinstance(pattern, basestring): raise ValueError("Expected pattern to be a Pattern or string") if event_type == "CHANGE" and not (len(pattern)==2 and isinstance(pattern[0], int) and isinstance(pattern[1], numpy.ndarray)): raise ValueError("For \"CHANGE\" events, ``pattern`` should be a tuple of ``min_changed_pixels`` and the base screen state.") # Create event object event = { "pattern": pattern, "event_type": event_type, "count": 0, "handler": handler, "name": uuid.uuid4(), "active": True } self._events[event["name"]] = event return event["name"]
[ "def", "register_event", "(", "self", ",", "event_type", ",", "pattern", ",", "handler", ")", ":", "if", "event_type", "not", "in", "self", ".", "_supported_events", ":", "raise", "ValueError", "(", "\"Unsupported event type {}\"", ".", "format", "(", "event_type", ")", ")", "if", "event_type", "!=", "\"CHANGE\"", "and", "not", "isinstance", "(", "pattern", ",", "Pattern", ")", "and", "not", "isinstance", "(", "pattern", ",", "basestring", ")", ":", "raise", "ValueError", "(", "\"Expected pattern to be a Pattern or string\"", ")", "if", "event_type", "==", "\"CHANGE\"", "and", "not", "(", "len", "(", "pattern", ")", "==", "2", "and", "isinstance", "(", "pattern", "[", "0", "]", ",", "int", ")", "and", "isinstance", "(", "pattern", "[", "1", "]", ",", "numpy", ".", "ndarray", ")", ")", ":", "raise", "ValueError", "(", "\"For \\\"CHANGE\\\" events, ``pattern`` should be a tuple of ``min_changed_pixels`` and the base screen state.\"", ")", "# Create event object", "event", "=", "{", "\"pattern\"", ":", "pattern", ",", "\"event_type\"", ":", "event_type", ",", "\"count\"", ":", "0", ",", "\"handler\"", ":", "handler", ",", "\"name\"", ":", "uuid", ".", "uuid4", "(", ")", ",", "\"active\"", ":", "True", "}", "self", ".", "_events", "[", "event", "[", "\"name\"", "]", "]", "=", "event", "return", "event", "[", "\"name\"", "]" ]
When ``event_type`` is observed for ``pattern``, triggers ``handler``. For "CHANGE" events, ``pattern`` should be a tuple of ``min_changed_pixels`` and the base screen state.
[ "When", "event_type", "is", "observed", "for", "pattern", "triggers", "handler", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1650-L1673
train
glitchassassin/lackey
lackey/RegionMatching.py
Screen.capture
def capture(self, *args): #x=None, y=None, w=None, h=None): """ Captures the region as an image """ if len(args) == 0: # Capture screen region region = self elif isinstance(args[0], Region): # Capture specified region region = args[0] elif isinstance(args[0], tuple): # Capture region defined by specified tuple region = Region(*args[0]) elif isinstance(args[0], basestring): # Interactive mode raise NotImplementedError("Interactive capture mode not defined") elif isinstance(args[0], int): # Capture region defined by provided x,y,w,h region = Region(*args) self.lastScreenImage = region.getBitmap() return self.lastScreenImage
python
def capture(self, *args): #x=None, y=None, w=None, h=None): """ Captures the region as an image """ if len(args) == 0: # Capture screen region region = self elif isinstance(args[0], Region): # Capture specified region region = args[0] elif isinstance(args[0], tuple): # Capture region defined by specified tuple region = Region(*args[0]) elif isinstance(args[0], basestring): # Interactive mode raise NotImplementedError("Interactive capture mode not defined") elif isinstance(args[0], int): # Capture region defined by provided x,y,w,h region = Region(*args) self.lastScreenImage = region.getBitmap() return self.lastScreenImage
[ "def", "capture", "(", "self", ",", "*", "args", ")", ":", "#x=None, y=None, w=None, h=None):", "if", "len", "(", "args", ")", "==", "0", ":", "# Capture screen region", "region", "=", "self", "elif", "isinstance", "(", "args", "[", "0", "]", ",", "Region", ")", ":", "# Capture specified region", "region", "=", "args", "[", "0", "]", "elif", "isinstance", "(", "args", "[", "0", "]", ",", "tuple", ")", ":", "# Capture region defined by specified tuple", "region", "=", "Region", "(", "*", "args", "[", "0", "]", ")", "elif", "isinstance", "(", "args", "[", "0", "]", ",", "basestring", ")", ":", "# Interactive mode", "raise", "NotImplementedError", "(", "\"Interactive capture mode not defined\"", ")", "elif", "isinstance", "(", "args", "[", "0", "]", ",", "int", ")", ":", "# Capture region defined by provided x,y,w,h", "region", "=", "Region", "(", "*", "args", ")", "self", ".", "lastScreenImage", "=", "region", ".", "getBitmap", "(", ")", "return", "self", ".", "lastScreenImage" ]
Captures the region as an image
[ "Captures", "the", "region", "as", "an", "image" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1854-L1872
train
glitchassassin/lackey
lackey/RegionMatching.py
Screen.showMonitors
def showMonitors(cls): """ Prints debug information about currently detected screens """ Debug.info("*** monitor configuration [ {} Screen(s)] ***".format(cls.getNumberScreens())) Debug.info("*** Primary is Screen {}".format(cls.primaryScreen)) for index, screen in enumerate(PlatformManager.getScreenDetails()): Debug.info("Screen {}: ({}, {}, {}, {})".format(index, *screen["rect"])) Debug.info("*** end monitor configuration ***")
python
def showMonitors(cls): """ Prints debug information about currently detected screens """ Debug.info("*** monitor configuration [ {} Screen(s)] ***".format(cls.getNumberScreens())) Debug.info("*** Primary is Screen {}".format(cls.primaryScreen)) for index, screen in enumerate(PlatformManager.getScreenDetails()): Debug.info("Screen {}: ({}, {}, {}, {})".format(index, *screen["rect"])) Debug.info("*** end monitor configuration ***")
[ "def", "showMonitors", "(", "cls", ")", ":", "Debug", ".", "info", "(", "\"*** monitor configuration [ {} Screen(s)] ***\"", ".", "format", "(", "cls", ".", "getNumberScreens", "(", ")", ")", ")", "Debug", ".", "info", "(", "\"*** Primary is Screen {}\"", ".", "format", "(", "cls", ".", "primaryScreen", ")", ")", "for", "index", ",", "screen", "in", "enumerate", "(", "PlatformManager", ".", "getScreenDetails", "(", ")", ")", ":", "Debug", ".", "info", "(", "\"Screen {}: ({}, {}, {}, {})\"", ".", "format", "(", "index", ",", "*", "screen", "[", "\"rect\"", "]", ")", ")", "Debug", ".", "info", "(", "\"*** end monitor configuration ***\"", ")" ]
Prints debug information about currently detected screens
[ "Prints", "debug", "information", "about", "currently", "detected", "screens" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1908-L1914
train
glitchassassin/lackey
lackey/RegionMatching.py
Screen.resetMonitors
def resetMonitors(self): """ Recalculates screen based on changed monitor setup """ Debug.error("*** BE AWARE: experimental - might not work ***") Debug.error("Re-evaluation of the monitor setup has been requested") Debug.error("... Current Region/Screen objects might not be valid any longer") Debug.error("... Use existing Region/Screen objects only if you know what you are doing!") self.__init__(self._screenId) self.showMonitors()
python
def resetMonitors(self): """ Recalculates screen based on changed monitor setup """ Debug.error("*** BE AWARE: experimental - might not work ***") Debug.error("Re-evaluation of the monitor setup has been requested") Debug.error("... Current Region/Screen objects might not be valid any longer") Debug.error("... Use existing Region/Screen objects only if you know what you are doing!") self.__init__(self._screenId) self.showMonitors()
[ "def", "resetMonitors", "(", "self", ")", ":", "Debug", ".", "error", "(", "\"*** BE AWARE: experimental - might not work ***\"", ")", "Debug", ".", "error", "(", "\"Re-evaluation of the monitor setup has been requested\"", ")", "Debug", ".", "error", "(", "\"... Current Region/Screen objects might not be valid any longer\"", ")", "Debug", ".", "error", "(", "\"... Use existing Region/Screen objects only if you know what you are doing!\"", ")", "self", ".", "__init__", "(", "self", ".", "_screenId", ")", "self", ".", "showMonitors", "(", ")" ]
Recalculates screen based on changed monitor setup
[ "Recalculates", "screen", "based", "on", "changed", "monitor", "setup" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1915-L1922
train
glitchassassin/lackey
lackey/RegionMatching.py
Screen.newRegion
def newRegion(self, loc, width, height): """ Creates a new region on the current screen at the specified offset with the specified width and height. """ return Region.create(self.getTopLeft().offset(loc), width, height)
python
def newRegion(self, loc, width, height): """ Creates a new region on the current screen at the specified offset with the specified width and height. """ return Region.create(self.getTopLeft().offset(loc), width, height)
[ "def", "newRegion", "(", "self", ",", "loc", ",", "width", ",", "height", ")", ":", "return", "Region", ".", "create", "(", "self", ".", "getTopLeft", "(", ")", ".", "offset", "(", "loc", ")", ",", "width", ",", "height", ")" ]
Creates a new region on the current screen at the specified offset with the specified width and height.
[ "Creates", "a", "new", "region", "on", "the", "current", "screen", "at", "the", "specified", "offset", "with", "the", "specified", "width", "and", "height", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1923-L1926
train
glitchassassin/lackey
lackey/SettingsDebug.py
DebugMaster.history
def history(self, message): """ Records an Action-level log message Uses the log path defined by ``Debug.setUserLogFile()``. If no log file is defined, sends to STDOUT """ if Settings.ActionLogs: self._write_log("action", Settings.LogTime, message)
python
def history(self, message): """ Records an Action-level log message Uses the log path defined by ``Debug.setUserLogFile()``. If no log file is defined, sends to STDOUT """ if Settings.ActionLogs: self._write_log("action", Settings.LogTime, message)
[ "def", "history", "(", "self", ",", "message", ")", ":", "if", "Settings", ".", "ActionLogs", ":", "self", ".", "_write_log", "(", "\"action\"", ",", "Settings", ".", "LogTime", ",", "message", ")" ]
Records an Action-level log message Uses the log path defined by ``Debug.setUserLogFile()``. If no log file is defined, sends to STDOUT
[ "Records", "an", "Action", "-", "level", "log", "message" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/SettingsDebug.py#L34-L41
train
glitchassassin/lackey
lackey/SettingsDebug.py
DebugMaster.error
def error(self, message): """ Records an Error-level log message Uses the log path defined by ``Debug.setUserLogFile()``. If no log file is defined, sends to STDOUT """ if Settings.ErrorLogs: self._write_log("error", Settings.LogTime, message)
python
def error(self, message): """ Records an Error-level log message Uses the log path defined by ``Debug.setUserLogFile()``. If no log file is defined, sends to STDOUT """ if Settings.ErrorLogs: self._write_log("error", Settings.LogTime, message)
[ "def", "error", "(", "self", ",", "message", ")", ":", "if", "Settings", ".", "ErrorLogs", ":", "self", ".", "_write_log", "(", "\"error\"", ",", "Settings", ".", "LogTime", ",", "message", ")" ]
Records an Error-level log message Uses the log path defined by ``Debug.setUserLogFile()``. If no log file is defined, sends to STDOUT
[ "Records", "an", "Error", "-", "level", "log", "message" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/SettingsDebug.py#L42-L49
train
glitchassassin/lackey
lackey/SettingsDebug.py
DebugMaster.info
def info(self, message): """ Records an Info-level log message Uses the log path defined by ``Debug.setUserLogFile()``. If no log file is defined, sends to STDOUT """ if Settings.InfoLogs: self._write_log("info", Settings.LogTime, message)
python
def info(self, message): """ Records an Info-level log message Uses the log path defined by ``Debug.setUserLogFile()``. If no log file is defined, sends to STDOUT """ if Settings.InfoLogs: self._write_log("info", Settings.LogTime, message)
[ "def", "info", "(", "self", ",", "message", ")", ":", "if", "Settings", ".", "InfoLogs", ":", "self", ".", "_write_log", "(", "\"info\"", ",", "Settings", ".", "LogTime", ",", "message", ")" ]
Records an Info-level log message Uses the log path defined by ``Debug.setUserLogFile()``. If no log file is defined, sends to STDOUT
[ "Records", "an", "Info", "-", "level", "log", "message" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/SettingsDebug.py#L50-L57
train
glitchassassin/lackey
lackey/SettingsDebug.py
DebugMaster.on
def on(self, level): """ Turns on all debugging messages up to the specified level 0 = None; 1 = User; """ if isinstance(level, int) and level >= 0 and level <= 3: self._debug_level = level
python
def on(self, level): """ Turns on all debugging messages up to the specified level 0 = None; 1 = User; """ if isinstance(level, int) and level >= 0 and level <= 3: self._debug_level = level
[ "def", "on", "(", "self", ",", "level", ")", ":", "if", "isinstance", "(", "level", ",", "int", ")", "and", "level", ">=", "0", "and", "level", "<=", "3", ":", "self", ".", "_debug_level", "=", "level" ]
Turns on all debugging messages up to the specified level 0 = None; 1 = User;
[ "Turns", "on", "all", "debugging", "messages", "up", "to", "the", "specified", "level" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/SettingsDebug.py#L58-L64
train
glitchassassin/lackey
lackey/SettingsDebug.py
DebugMaster.setLogFile
def setLogFile(self, filepath): """ Defines the file to which output log messages should be sent. Set to `None` to print to STDOUT instead. """ if filepath is None: self._log_file = None return parsed_path = os.path.abspath(filepath) # Checks if the provided log filename is in a real directory, and that # the filename itself is not a directory. if os.path.isdir(os.path.dirname(parsed_path)) and not os.path.isdir(parsed_path): self._log_file = parsed_path else: raise IOError("File not found: " + filepath)
python
def setLogFile(self, filepath): """ Defines the file to which output log messages should be sent. Set to `None` to print to STDOUT instead. """ if filepath is None: self._log_file = None return parsed_path = os.path.abspath(filepath) # Checks if the provided log filename is in a real directory, and that # the filename itself is not a directory. if os.path.isdir(os.path.dirname(parsed_path)) and not os.path.isdir(parsed_path): self._log_file = parsed_path else: raise IOError("File not found: " + filepath)
[ "def", "setLogFile", "(", "self", ",", "filepath", ")", ":", "if", "filepath", "is", "None", ":", "self", ".", "_log_file", "=", "None", "return", "parsed_path", "=", "os", ".", "path", ".", "abspath", "(", "filepath", ")", "# Checks if the provided log filename is in a real directory, and that", "# the filename itself is not a directory.", "if", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "dirname", "(", "parsed_path", ")", ")", "and", "not", "os", ".", "path", ".", "isdir", "(", "parsed_path", ")", ":", "self", ".", "_log_file", "=", "parsed_path", "else", ":", "raise", "IOError", "(", "\"File not found: \"", "+", "filepath", ")" ]
Defines the file to which output log messages should be sent. Set to `None` to print to STDOUT instead.
[ "Defines", "the", "file", "to", "which", "output", "log", "messages", "should", "be", "sent", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/SettingsDebug.py#L102-L116
train
glitchassassin/lackey
lackey/SettingsDebug.py
DebugMaster._write_log
def _write_log(self, log_type, log_time, message): """ Private method to abstract log writing for different types of logs """ timestamp = datetime.datetime.now().strftime(" %Y-%m-%d %H:%M:%S") log_entry = "[{}{}] {}".format(log_type, timestamp if log_time else "", message) if self._logger and callable(getattr(self._logger, self._logger_methods[log_type], None)): # Check for log handler (sends message only if _logger_no_prefix is True) getattr( self._logger, self._logger_methods[log_type], None )(message if self._logger_no_prefix else log_entry) elif self._log_file: # Otherwise write to file, if a file has been specified with open(self._log_file, 'a') as logfile: try: logfile.write(unicode(log_entry + "\n")) except NameError: # `unicode` only works in Python 2 logfile.write(log_entry + "\n") else: # Otherwise, print to STDOUT print(log_entry)
python
def _write_log(self, log_type, log_time, message): """ Private method to abstract log writing for different types of logs """ timestamp = datetime.datetime.now().strftime(" %Y-%m-%d %H:%M:%S") log_entry = "[{}{}] {}".format(log_type, timestamp if log_time else "", message) if self._logger and callable(getattr(self._logger, self._logger_methods[log_type], None)): # Check for log handler (sends message only if _logger_no_prefix is True) getattr( self._logger, self._logger_methods[log_type], None )(message if self._logger_no_prefix else log_entry) elif self._log_file: # Otherwise write to file, if a file has been specified with open(self._log_file, 'a') as logfile: try: logfile.write(unicode(log_entry + "\n")) except NameError: # `unicode` only works in Python 2 logfile.write(log_entry + "\n") else: # Otherwise, print to STDOUT print(log_entry)
[ "def", "_write_log", "(", "self", ",", "log_type", ",", "log_time", ",", "message", ")", ":", "timestamp", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\" %Y-%m-%d %H:%M:%S\"", ")", "log_entry", "=", "\"[{}{}] {}\"", ".", "format", "(", "log_type", ",", "timestamp", "if", "log_time", "else", "\"\"", ",", "message", ")", "if", "self", ".", "_logger", "and", "callable", "(", "getattr", "(", "self", ".", "_logger", ",", "self", ".", "_logger_methods", "[", "log_type", "]", ",", "None", ")", ")", ":", "# Check for log handler (sends message only if _logger_no_prefix is True)", "getattr", "(", "self", ".", "_logger", ",", "self", ".", "_logger_methods", "[", "log_type", "]", ",", "None", ")", "(", "message", "if", "self", ".", "_logger_no_prefix", "else", "log_entry", ")", "elif", "self", ".", "_log_file", ":", "# Otherwise write to file, if a file has been specified", "with", "open", "(", "self", ".", "_log_file", ",", "'a'", ")", "as", "logfile", ":", "try", ":", "logfile", ".", "write", "(", "unicode", "(", "log_entry", "+", "\"\\n\"", ")", ")", "except", "NameError", ":", "# `unicode` only works in Python 2", "logfile", ".", "write", "(", "log_entry", "+", "\"\\n\"", ")", "else", ":", "# Otherwise, print to STDOUT", "print", "(", "log_entry", ")" ]
Private method to abstract log writing for different types of logs
[ "Private", "method", "to", "abstract", "log", "writing", "for", "different", "types", "of", "logs" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/SettingsDebug.py#L117-L137
train
glitchassassin/lackey
lackey/__init__.py
addImagePath
def addImagePath(new_path): """ Convenience function. Adds a path to the list of paths to search for images. Can be a URL (but must be accessible). """ if os.path.exists(new_path): Settings.ImagePaths.append(new_path) elif "http://" in new_path or "https://" in new_path: request = requests.get(new_path) if request.status_code < 400: # Path exists Settings.ImagePaths.append(new_path) else: raise OSError("Unable to connect to " + new_path) else: raise OSError("File not found: " + new_path)
python
def addImagePath(new_path): """ Convenience function. Adds a path to the list of paths to search for images. Can be a URL (but must be accessible). """ if os.path.exists(new_path): Settings.ImagePaths.append(new_path) elif "http://" in new_path or "https://" in new_path: request = requests.get(new_path) if request.status_code < 400: # Path exists Settings.ImagePaths.append(new_path) else: raise OSError("Unable to connect to " + new_path) else: raise OSError("File not found: " + new_path)
[ "def", "addImagePath", "(", "new_path", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "new_path", ")", ":", "Settings", ".", "ImagePaths", ".", "append", "(", "new_path", ")", "elif", "\"http://\"", "in", "new_path", "or", "\"https://\"", "in", "new_path", ":", "request", "=", "requests", ".", "get", "(", "new_path", ")", "if", "request", ".", "status_code", "<", "400", ":", "# Path exists", "Settings", ".", "ImagePaths", ".", "append", "(", "new_path", ")", "else", ":", "raise", "OSError", "(", "\"Unable to connect to \"", "+", "new_path", ")", "else", ":", "raise", "OSError", "(", "\"File not found: \"", "+", "new_path", ")" ]
Convenience function. Adds a path to the list of paths to search for images. Can be a URL (but must be accessible).
[ "Convenience", "function", ".", "Adds", "a", "path", "to", "the", "list", "of", "paths", "to", "search", "for", "images", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/__init__.py#L107-L121
train
glitchassassin/lackey
lackey/__init__.py
unzip
def unzip(from_file, to_folder): """ Convenience function. Extracts files from the zip file `fromFile` into the folder `toFolder`. """ with ZipFile(os.path.abspath(from_file), 'r') as to_unzip: to_unzip.extractall(os.path.abspath(to_folder))
python
def unzip(from_file, to_folder): """ Convenience function. Extracts files from the zip file `fromFile` into the folder `toFolder`. """ with ZipFile(os.path.abspath(from_file), 'r') as to_unzip: to_unzip.extractall(os.path.abspath(to_folder))
[ "def", "unzip", "(", "from_file", ",", "to_folder", ")", ":", "with", "ZipFile", "(", "os", ".", "path", ".", "abspath", "(", "from_file", ")", ",", "'r'", ")", "as", "to_unzip", ":", "to_unzip", ".", "extractall", "(", "os", ".", "path", ".", "abspath", "(", "to_folder", ")", ")" ]
Convenience function. Extracts files from the zip file `fromFile` into the folder `toFolder`.
[ "Convenience", "function", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/__init__.py#L146-L151
train
glitchassassin/lackey
lackey/__init__.py
popup
def popup(text, title="Lackey Info"): """ Creates an info dialog with the specified text. """ root = tk.Tk() root.withdraw() tkMessageBox.showinfo(title, text)
python
def popup(text, title="Lackey Info"): """ Creates an info dialog with the specified text. """ root = tk.Tk() root.withdraw() tkMessageBox.showinfo(title, text)
[ "def", "popup", "(", "text", ",", "title", "=", "\"Lackey Info\"", ")", ":", "root", "=", "tk", ".", "Tk", "(", ")", "root", ".", "withdraw", "(", ")", "tkMessageBox", ".", "showinfo", "(", "title", ",", "text", ")" ]
Creates an info dialog with the specified text.
[ "Creates", "an", "info", "dialog", "with", "the", "specified", "text", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/__init__.py#L176-L180
train
glitchassassin/lackey
lackey/__init__.py
popError
def popError(text, title="Lackey Error"): """ Creates an error dialog with the specified text. """ root = tk.Tk() root.withdraw() tkMessageBox.showerror(title, text)
python
def popError(text, title="Lackey Error"): """ Creates an error dialog with the specified text. """ root = tk.Tk() root.withdraw() tkMessageBox.showerror(title, text)
[ "def", "popError", "(", "text", ",", "title", "=", "\"Lackey Error\"", ")", ":", "root", "=", "tk", ".", "Tk", "(", ")", "root", ".", "withdraw", "(", ")", "tkMessageBox", ".", "showerror", "(", "title", ",", "text", ")" ]
Creates an error dialog with the specified text.
[ "Creates", "an", "error", "dialog", "with", "the", "specified", "text", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/__init__.py#L181-L185
train
glitchassassin/lackey
lackey/__init__.py
popAsk
def popAsk(text, title="Lackey Decision"): """ Creates a yes-no dialog with the specified text. """ root = tk.Tk() root.withdraw() return tkMessageBox.askyesno(title, text)
python
def popAsk(text, title="Lackey Decision"): """ Creates a yes-no dialog with the specified text. """ root = tk.Tk() root.withdraw() return tkMessageBox.askyesno(title, text)
[ "def", "popAsk", "(", "text", ",", "title", "=", "\"Lackey Decision\"", ")", ":", "root", "=", "tk", ".", "Tk", "(", ")", "root", ".", "withdraw", "(", ")", "return", "tkMessageBox", ".", "askyesno", "(", "title", ",", "text", ")" ]
Creates a yes-no dialog with the specified text.
[ "Creates", "a", "yes", "-", "no", "dialog", "with", "the", "specified", "text", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/__init__.py#L186-L190
train
glitchassassin/lackey
lackey/__init__.py
input
def input(msg="", default="", title="Lackey Input", hidden=False): """ Creates an input dialog with the specified message and default text. If `hidden`, creates a password dialog instead. Returns the entered value. """ root = tk.Tk() input_text = tk.StringVar() input_text.set(default) PopupInput(root, msg, title, hidden, input_text) root.focus_force() root.mainloop() return str(input_text.get())
python
def input(msg="", default="", title="Lackey Input", hidden=False): """ Creates an input dialog with the specified message and default text. If `hidden`, creates a password dialog instead. Returns the entered value. """ root = tk.Tk() input_text = tk.StringVar() input_text.set(default) PopupInput(root, msg, title, hidden, input_text) root.focus_force() root.mainloop() return str(input_text.get())
[ "def", "input", "(", "msg", "=", "\"\"", ",", "default", "=", "\"\"", ",", "title", "=", "\"Lackey Input\"", ",", "hidden", "=", "False", ")", ":", "root", "=", "tk", ".", "Tk", "(", ")", "input_text", "=", "tk", ".", "StringVar", "(", ")", "input_text", ".", "set", "(", "default", ")", "PopupInput", "(", "root", ",", "msg", ",", "title", ",", "hidden", ",", "input_text", ")", "root", ".", "focus_force", "(", ")", "root", ".", "mainloop", "(", ")", "return", "str", "(", "input_text", ".", "get", "(", ")", ")" ]
Creates an input dialog with the specified message and default text. If `hidden`, creates a password dialog instead. Returns the entered value.
[ "Creates", "an", "input", "dialog", "with", "the", "specified", "message", "and", "default", "text", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/__init__.py#L193-L203
train
glitchassassin/lackey
lackey/__init__.py
inputText
def inputText(message="", title="Lackey Input", lines=9, width=20, text=""): """ Creates a textarea dialog with the specified message and default text. Returns the entered value. """ root = tk.Tk() input_text = tk.StringVar() input_text.set(text) PopupTextarea(root, message, title, lines, width, input_text) root.focus_force() root.mainloop() return str(input_text.get())
python
def inputText(message="", title="Lackey Input", lines=9, width=20, text=""): """ Creates a textarea dialog with the specified message and default text. Returns the entered value. """ root = tk.Tk() input_text = tk.StringVar() input_text.set(text) PopupTextarea(root, message, title, lines, width, input_text) root.focus_force() root.mainloop() return str(input_text.get())
[ "def", "inputText", "(", "message", "=", "\"\"", ",", "title", "=", "\"Lackey Input\"", ",", "lines", "=", "9", ",", "width", "=", "20", ",", "text", "=", "\"\"", ")", ":", "root", "=", "tk", ".", "Tk", "(", ")", "input_text", "=", "tk", ".", "StringVar", "(", ")", "input_text", ".", "set", "(", "text", ")", "PopupTextarea", "(", "root", ",", "message", ",", "title", ",", "lines", ",", "width", ",", "input_text", ")", "root", ".", "focus_force", "(", ")", "root", ".", "mainloop", "(", ")", "return", "str", "(", "input_text", ".", "get", "(", ")", ")" ]
Creates a textarea dialog with the specified message and default text. Returns the entered value.
[ "Creates", "a", "textarea", "dialog", "with", "the", "specified", "message", "and", "default", "text", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/__init__.py#L204-L214
train
glitchassassin/lackey
lackey/__init__.py
select
def select(message="", title="Lackey Input", options=None, default=None): """ Creates a dropdown selection dialog with the specified message and options `default` must be one of the options. Returns the selected value. """ if options is None or len(options) == 0: return "" if default is None: default = options[0] if default not in options: raise ValueError("<<default>> not in options[]") root = tk.Tk() input_text = tk.StringVar() input_text.set(message) PopupList(root, message, title, options, default, input_text) root.focus_force() root.mainloop() return str(input_text.get())
python
def select(message="", title="Lackey Input", options=None, default=None): """ Creates a dropdown selection dialog with the specified message and options `default` must be one of the options. Returns the selected value. """ if options is None or len(options) == 0: return "" if default is None: default = options[0] if default not in options: raise ValueError("<<default>> not in options[]") root = tk.Tk() input_text = tk.StringVar() input_text.set(message) PopupList(root, message, title, options, default, input_text) root.focus_force() root.mainloop() return str(input_text.get())
[ "def", "select", "(", "message", "=", "\"\"", ",", "title", "=", "\"Lackey Input\"", ",", "options", "=", "None", ",", "default", "=", "None", ")", ":", "if", "options", "is", "None", "or", "len", "(", "options", ")", "==", "0", ":", "return", "\"\"", "if", "default", "is", "None", ":", "default", "=", "options", "[", "0", "]", "if", "default", "not", "in", "options", ":", "raise", "ValueError", "(", "\"<<default>> not in options[]\"", ")", "root", "=", "tk", ".", "Tk", "(", ")", "input_text", "=", "tk", ".", "StringVar", "(", ")", "input_text", ".", "set", "(", "message", ")", "PopupList", "(", "root", ",", "message", ",", "title", ",", "options", ",", "default", ",", "input_text", ")", "root", ".", "focus_force", "(", ")", "root", ".", "mainloop", "(", ")", "return", "str", "(", "input_text", ".", "get", "(", ")", ")" ]
Creates a dropdown selection dialog with the specified message and options `default` must be one of the options. Returns the selected value.
[ "Creates", "a", "dropdown", "selection", "dialog", "with", "the", "specified", "message", "and", "options" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/__init__.py#L215-L233
train
glitchassassin/lackey
lackey/__init__.py
popFile
def popFile(title="Lackey Open File"): """ Creates a file selection dialog with the specified message and options. Returns the selected file. """ root = tk.Tk() root.withdraw() return str(tkFileDialog.askopenfilename(title=title))
python
def popFile(title="Lackey Open File"): """ Creates a file selection dialog with the specified message and options. Returns the selected file. """ root = tk.Tk() root.withdraw() return str(tkFileDialog.askopenfilename(title=title))
[ "def", "popFile", "(", "title", "=", "\"Lackey Open File\"", ")", ":", "root", "=", "tk", ".", "Tk", "(", ")", "root", ".", "withdraw", "(", ")", "return", "str", "(", "tkFileDialog", ".", "askopenfilename", "(", "title", "=", "title", ")", ")" ]
Creates a file selection dialog with the specified message and options. Returns the selected file.
[ "Creates", "a", "file", "selection", "dialog", "with", "the", "specified", "message", "and", "options", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/__init__.py#L234-L240
train
glitchassassin/lackey
lackey/Geometry.py
Location.setLocation
def setLocation(self, x, y): """Set the location of this object to the specified coordinates.""" self.x = int(x) self.y = int(y) return self
python
def setLocation(self, x, y): """Set the location of this object to the specified coordinates.""" self.x = int(x) self.y = int(y) return self
[ "def", "setLocation", "(", "self", ",", "x", ",", "y", ")", ":", "self", ".", "x", "=", "int", "(", "x", ")", "self", ".", "y", "=", "int", "(", "y", ")", "return", "self" ]
Set the location of this object to the specified coordinates.
[ "Set", "the", "location", "of", "this", "object", "to", "the", "specified", "coordinates", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/Geometry.py#L15-L19
train
glitchassassin/lackey
lackey/Geometry.py
Location.offset
def offset(self, dx, dy): """Get a new location which is dx and dy pixels away horizontally and vertically from the current location. """ return Location(self.x+dx, self.y+dy)
python
def offset(self, dx, dy): """Get a new location which is dx and dy pixels away horizontally and vertically from the current location. """ return Location(self.x+dx, self.y+dy)
[ "def", "offset", "(", "self", ",", "dx", ",", "dy", ")", ":", "return", "Location", "(", "self", ".", "x", "+", "dx", ",", "self", ".", "y", "+", "dy", ")" ]
Get a new location which is dx and dy pixels away horizontally and vertically from the current location.
[ "Get", "a", "new", "location", "which", "is", "dx", "and", "dy", "pixels", "away", "horizontally", "and", "vertically", "from", "the", "current", "location", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/Geometry.py#L22-L26
train
glitchassassin/lackey
lackey/Geometry.py
Location.getOffset
def getOffset(self, loc): """ Returns the offset between the given point and this point """ return Location(loc.x - self.x, loc.y - self.y)
python
def getOffset(self, loc): """ Returns the offset between the given point and this point """ return Location(loc.x - self.x, loc.y - self.y)
[ "def", "getOffset", "(", "self", ",", "loc", ")", ":", "return", "Location", "(", "loc", ".", "x", "-", "self", ".", "x", ",", "loc", ".", "y", "-", "self", ".", "y", ")" ]
Returns the offset between the given point and this point
[ "Returns", "the", "offset", "between", "the", "given", "point", "and", "this", "point" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/Geometry.py#L71-L73
train
glitchassassin/lackey
lackey/Geometry.py
Location.copyTo
def copyTo(self, screen): """ Creates a new point with the same offset on the target screen as this point has on the current screen """ from .RegionMatching import Screen if not isinstance(screen, Screen): screen = RegionMatching.Screen(screen) return screen.getTopLeft().offset(self.getScreen().getTopLeft().getOffset(self))
python
def copyTo(self, screen): """ Creates a new point with the same offset on the target screen as this point has on the current screen """ from .RegionMatching import Screen if not isinstance(screen, Screen): screen = RegionMatching.Screen(screen) return screen.getTopLeft().offset(self.getScreen().getTopLeft().getOffset(self))
[ "def", "copyTo", "(", "self", ",", "screen", ")", ":", "from", ".", "RegionMatching", "import", "Screen", "if", "not", "isinstance", "(", "screen", ",", "Screen", ")", ":", "screen", "=", "RegionMatching", ".", "Screen", "(", "screen", ")", "return", "screen", ".", "getTopLeft", "(", ")", ".", "offset", "(", "self", ".", "getScreen", "(", ")", ".", "getTopLeft", "(", ")", ".", "getOffset", "(", "self", ")", ")" ]
Creates a new point with the same offset on the target screen as this point has on the current screen
[ "Creates", "a", "new", "point", "with", "the", "same", "offset", "on", "the", "target", "screen", "as", "this", "point", "has", "on", "the", "current", "screen" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/Geometry.py#L96-L102
train
glitchassassin/lackey
lackey/App.py
App.focusedWindow
def focusedWindow(cls): """ Returns a Region corresponding to whatever window is in the foreground """ x, y, w, h = PlatformManager.getWindowRect(PlatformManager.getForegroundWindow()) return Region(x, y, w, h)
python
def focusedWindow(cls): """ Returns a Region corresponding to whatever window is in the foreground """ x, y, w, h = PlatformManager.getWindowRect(PlatformManager.getForegroundWindow()) return Region(x, y, w, h)
[ "def", "focusedWindow", "(", "cls", ")", ":", "x", ",", "y", ",", "w", ",", "h", "=", "PlatformManager", ".", "getWindowRect", "(", "PlatformManager", ".", "getForegroundWindow", "(", ")", ")", "return", "Region", "(", "x", ",", "y", ",", "w", ",", "h", ")" ]
Returns a Region corresponding to whatever window is in the foreground
[ "Returns", "a", "Region", "corresponding", "to", "whatever", "window", "is", "in", "the", "foreground" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/App.py#L182-L185
train
glitchassassin/lackey
lackey/App.py
App.getWindow
def getWindow(self): """ Returns the title of the main window of the currently open app. Returns an empty string if no match could be found. """ if self.getPID() != -1: return PlatformManager.getWindowTitle(PlatformManager.getWindowByPID(self.getPID())) else: return ""
python
def getWindow(self): """ Returns the title of the main window of the currently open app. Returns an empty string if no match could be found. """ if self.getPID() != -1: return PlatformManager.getWindowTitle(PlatformManager.getWindowByPID(self.getPID())) else: return ""
[ "def", "getWindow", "(", "self", ")", ":", "if", "self", ".", "getPID", "(", ")", "!=", "-", "1", ":", "return", "PlatformManager", ".", "getWindowTitle", "(", "PlatformManager", ".", "getWindowByPID", "(", "self", ".", "getPID", "(", ")", ")", ")", "else", ":", "return", "\"\"" ]
Returns the title of the main window of the currently open app. Returns an empty string if no match could be found.
[ "Returns", "the", "title", "of", "the", "main", "window", "of", "the", "currently", "open", "app", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/App.py#L187-L195
train
glitchassassin/lackey
lackey/App.py
App.window
def window(self, windowNum=0): """ Returns the region corresponding to the specified window of the app. Defaults to the first window found for the corresponding PID. """ if self._pid == -1: return None x,y,w,h = PlatformManager.getWindowRect(PlatformManager.getWindowByPID(self._pid, windowNum)) return Region(x,y,w,h).clipRegionToScreen()
python
def window(self, windowNum=0): """ Returns the region corresponding to the specified window of the app. Defaults to the first window found for the corresponding PID. """ if self._pid == -1: return None x,y,w,h = PlatformManager.getWindowRect(PlatformManager.getWindowByPID(self._pid, windowNum)) return Region(x,y,w,h).clipRegionToScreen()
[ "def", "window", "(", "self", ",", "windowNum", "=", "0", ")", ":", "if", "self", ".", "_pid", "==", "-", "1", ":", "return", "None", "x", ",", "y", ",", "w", ",", "h", "=", "PlatformManager", ".", "getWindowRect", "(", "PlatformManager", ".", "getWindowByPID", "(", "self", ".", "_pid", ",", "windowNum", ")", ")", "return", "Region", "(", "x", ",", "y", ",", "w", ",", "h", ")", ".", "clipRegionToScreen", "(", ")" ]
Returns the region corresponding to the specified window of the app. Defaults to the first window found for the corresponding PID.
[ "Returns", "the", "region", "corresponding", "to", "the", "specified", "window", "of", "the", "app", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/App.py#L220-L228
train
glitchassassin/lackey
lackey/App.py
App.isRunning
def isRunning(self, waitTime=0): """ If PID isn't set yet, checks if there is a window with the specified title. """ waitUntil = time.time() + waitTime while True: if self.getPID() > 0: return True else: self._pid = PlatformManager.getWindowPID(PlatformManager.getWindowByTitle(re.escape(self._title))) # Check if we've waited long enough if time.time() > waitUntil: break else: time.sleep(self._defaultScanRate) return self.getPID() > 0
python
def isRunning(self, waitTime=0): """ If PID isn't set yet, checks if there is a window with the specified title. """ waitUntil = time.time() + waitTime while True: if self.getPID() > 0: return True else: self._pid = PlatformManager.getWindowPID(PlatformManager.getWindowByTitle(re.escape(self._title))) # Check if we've waited long enough if time.time() > waitUntil: break else: time.sleep(self._defaultScanRate) return self.getPID() > 0
[ "def", "isRunning", "(", "self", ",", "waitTime", "=", "0", ")", ":", "waitUntil", "=", "time", ".", "time", "(", ")", "+", "waitTime", "while", "True", ":", "if", "self", ".", "getPID", "(", ")", ">", "0", ":", "return", "True", "else", ":", "self", ".", "_pid", "=", "PlatformManager", ".", "getWindowPID", "(", "PlatformManager", ".", "getWindowByTitle", "(", "re", ".", "escape", "(", "self", ".", "_title", ")", ")", ")", "# Check if we've waited long enough", "if", "time", ".", "time", "(", ")", ">", "waitUntil", ":", "break", "else", ":", "time", ".", "sleep", "(", "self", ".", "_defaultScanRate", ")", "return", "self", ".", "getPID", "(", ")", ">", "0" ]
If PID isn't set yet, checks if there is a window with the specified title.
[ "If", "PID", "isn", "t", "set", "yet", "checks", "if", "there", "is", "a", "window", "with", "the", "specified", "title", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/App.py#L237-L251
train
glitchassassin/lackey
lackey/PlatformManagerDarwin.py
PlatformManagerDarwin._getVirtualScreenBitmap
def _getVirtualScreenBitmap(self): """ Returns a bitmap of all attached screens """ filenames = [] screen_details = self.getScreenDetails() for screen in screen_details: fh, filepath = tempfile.mkstemp('.png') filenames.append(filepath) os.close(fh) subprocess.call(['screencapture', '-x'] + filenames) min_x, min_y, screen_w, screen_h = self._getVirtualScreenRect() virtual_screen = Image.new("RGB", (screen_w, screen_h)) for filename, screen in zip(filenames, screen_details): # Capture virtscreen coordinates of monitor x, y, w, h = screen["rect"] # Convert image size if needed im = Image.open(filename) im.load() if im.size[0] != w or im.size[1] != h: im = im.resize((int(w), int(h)), Image.ANTIALIAS) # Convert to image-local coordinates x = x - min_x y = y - min_y # Paste on the virtual screen virtual_screen.paste(im, (x, y)) os.unlink(filename) return virtual_screen
python
def _getVirtualScreenBitmap(self): """ Returns a bitmap of all attached screens """ filenames = [] screen_details = self.getScreenDetails() for screen in screen_details: fh, filepath = tempfile.mkstemp('.png') filenames.append(filepath) os.close(fh) subprocess.call(['screencapture', '-x'] + filenames) min_x, min_y, screen_w, screen_h = self._getVirtualScreenRect() virtual_screen = Image.new("RGB", (screen_w, screen_h)) for filename, screen in zip(filenames, screen_details): # Capture virtscreen coordinates of monitor x, y, w, h = screen["rect"] # Convert image size if needed im = Image.open(filename) im.load() if im.size[0] != w or im.size[1] != h: im = im.resize((int(w), int(h)), Image.ANTIALIAS) # Convert to image-local coordinates x = x - min_x y = y - min_y # Paste on the virtual screen virtual_screen.paste(im, (x, y)) os.unlink(filename) return virtual_screen
[ "def", "_getVirtualScreenBitmap", "(", "self", ")", ":", "filenames", "=", "[", "]", "screen_details", "=", "self", ".", "getScreenDetails", "(", ")", "for", "screen", "in", "screen_details", ":", "fh", ",", "filepath", "=", "tempfile", ".", "mkstemp", "(", "'.png'", ")", "filenames", ".", "append", "(", "filepath", ")", "os", ".", "close", "(", "fh", ")", "subprocess", ".", "call", "(", "[", "'screencapture'", ",", "'-x'", "]", "+", "filenames", ")", "min_x", ",", "min_y", ",", "screen_w", ",", "screen_h", "=", "self", ".", "_getVirtualScreenRect", "(", ")", "virtual_screen", "=", "Image", ".", "new", "(", "\"RGB\"", ",", "(", "screen_w", ",", "screen_h", ")", ")", "for", "filename", ",", "screen", "in", "zip", "(", "filenames", ",", "screen_details", ")", ":", "# Capture virtscreen coordinates of monitor", "x", ",", "y", ",", "w", ",", "h", "=", "screen", "[", "\"rect\"", "]", "# Convert image size if needed", "im", "=", "Image", ".", "open", "(", "filename", ")", "im", ".", "load", "(", ")", "if", "im", ".", "size", "[", "0", "]", "!=", "w", "or", "im", ".", "size", "[", "1", "]", "!=", "h", ":", "im", "=", "im", ".", "resize", "(", "(", "int", "(", "w", ")", ",", "int", "(", "h", ")", ")", ",", "Image", ".", "ANTIALIAS", ")", "# Convert to image-local coordinates", "x", "=", "x", "-", "min_x", "y", "=", "y", "-", "min_y", "# Paste on the virtual screen", "virtual_screen", ".", "paste", "(", "im", ",", "(", "x", ",", "y", ")", ")", "os", ".", "unlink", "(", "filename", ")", "return", "virtual_screen" ]
Returns a bitmap of all attached screens
[ "Returns", "a", "bitmap", "of", "all", "attached", "screens" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/PlatformManagerDarwin.py#L228-L254
train
glitchassassin/lackey
lackey/PlatformManagerDarwin.py
PlatformManagerDarwin.osCopy
def osCopy(self): """ Triggers the OS "copy" keyboard shortcut """ k = Keyboard() k.keyDown("{CTRL}") k.type("c") k.keyUp("{CTRL}")
python
def osCopy(self): """ Triggers the OS "copy" keyboard shortcut """ k = Keyboard() k.keyDown("{CTRL}") k.type("c") k.keyUp("{CTRL}")
[ "def", "osCopy", "(", "self", ")", ":", "k", "=", "Keyboard", "(", ")", "k", ".", "keyDown", "(", "\"{CTRL}\"", ")", "k", ".", "type", "(", "\"c\"", ")", "k", ".", "keyUp", "(", "\"{CTRL}\"", ")" ]
Triggers the OS "copy" keyboard shortcut
[ "Triggers", "the", "OS", "copy", "keyboard", "shortcut" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/PlatformManagerDarwin.py#L287-L292
train
glitchassassin/lackey
lackey/PlatformManagerDarwin.py
PlatformManagerDarwin.getForegroundWindow
def getForegroundWindow(self): """ Returns a handle to the window in the foreground """ active_app = NSWorkspace.sharedWorkspace().frontmostApplication().localizedName() for w in self._get_window_list(): if "kCGWindowOwnerName" in w and w["kCGWindowOwnerName"] == active_app: return w["kCGWindowNumber"]
python
def getForegroundWindow(self): """ Returns a handle to the window in the foreground """ active_app = NSWorkspace.sharedWorkspace().frontmostApplication().localizedName() for w in self._get_window_list(): if "kCGWindowOwnerName" in w and w["kCGWindowOwnerName"] == active_app: return w["kCGWindowNumber"]
[ "def", "getForegroundWindow", "(", "self", ")", ":", "active_app", "=", "NSWorkspace", ".", "sharedWorkspace", "(", ")", ".", "frontmostApplication", "(", ")", ".", "localizedName", "(", ")", "for", "w", "in", "self", ".", "_get_window_list", "(", ")", ":", "if", "\"kCGWindowOwnerName\"", "in", "w", "and", "w", "[", "\"kCGWindowOwnerName\"", "]", "==", "active_app", ":", "return", "w", "[", "\"kCGWindowNumber\"", "]" ]
Returns a handle to the window in the foreground
[ "Returns", "a", "handle", "to", "the", "window", "in", "the", "foreground" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/PlatformManagerDarwin.py#L347-L352
train
glitchassassin/lackey
lackey/PlatformManagerDarwin.py
PlatformManagerDarwin._get_window_list
def _get_window_list(self): """ Returns a dictionary of details about open windows """ window_list = Quartz.CGWindowListCopyWindowInfo(Quartz.kCGWindowListExcludeDesktopElements, Quartz.kCGNullWindowID) return window_list
python
def _get_window_list(self): """ Returns a dictionary of details about open windows """ window_list = Quartz.CGWindowListCopyWindowInfo(Quartz.kCGWindowListExcludeDesktopElements, Quartz.kCGNullWindowID) return window_list
[ "def", "_get_window_list", "(", "self", ")", ":", "window_list", "=", "Quartz", ".", "CGWindowListCopyWindowInfo", "(", "Quartz", ".", "kCGWindowListExcludeDesktopElements", ",", "Quartz", ".", "kCGNullWindowID", ")", "return", "window_list" ]
Returns a dictionary of details about open windows
[ "Returns", "a", "dictionary", "of", "details", "about", "open", "windows" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/PlatformManagerDarwin.py#L354-L357
train
glitchassassin/lackey
lackey/PlatformManagerDarwin.py
PlatformManagerDarwin.getProcessName
def getProcessName(self, pid): """ Searches all processes for the given PID, then returns the originating command """ ps = subprocess.check_output(["ps", "aux"]).decode("ascii") processes = ps.split("\n") cols = len(processes[0].split()) - 1 for row in processes[1:]: if row != "": proc = row.split(None, cols) if proc[1].strip() == str(pid): return proc[-1]
python
def getProcessName(self, pid): """ Searches all processes for the given PID, then returns the originating command """ ps = subprocess.check_output(["ps", "aux"]).decode("ascii") processes = ps.split("\n") cols = len(processes[0].split()) - 1 for row in processes[1:]: if row != "": proc = row.split(None, cols) if proc[1].strip() == str(pid): return proc[-1]
[ "def", "getProcessName", "(", "self", ",", "pid", ")", ":", "ps", "=", "subprocess", ".", "check_output", "(", "[", "\"ps\"", ",", "\"aux\"", "]", ")", ".", "decode", "(", "\"ascii\"", ")", "processes", "=", "ps", ".", "split", "(", "\"\\n\"", ")", "cols", "=", "len", "(", "processes", "[", "0", "]", ".", "split", "(", ")", ")", "-", "1", "for", "row", "in", "processes", "[", "1", ":", "]", ":", "if", "row", "!=", "\"\"", ":", "proc", "=", "row", ".", "split", "(", "None", ",", "cols", ")", "if", "proc", "[", "1", "]", ".", "strip", "(", ")", "==", "str", "(", "pid", ")", ":", "return", "proc", "[", "-", "1", "]" ]
Searches all processes for the given PID, then returns the originating command
[ "Searches", "all", "processes", "for", "the", "given", "PID", "then", "returns", "the", "originating", "command" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/PlatformManagerDarwin.py#L407-L416
train
glitchassassin/lackey
lackey/InputEmulation.py
Mouse.moveSpeed
def moveSpeed(self, location, seconds=0.3): """ Moves cursor to specified ``Location`` over ``seconds``. If ``seconds`` is 0, moves the cursor immediately. Used for smooth somewhat-human-like motion. """ self._lock.acquire() original_location = mouse.get_position() mouse.move(location.x, location.y, duration=seconds) if mouse.get_position() == original_location and original_location != location.getTuple(): raise IOError(""" Unable to move mouse cursor. This may happen if you're trying to automate a program running as Administrator with a script running as a non-elevated user. """) self._lock.release()
python
def moveSpeed(self, location, seconds=0.3): """ Moves cursor to specified ``Location`` over ``seconds``. If ``seconds`` is 0, moves the cursor immediately. Used for smooth somewhat-human-like motion. """ self._lock.acquire() original_location = mouse.get_position() mouse.move(location.x, location.y, duration=seconds) if mouse.get_position() == original_location and original_location != location.getTuple(): raise IOError(""" Unable to move mouse cursor. This may happen if you're trying to automate a program running as Administrator with a script running as a non-elevated user. """) self._lock.release()
[ "def", "moveSpeed", "(", "self", ",", "location", ",", "seconds", "=", "0.3", ")", ":", "self", ".", "_lock", ".", "acquire", "(", ")", "original_location", "=", "mouse", ".", "get_position", "(", ")", "mouse", ".", "move", "(", "location", ".", "x", ",", "location", ".", "y", ",", "duration", "=", "seconds", ")", "if", "mouse", ".", "get_position", "(", ")", "==", "original_location", "and", "original_location", "!=", "location", ".", "getTuple", "(", ")", ":", "raise", "IOError", "(", "\"\"\"\n Unable to move mouse cursor. This may happen if you're trying to automate a \n program running as Administrator with a script running as a non-elevated user.\n \"\"\"", ")", "self", ".", "_lock", ".", "release", "(", ")" ]
Moves cursor to specified ``Location`` over ``seconds``. If ``seconds`` is 0, moves the cursor immediately. Used for smooth somewhat-human-like motion.
[ "Moves", "cursor", "to", "specified", "Location", "over", "seconds", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/InputEmulation.py#L59-L73
train
glitchassassin/lackey
lackey/InputEmulation.py
Mouse.click
def click(self, loc=None, button=mouse.LEFT): """ Clicks the specified mouse button. If ``loc`` is set, move the mouse to that Location first. Use button constants Mouse.LEFT, Mouse.MIDDLE, Mouse.RIGHT """ if loc is not None: self.moveSpeed(loc) self._lock.acquire() mouse.click(button) self._lock.release()
python
def click(self, loc=None, button=mouse.LEFT): """ Clicks the specified mouse button. If ``loc`` is set, move the mouse to that Location first. Use button constants Mouse.LEFT, Mouse.MIDDLE, Mouse.RIGHT """ if loc is not None: self.moveSpeed(loc) self._lock.acquire() mouse.click(button) self._lock.release()
[ "def", "click", "(", "self", ",", "loc", "=", "None", ",", "button", "=", "mouse", ".", "LEFT", ")", ":", "if", "loc", "is", "not", "None", ":", "self", ".", "moveSpeed", "(", "loc", ")", "self", ".", "_lock", ".", "acquire", "(", ")", "mouse", ".", "click", "(", "button", ")", "self", ".", "_lock", ".", "release", "(", ")" ]
Clicks the specified mouse button. If ``loc`` is set, move the mouse to that Location first. Use button constants Mouse.LEFT, Mouse.MIDDLE, Mouse.RIGHT
[ "Clicks", "the", "specified", "mouse", "button", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/InputEmulation.py#L75-L86
train
glitchassassin/lackey
lackey/InputEmulation.py
Mouse.buttonDown
def buttonDown(self, button=mouse.LEFT): """ Holds down the specified mouse button. Use Mouse.LEFT, Mouse.MIDDLE, Mouse.RIGHT """ self._lock.acquire() mouse.press(button) self._lock.release()
python
def buttonDown(self, button=mouse.LEFT): """ Holds down the specified mouse button. Use Mouse.LEFT, Mouse.MIDDLE, Mouse.RIGHT """ self._lock.acquire() mouse.press(button) self._lock.release()
[ "def", "buttonDown", "(", "self", ",", "button", "=", "mouse", ".", "LEFT", ")", ":", "self", ".", "_lock", ".", "acquire", "(", ")", "mouse", ".", "press", "(", "button", ")", "self", ".", "_lock", ".", "release", "(", ")" ]
Holds down the specified mouse button. Use Mouse.LEFT, Mouse.MIDDLE, Mouse.RIGHT
[ "Holds", "down", "the", "specified", "mouse", "button", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/InputEmulation.py#L87-L94
train
glitchassassin/lackey
lackey/InputEmulation.py
Mouse.buttonUp
def buttonUp(self, button=mouse.LEFT): """ Releases the specified mouse button. Use Mouse.LEFT, Mouse.MIDDLE, Mouse.RIGHT """ self._lock.acquire() mouse.release(button) self._lock.release()
python
def buttonUp(self, button=mouse.LEFT): """ Releases the specified mouse button. Use Mouse.LEFT, Mouse.MIDDLE, Mouse.RIGHT """ self._lock.acquire() mouse.release(button) self._lock.release()
[ "def", "buttonUp", "(", "self", ",", "button", "=", "mouse", ".", "LEFT", ")", ":", "self", ".", "_lock", ".", "acquire", "(", ")", "mouse", ".", "release", "(", "button", ")", "self", ".", "_lock", ".", "release", "(", ")" ]
Releases the specified mouse button. Use Mouse.LEFT, Mouse.MIDDLE, Mouse.RIGHT
[ "Releases", "the", "specified", "mouse", "button", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/InputEmulation.py#L96-L103
train
glitchassassin/lackey
lackey/InputEmulation.py
Mouse.wheel
def wheel(self, direction, steps): """ Clicks the wheel the specified number of steps in the given direction. Use Mouse.WHEEL_DOWN, Mouse.WHEEL_UP """ self._lock.acquire() if direction == 1: wheel_moved = steps elif direction == 0: wheel_moved = -1*steps else: raise ValueError("Expected direction to be 1 or 0") self._lock.release() return mouse.wheel(wheel_moved)
python
def wheel(self, direction, steps): """ Clicks the wheel the specified number of steps in the given direction. Use Mouse.WHEEL_DOWN, Mouse.WHEEL_UP """ self._lock.acquire() if direction == 1: wheel_moved = steps elif direction == 0: wheel_moved = -1*steps else: raise ValueError("Expected direction to be 1 or 0") self._lock.release() return mouse.wheel(wheel_moved)
[ "def", "wheel", "(", "self", ",", "direction", ",", "steps", ")", ":", "self", ".", "_lock", ".", "acquire", "(", ")", "if", "direction", "==", "1", ":", "wheel_moved", "=", "steps", "elif", "direction", "==", "0", ":", "wheel_moved", "=", "-", "1", "*", "steps", "else", ":", "raise", "ValueError", "(", "\"Expected direction to be 1 or 0\"", ")", "self", ".", "_lock", ".", "release", "(", ")", "return", "mouse", ".", "wheel", "(", "wheel_moved", ")" ]
Clicks the wheel the specified number of steps in the given direction. Use Mouse.WHEEL_DOWN, Mouse.WHEEL_UP
[ "Clicks", "the", "wheel", "the", "specified", "number", "of", "steps", "in", "the", "given", "direction", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/InputEmulation.py#L105-L118
train
glitchassassin/lackey
lackey/InputEmulation.py
Keyboard.type
def type(self, text, delay=0.1): """ Translates a string into a series of keystrokes. Respects Sikuli special codes, like "{ENTER}". """ in_special_code = False special_code = "" modifier_held = False modifier_stuck = False modifier_codes = [] for i in range(0, len(text)): if text[i] == "{": in_special_code = True elif in_special_code and (text[i] == "}" or text[i] == " " or i == len(text)-1): in_special_code = False if special_code in self._SPECIAL_KEYCODES.keys(): # Found a special code keyboard.press_and_release(self._SPECIAL_KEYCODES[special_code]) else: # Wasn't a special code, just treat it as keystrokes keyboard.press(self._SPECIAL_KEYCODES["SHIFT"]) keyboard.press_and_release(self._UPPERCASE_KEYCODES["{"]) keyboard.release(self._SPECIAL_KEYCODES["SHIFT"]) # Release the rest of the keys normally self.type(special_code) self.type(text[i]) special_code = "" elif in_special_code: special_code += text[i] elif text[i] in self._REGULAR_KEYCODES.keys(): keyboard.press(self._REGULAR_KEYCODES[text[i]]) keyboard.release(self._REGULAR_KEYCODES[text[i]]) elif text[i] in self._UPPERCASE_KEYCODES.keys(): keyboard.press(self._SPECIAL_KEYCODES["SHIFT"]) keyboard.press_and_release(self._UPPERCASE_KEYCODES[text[i]]) keyboard.release(self._SPECIAL_KEYCODES["SHIFT"]) if delay and not in_special_code: time.sleep(delay)
python
def type(self, text, delay=0.1): """ Translates a string into a series of keystrokes. Respects Sikuli special codes, like "{ENTER}". """ in_special_code = False special_code = "" modifier_held = False modifier_stuck = False modifier_codes = [] for i in range(0, len(text)): if text[i] == "{": in_special_code = True elif in_special_code and (text[i] == "}" or text[i] == " " or i == len(text)-1): in_special_code = False if special_code in self._SPECIAL_KEYCODES.keys(): # Found a special code keyboard.press_and_release(self._SPECIAL_KEYCODES[special_code]) else: # Wasn't a special code, just treat it as keystrokes keyboard.press(self._SPECIAL_KEYCODES["SHIFT"]) keyboard.press_and_release(self._UPPERCASE_KEYCODES["{"]) keyboard.release(self._SPECIAL_KEYCODES["SHIFT"]) # Release the rest of the keys normally self.type(special_code) self.type(text[i]) special_code = "" elif in_special_code: special_code += text[i] elif text[i] in self._REGULAR_KEYCODES.keys(): keyboard.press(self._REGULAR_KEYCODES[text[i]]) keyboard.release(self._REGULAR_KEYCODES[text[i]]) elif text[i] in self._UPPERCASE_KEYCODES.keys(): keyboard.press(self._SPECIAL_KEYCODES["SHIFT"]) keyboard.press_and_release(self._UPPERCASE_KEYCODES[text[i]]) keyboard.release(self._SPECIAL_KEYCODES["SHIFT"]) if delay and not in_special_code: time.sleep(delay)
[ "def", "type", "(", "self", ",", "text", ",", "delay", "=", "0.1", ")", ":", "in_special_code", "=", "False", "special_code", "=", "\"\"", "modifier_held", "=", "False", "modifier_stuck", "=", "False", "modifier_codes", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "text", ")", ")", ":", "if", "text", "[", "i", "]", "==", "\"{\"", ":", "in_special_code", "=", "True", "elif", "in_special_code", "and", "(", "text", "[", "i", "]", "==", "\"}\"", "or", "text", "[", "i", "]", "==", "\" \"", "or", "i", "==", "len", "(", "text", ")", "-", "1", ")", ":", "in_special_code", "=", "False", "if", "special_code", "in", "self", ".", "_SPECIAL_KEYCODES", ".", "keys", "(", ")", ":", "# Found a special code", "keyboard", ".", "press_and_release", "(", "self", ".", "_SPECIAL_KEYCODES", "[", "special_code", "]", ")", "else", ":", "# Wasn't a special code, just treat it as keystrokes", "keyboard", ".", "press", "(", "self", ".", "_SPECIAL_KEYCODES", "[", "\"SHIFT\"", "]", ")", "keyboard", ".", "press_and_release", "(", "self", ".", "_UPPERCASE_KEYCODES", "[", "\"{\"", "]", ")", "keyboard", ".", "release", "(", "self", ".", "_SPECIAL_KEYCODES", "[", "\"SHIFT\"", "]", ")", "# Release the rest of the keys normally", "self", ".", "type", "(", "special_code", ")", "self", ".", "type", "(", "text", "[", "i", "]", ")", "special_code", "=", "\"\"", "elif", "in_special_code", ":", "special_code", "+=", "text", "[", "i", "]", "elif", "text", "[", "i", "]", "in", "self", ".", "_REGULAR_KEYCODES", ".", "keys", "(", ")", ":", "keyboard", ".", "press", "(", "self", ".", "_REGULAR_KEYCODES", "[", "text", "[", "i", "]", "]", ")", "keyboard", ".", "release", "(", "self", ".", "_REGULAR_KEYCODES", "[", "text", "[", "i", "]", "]", ")", "elif", "text", "[", "i", "]", "in", "self", ".", "_UPPERCASE_KEYCODES", ".", "keys", "(", ")", ":", "keyboard", ".", "press", "(", "self", ".", "_SPECIAL_KEYCODES", "[", "\"SHIFT\"", "]", ")", "keyboard", ".", "press_and_release", "(", "self", 
".", "_UPPERCASE_KEYCODES", "[", "text", "[", "i", "]", "]", ")", "keyboard", ".", "release", "(", "self", ".", "_SPECIAL_KEYCODES", "[", "\"SHIFT\"", "]", ")", "if", "delay", "and", "not", "in_special_code", ":", "time", ".", "sleep", "(", "delay", ")" ]
Translates a string into a series of keystrokes. Respects Sikuli special codes, like "{ENTER}".
[ "Translates", "a", "string", "into", "a", "series", "of", "keystrokes", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/InputEmulation.py#L347-L385
train
glitchassassin/lackey
lackey/TemplateMatchers.py
NaiveTemplateMatcher.findBestMatch
def findBestMatch(self, needle, similarity): """ Find the best match for ``needle`` that has a similarity better than or equal to ``similarity``. Returns a tuple of ``(position, confidence)`` if a match is found, or ``None`` otherwise. *Developer's Note - Despite the name, this method actually returns the **first** result with enough similarity, not the **best** result.* """ method = cv2.TM_CCOEFF_NORMED position = None match = cv2.matchTemplate(self.haystack, needle, method) min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match) if method == cv2.TM_SQDIFF_NORMED or method == cv2.TM_SQDIFF: confidence = min_val if min_val <= 1-similarity: # Confidence checks out position = min_loc else: confidence = max_val if max_val >= similarity: # Confidence checks out position = max_loc if not position: return None return (position, confidence)
python
def findBestMatch(self, needle, similarity): """ Find the best match for ``needle`` that has a similarity better than or equal to ``similarity``. Returns a tuple of ``(position, confidence)`` if a match is found, or ``None`` otherwise. *Developer's Note - Despite the name, this method actually returns the **first** result with enough similarity, not the **best** result.* """ method = cv2.TM_CCOEFF_NORMED position = None match = cv2.matchTemplate(self.haystack, needle, method) min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match) if method == cv2.TM_SQDIFF_NORMED or method == cv2.TM_SQDIFF: confidence = min_val if min_val <= 1-similarity: # Confidence checks out position = min_loc else: confidence = max_val if max_val >= similarity: # Confidence checks out position = max_loc if not position: return None return (position, confidence)
[ "def", "findBestMatch", "(", "self", ",", "needle", ",", "similarity", ")", ":", "method", "=", "cv2", ".", "TM_CCOEFF_NORMED", "position", "=", "None", "match", "=", "cv2", ".", "matchTemplate", "(", "self", ".", "haystack", ",", "needle", ",", "method", ")", "min_val", ",", "max_val", ",", "min_loc", ",", "max_loc", "=", "cv2", ".", "minMaxLoc", "(", "match", ")", "if", "method", "==", "cv2", ".", "TM_SQDIFF_NORMED", "or", "method", "==", "cv2", ".", "TM_SQDIFF", ":", "confidence", "=", "min_val", "if", "min_val", "<=", "1", "-", "similarity", ":", "# Confidence checks out", "position", "=", "min_loc", "else", ":", "confidence", "=", "max_val", "if", "max_val", ">=", "similarity", ":", "# Confidence checks out", "position", "=", "max_loc", "if", "not", "position", ":", "return", "None", "return", "(", "position", ",", "confidence", ")" ]
Find the best match for ``needle`` that has a similarity better than or equal to ``similarity``. Returns a tuple of ``(position, confidence)`` if a match is found, or ``None`` otherwise. *Developer's Note - Despite the name, this method actually returns the **first** result with enough similarity, not the **best** result.*
[ "Find", "the", "best", "match", "for", "needle", "that", "has", "a", "similarity", "better", "than", "or", "equal", "to", "similarity", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/TemplateMatchers.py#L16-L43
train
glitchassassin/lackey
lackey/TemplateMatchers.py
NaiveTemplateMatcher.findAllMatches
def findAllMatches(self, needle, similarity): """ Find all matches for ``needle`` with confidence better than or equal to ``similarity``. Returns an array of tuples ``(position, confidence)`` if match(es) is/are found, or an empty array otherwise. """ positions = [] method = cv2.TM_CCOEFF_NORMED match = cv2.matchTemplate(self.haystack, self.needle, method) indices = (-match).argpartition(100, axis=None)[:100] # Review the 100 top matches unraveled_indices = numpy.array(numpy.unravel_index(indices, match.shape)).T for location in unraveled_indices: y, x = location confidence = match[y][x] if method == cv2.TM_SQDIFF_NORMED or method == cv2.TM_SQDIFF: if confidence <= 1-similarity: positions.append(((x, y), confidence)) else: if confidence >= similarity: positions.append(((x, y), confidence)) positions.sort(key=lambda x: (x[0][1], x[0][0])) return positions
python
def findAllMatches(self, needle, similarity): """ Find all matches for ``needle`` with confidence better than or equal to ``similarity``. Returns an array of tuples ``(position, confidence)`` if match(es) is/are found, or an empty array otherwise. """ positions = [] method = cv2.TM_CCOEFF_NORMED match = cv2.matchTemplate(self.haystack, self.needle, method) indices = (-match).argpartition(100, axis=None)[:100] # Review the 100 top matches unraveled_indices = numpy.array(numpy.unravel_index(indices, match.shape)).T for location in unraveled_indices: y, x = location confidence = match[y][x] if method == cv2.TM_SQDIFF_NORMED or method == cv2.TM_SQDIFF: if confidence <= 1-similarity: positions.append(((x, y), confidence)) else: if confidence >= similarity: positions.append(((x, y), confidence)) positions.sort(key=lambda x: (x[0][1], x[0][0])) return positions
[ "def", "findAllMatches", "(", "self", ",", "needle", ",", "similarity", ")", ":", "positions", "=", "[", "]", "method", "=", "cv2", ".", "TM_CCOEFF_NORMED", "match", "=", "cv2", ".", "matchTemplate", "(", "self", ".", "haystack", ",", "self", ".", "needle", ",", "method", ")", "indices", "=", "(", "-", "match", ")", ".", "argpartition", "(", "100", ",", "axis", "=", "None", ")", "[", ":", "100", "]", "# Review the 100 top matches", "unraveled_indices", "=", "numpy", ".", "array", "(", "numpy", ".", "unravel_index", "(", "indices", ",", "match", ".", "shape", ")", ")", ".", "T", "for", "location", "in", "unraveled_indices", ":", "y", ",", "x", "=", "location", "confidence", "=", "match", "[", "y", "]", "[", "x", "]", "if", "method", "==", "cv2", ".", "TM_SQDIFF_NORMED", "or", "method", "==", "cv2", ".", "TM_SQDIFF", ":", "if", "confidence", "<=", "1", "-", "similarity", ":", "positions", ".", "append", "(", "(", "(", "x", ",", "y", ")", ",", "confidence", ")", ")", "else", ":", "if", "confidence", ">=", "similarity", ":", "positions", ".", "append", "(", "(", "(", "x", ",", "y", ")", ",", "confidence", ")", ")", "positions", ".", "sort", "(", "key", "=", "lambda", "x", ":", "(", "x", "[", "0", "]", "[", "1", "]", ",", "x", "[", "0", "]", "[", "0", "]", ")", ")", "return", "positions" ]
Find all matches for ``needle`` with confidence better than or equal to ``similarity``. Returns an array of tuples ``(position, confidence)`` if match(es) is/are found, or an empty array otherwise.
[ "Find", "all", "matches", "for", "needle", "with", "confidence", "better", "than", "or", "equal", "to", "similarity", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/TemplateMatchers.py#L45-L69
train
glitchassassin/lackey
lackey/TemplateMatchers.py
PyramidTemplateMatcher.findAllMatches
def findAllMatches(self, needle, similarity): """ Finds all matches above ``similarity`` using a search pyramid to improve efficiency Pyramid implementation unashamedly stolen from https://github.com/stb-tester/stb-tester """ positions = [] # Use findBestMatch to get the best match while True: best_match = self.findBestMatch(needle, similarity) if best_match is None: # No more matches break # Found a match. Add it to our list positions.append(best_match) # (position, confidence) # Erase the found match from the haystack. # Repeat this process until no other matches are found x, y = best_match[0] w = needle.shape[1] h = needle.shape[0] roi = (x, y, w, h) # numpy 2D slice roi_slice = (slice(roi[1], roi[1]+roi[3]), slice(roi[0], roi[0]+roi[2])) self.haystack[roi_slice] = 0 # Whew! Let's see if there's a match after all that. positions.sort(key=lambda x: (x[0][1], x[0][0])) return positions
python
def findAllMatches(self, needle, similarity): """ Finds all matches above ``similarity`` using a search pyramid to improve efficiency Pyramid implementation unashamedly stolen from https://github.com/stb-tester/stb-tester """ positions = [] # Use findBestMatch to get the best match while True: best_match = self.findBestMatch(needle, similarity) if best_match is None: # No more matches break # Found a match. Add it to our list positions.append(best_match) # (position, confidence) # Erase the found match from the haystack. # Repeat this process until no other matches are found x, y = best_match[0] w = needle.shape[1] h = needle.shape[0] roi = (x, y, w, h) # numpy 2D slice roi_slice = (slice(roi[1], roi[1]+roi[3]), slice(roi[0], roi[0]+roi[2])) self.haystack[roi_slice] = 0 # Whew! Let's see if there's a match after all that. positions.sort(key=lambda x: (x[0][1], x[0][0])) return positions
[ "def", "findAllMatches", "(", "self", ",", "needle", ",", "similarity", ")", ":", "positions", "=", "[", "]", "# Use findBestMatch to get the best match", "while", "True", ":", "best_match", "=", "self", ".", "findBestMatch", "(", "needle", ",", "similarity", ")", "if", "best_match", "is", "None", ":", "# No more matches", "break", "# Found a match. Add it to our list", "positions", ".", "append", "(", "best_match", ")", "# (position, confidence)", "# Erase the found match from the haystack.", "# Repeat this process until no other matches are found", "x", ",", "y", "=", "best_match", "[", "0", "]", "w", "=", "needle", ".", "shape", "[", "1", "]", "h", "=", "needle", ".", "shape", "[", "0", "]", "roi", "=", "(", "x", ",", "y", ",", "w", ",", "h", ")", "# numpy 2D slice", "roi_slice", "=", "(", "slice", "(", "roi", "[", "1", "]", ",", "roi", "[", "1", "]", "+", "roi", "[", "3", "]", ")", ",", "slice", "(", "roi", "[", "0", "]", ",", "roi", "[", "0", "]", "+", "roi", "[", "2", "]", ")", ")", "self", ".", "haystack", "[", "roi_slice", "]", "=", "0", "# Whew! Let's see if there's a match after all that.", "positions", ".", "sort", "(", "key", "=", "lambda", "x", ":", "(", "x", "[", "0", "]", "[", "1", "]", ",", "x", "[", "0", "]", "[", "0", "]", ")", ")", "return", "positions" ]
Finds all matches above ``similarity`` using a search pyramid to improve efficiency Pyramid implementation unashamedly stolen from https://github.com/stb-tester/stb-tester
[ "Finds", "all", "matches", "above", "similarity", "using", "a", "search", "pyramid", "to", "improve", "efficiency" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/TemplateMatchers.py#L218-L244
train
glitchassassin/lackey
lackey/TemplateMatchers.py
PyramidTemplateMatcher._build_pyramid
def _build_pyramid(self, image, levels): """ Returns a list of reduced-size images, from smallest to original size """ pyramid = [image] for l in range(levels-1): if any(x < 20 for x in pyramid[-1].shape[:2]): break pyramid.append(cv2.pyrDown(pyramid[-1])) return list(reversed(pyramid))
python
def _build_pyramid(self, image, levels): """ Returns a list of reduced-size images, from smallest to original size """ pyramid = [image] for l in range(levels-1): if any(x < 20 for x in pyramid[-1].shape[:2]): break pyramid.append(cv2.pyrDown(pyramid[-1])) return list(reversed(pyramid))
[ "def", "_build_pyramid", "(", "self", ",", "image", ",", "levels", ")", ":", "pyramid", "=", "[", "image", "]", "for", "l", "in", "range", "(", "levels", "-", "1", ")", ":", "if", "any", "(", "x", "<", "20", "for", "x", "in", "pyramid", "[", "-", "1", "]", ".", "shape", "[", ":", "2", "]", ")", ":", "break", "pyramid", ".", "append", "(", "cv2", ".", "pyrDown", "(", "pyramid", "[", "-", "1", "]", ")", ")", "return", "list", "(", "reversed", "(", "pyramid", ")", ")" ]
Returns a list of reduced-size images, from smallest to original size
[ "Returns", "a", "list", "of", "reduced", "-", "size", "images", "from", "smallest", "to", "original", "size" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/TemplateMatchers.py#L246-L253
train
google-research/batch-ppo
agents/tools/count_weights.py
count_weights
def count_weights(scope=None, exclude=None, graph=None): """Count learnable parameters. Args: scope: Restrict the count to a variable scope. exclude: Regex to match variable names to exclude. graph: Operate on a graph other than the current default graph. Returns: Number of learnable parameters as integer. """ if scope: scope = scope if scope.endswith('/') else scope + '/' graph = graph or tf.get_default_graph() vars_ = graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if scope: vars_ = [var for var in vars_ if var.name.startswith(scope)] if exclude: exclude = re.compile(exclude) vars_ = [var for var in vars_ if not exclude.match(var.name)] shapes = [var.get_shape().as_list() for var in vars_] return int(sum(np.prod(shape) for shape in shapes))
python
def count_weights(scope=None, exclude=None, graph=None): """Count learnable parameters. Args: scope: Restrict the count to a variable scope. exclude: Regex to match variable names to exclude. graph: Operate on a graph other than the current default graph. Returns: Number of learnable parameters as integer. """ if scope: scope = scope if scope.endswith('/') else scope + '/' graph = graph or tf.get_default_graph() vars_ = graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if scope: vars_ = [var for var in vars_ if var.name.startswith(scope)] if exclude: exclude = re.compile(exclude) vars_ = [var for var in vars_ if not exclude.match(var.name)] shapes = [var.get_shape().as_list() for var in vars_] return int(sum(np.prod(shape) for shape in shapes))
[ "def", "count_weights", "(", "scope", "=", "None", ",", "exclude", "=", "None", ",", "graph", "=", "None", ")", ":", "if", "scope", ":", "scope", "=", "scope", "if", "scope", ".", "endswith", "(", "'/'", ")", "else", "scope", "+", "'/'", "graph", "=", "graph", "or", "tf", ".", "get_default_graph", "(", ")", "vars_", "=", "graph", ".", "get_collection", "(", "tf", ".", "GraphKeys", ".", "TRAINABLE_VARIABLES", ")", "if", "scope", ":", "vars_", "=", "[", "var", "for", "var", "in", "vars_", "if", "var", ".", "name", ".", "startswith", "(", "scope", ")", "]", "if", "exclude", ":", "exclude", "=", "re", ".", "compile", "(", "exclude", ")", "vars_", "=", "[", "var", "for", "var", "in", "vars_", "if", "not", "exclude", ".", "match", "(", "var", ".", "name", ")", "]", "shapes", "=", "[", "var", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "for", "var", "in", "vars_", "]", "return", "int", "(", "sum", "(", "np", ".", "prod", "(", "shape", ")", "for", "shape", "in", "shapes", ")", ")" ]
Count learnable parameters. Args: scope: Restrict the count to a variable scope. exclude: Regex to match variable names to exclude. graph: Operate on a graph other than the current default graph. Returns: Number of learnable parameters as integer.
[ "Count", "learnable", "parameters", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/count_weights.py#L27-L48
train
google-research/batch-ppo
agents/scripts/networks.py
_custom_diag_normal_kl
def _custom_diag_normal_kl(lhs, rhs, name=None): # pylint: disable=unused-argument """Empirical KL divergence of two normals with diagonal covariance. Args: lhs: Diagonal Normal distribution. rhs: Diagonal Normal distribution. name: Name scope for the op. Returns: KL divergence from lhs to rhs. """ with tf.name_scope(name or 'kl_divergence'): mean0 = lhs.mean() mean1 = rhs.mean() logstd0 = tf.log(lhs.stddev()) logstd1 = tf.log(rhs.stddev()) logstd0_2, logstd1_2 = 2 * logstd0, 2 * logstd1 return 0.5 * ( tf.reduce_sum(tf.exp(logstd0_2 - logstd1_2), -1) + tf.reduce_sum((mean1 - mean0) ** 2 / tf.exp(logstd1_2), -1) + tf.reduce_sum(logstd1_2, -1) - tf.reduce_sum(logstd0_2, -1) - mean0.shape[-1].value)
python
def _custom_diag_normal_kl(lhs, rhs, name=None): # pylint: disable=unused-argument """Empirical KL divergence of two normals with diagonal covariance. Args: lhs: Diagonal Normal distribution. rhs: Diagonal Normal distribution. name: Name scope for the op. Returns: KL divergence from lhs to rhs. """ with tf.name_scope(name or 'kl_divergence'): mean0 = lhs.mean() mean1 = rhs.mean() logstd0 = tf.log(lhs.stddev()) logstd1 = tf.log(rhs.stddev()) logstd0_2, logstd1_2 = 2 * logstd0, 2 * logstd1 return 0.5 * ( tf.reduce_sum(tf.exp(logstd0_2 - logstd1_2), -1) + tf.reduce_sum((mean1 - mean0) ** 2 / tf.exp(logstd1_2), -1) + tf.reduce_sum(logstd1_2, -1) - tf.reduce_sum(logstd0_2, -1) - mean0.shape[-1].value)
[ "def", "_custom_diag_normal_kl", "(", "lhs", ",", "rhs", ",", "name", "=", "None", ")", ":", "# pylint: disable=unused-argument", "with", "tf", ".", "name_scope", "(", "name", "or", "'kl_divergence'", ")", ":", "mean0", "=", "lhs", ".", "mean", "(", ")", "mean1", "=", "rhs", ".", "mean", "(", ")", "logstd0", "=", "tf", ".", "log", "(", "lhs", ".", "stddev", "(", ")", ")", "logstd1", "=", "tf", ".", "log", "(", "rhs", ".", "stddev", "(", ")", ")", "logstd0_2", ",", "logstd1_2", "=", "2", "*", "logstd0", ",", "2", "*", "logstd1", "return", "0.5", "*", "(", "tf", ".", "reduce_sum", "(", "tf", ".", "exp", "(", "logstd0_2", "-", "logstd1_2", ")", ",", "-", "1", ")", "+", "tf", ".", "reduce_sum", "(", "(", "mean1", "-", "mean0", ")", "**", "2", "/", "tf", ".", "exp", "(", "logstd1_2", ")", ",", "-", "1", ")", "+", "tf", ".", "reduce_sum", "(", "logstd1_2", ",", "-", "1", ")", "-", "tf", ".", "reduce_sum", "(", "logstd0_2", ",", "-", "1", ")", "-", "mean0", ".", "shape", "[", "-", "1", "]", ".", "value", ")" ]
Empirical KL divergence of two normals with diagonal covariance. Args: lhs: Diagonal Normal distribution. rhs: Diagonal Normal distribution. name: Name scope for the op. Returns: KL divergence from lhs to rhs.
[ "Empirical", "KL", "divergence", "of", "two", "normals", "with", "diagonal", "covariance", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/networks.py#L43-L64
train
google-research/batch-ppo
agents/scripts/utility.py
define_simulation_graph
def define_simulation_graph(batch_env, algo_cls, config): """Define the algorithm and environment interaction. Args: batch_env: In-graph environments object. algo_cls: Constructor of a batch algorithm. config: Configuration object for the algorithm. Returns: Object providing graph elements via attributes. """ # pylint: disable=unused-variable step = tf.Variable(0, False, dtype=tf.int32, name='global_step') is_training = tf.placeholder(tf.bool, name='is_training') should_log = tf.placeholder(tf.bool, name='should_log') do_report = tf.placeholder(tf.bool, name='do_report') force_reset = tf.placeholder(tf.bool, name='force_reset') algo = algo_cls(batch_env, step, is_training, should_log, config) done, score, summary = tools.simulate( batch_env, algo, should_log, force_reset) message = 'Graph contains {} trainable variables.' tf.logging.info(message.format(tools.count_weights())) # pylint: enable=unused-variable return tools.AttrDict(locals())
python
def define_simulation_graph(batch_env, algo_cls, config): """Define the algorithm and environment interaction. Args: batch_env: In-graph environments object. algo_cls: Constructor of a batch algorithm. config: Configuration object for the algorithm. Returns: Object providing graph elements via attributes. """ # pylint: disable=unused-variable step = tf.Variable(0, False, dtype=tf.int32, name='global_step') is_training = tf.placeholder(tf.bool, name='is_training') should_log = tf.placeholder(tf.bool, name='should_log') do_report = tf.placeholder(tf.bool, name='do_report') force_reset = tf.placeholder(tf.bool, name='force_reset') algo = algo_cls(batch_env, step, is_training, should_log, config) done, score, summary = tools.simulate( batch_env, algo, should_log, force_reset) message = 'Graph contains {} trainable variables.' tf.logging.info(message.format(tools.count_weights())) # pylint: enable=unused-variable return tools.AttrDict(locals())
[ "def", "define_simulation_graph", "(", "batch_env", ",", "algo_cls", ",", "config", ")", ":", "# pylint: disable=unused-variable", "step", "=", "tf", ".", "Variable", "(", "0", ",", "False", ",", "dtype", "=", "tf", ".", "int32", ",", "name", "=", "'global_step'", ")", "is_training", "=", "tf", ".", "placeholder", "(", "tf", ".", "bool", ",", "name", "=", "'is_training'", ")", "should_log", "=", "tf", ".", "placeholder", "(", "tf", ".", "bool", ",", "name", "=", "'should_log'", ")", "do_report", "=", "tf", ".", "placeholder", "(", "tf", ".", "bool", ",", "name", "=", "'do_report'", ")", "force_reset", "=", "tf", ".", "placeholder", "(", "tf", ".", "bool", ",", "name", "=", "'force_reset'", ")", "algo", "=", "algo_cls", "(", "batch_env", ",", "step", ",", "is_training", ",", "should_log", ",", "config", ")", "done", ",", "score", ",", "summary", "=", "tools", ".", "simulate", "(", "batch_env", ",", "algo", ",", "should_log", ",", "force_reset", ")", "message", "=", "'Graph contains {} trainable variables.'", "tf", ".", "logging", ".", "info", "(", "message", ".", "format", "(", "tools", ".", "count_weights", "(", ")", ")", ")", "# pylint: enable=unused-variable", "return", "tools", ".", "AttrDict", "(", "locals", "(", ")", ")" ]
Define the algorithm and environment interaction. Args: batch_env: In-graph environments object. algo_cls: Constructor of a batch algorithm. config: Configuration object for the algorithm. Returns: Object providing graph elements via attributes.
[ "Define", "the", "algorithm", "and", "environment", "interaction", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/utility.py#L31-L54
train
google-research/batch-ppo
agents/scripts/utility.py
define_batch_env
def define_batch_env(constructor, num_agents, env_processes): """Create environments and apply all desired wrappers. Args: constructor: Constructor of an OpenAI gym environment. num_agents: Number of environments to combine in the batch. env_processes: Whether to step environment in external processes. Returns: In-graph environments object. """ with tf.variable_scope('environments'): if env_processes: envs = [ tools.wrappers.ExternalProcess(constructor) for _ in range(num_agents)] else: envs = [constructor() for _ in range(num_agents)] batch_env = tools.BatchEnv(envs, blocking=not env_processes) batch_env = tools.InGraphBatchEnv(batch_env) return batch_env
python
def define_batch_env(constructor, num_agents, env_processes): """Create environments and apply all desired wrappers. Args: constructor: Constructor of an OpenAI gym environment. num_agents: Number of environments to combine in the batch. env_processes: Whether to step environment in external processes. Returns: In-graph environments object. """ with tf.variable_scope('environments'): if env_processes: envs = [ tools.wrappers.ExternalProcess(constructor) for _ in range(num_agents)] else: envs = [constructor() for _ in range(num_agents)] batch_env = tools.BatchEnv(envs, blocking=not env_processes) batch_env = tools.InGraphBatchEnv(batch_env) return batch_env
[ "def", "define_batch_env", "(", "constructor", ",", "num_agents", ",", "env_processes", ")", ":", "with", "tf", ".", "variable_scope", "(", "'environments'", ")", ":", "if", "env_processes", ":", "envs", "=", "[", "tools", ".", "wrappers", ".", "ExternalProcess", "(", "constructor", ")", "for", "_", "in", "range", "(", "num_agents", ")", "]", "else", ":", "envs", "=", "[", "constructor", "(", ")", "for", "_", "in", "range", "(", "num_agents", ")", "]", "batch_env", "=", "tools", ".", "BatchEnv", "(", "envs", ",", "blocking", "=", "not", "env_processes", ")", "batch_env", "=", "tools", ".", "InGraphBatchEnv", "(", "batch_env", ")", "return", "batch_env" ]
Create environments and apply all desired wrappers. Args: constructor: Constructor of an OpenAI gym environment. num_agents: Number of environments to combine in the batch. env_processes: Whether to step environment in external processes. Returns: In-graph environments object.
[ "Create", "environments", "and", "apply", "all", "desired", "wrappers", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/utility.py#L57-L77
train
google-research/batch-ppo
agents/scripts/utility.py
define_saver
def define_saver(exclude=None): """Create a saver for the variables we want to checkpoint. Args: exclude: List of regexes to match variable names to exclude. Returns: Saver object. """ variables = [] exclude = exclude or [] exclude = [re.compile(regex) for regex in exclude] for variable in tf.global_variables(): if any(regex.match(variable.name) for regex in exclude): continue variables.append(variable) saver = tf.train.Saver(variables, keep_checkpoint_every_n_hours=5) return saver
python
def define_saver(exclude=None): """Create a saver for the variables we want to checkpoint. Args: exclude: List of regexes to match variable names to exclude. Returns: Saver object. """ variables = [] exclude = exclude or [] exclude = [re.compile(regex) for regex in exclude] for variable in tf.global_variables(): if any(regex.match(variable.name) for regex in exclude): continue variables.append(variable) saver = tf.train.Saver(variables, keep_checkpoint_every_n_hours=5) return saver
[ "def", "define_saver", "(", "exclude", "=", "None", ")", ":", "variables", "=", "[", "]", "exclude", "=", "exclude", "or", "[", "]", "exclude", "=", "[", "re", ".", "compile", "(", "regex", ")", "for", "regex", "in", "exclude", "]", "for", "variable", "in", "tf", ".", "global_variables", "(", ")", ":", "if", "any", "(", "regex", ".", "match", "(", "variable", ".", "name", ")", "for", "regex", "in", "exclude", ")", ":", "continue", "variables", ".", "append", "(", "variable", ")", "saver", "=", "tf", ".", "train", ".", "Saver", "(", "variables", ",", "keep_checkpoint_every_n_hours", "=", "5", ")", "return", "saver" ]
Create a saver for the variables we want to checkpoint. Args: exclude: List of regexes to match variable names to exclude. Returns: Saver object.
[ "Create", "a", "saver", "for", "the", "variables", "we", "want", "to", "checkpoint", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/utility.py#L80-L97
train
google-research/batch-ppo
agents/scripts/utility.py
initialize_variables
def initialize_variables(sess, saver, logdir, checkpoint=None, resume=None): """Initialize or restore variables from a checkpoint if available. Args: sess: Session to initialize variables in. saver: Saver to restore variables. logdir: Directory to search for checkpoints. checkpoint: Specify what checkpoint name to use; defaults to most recent. resume: Whether to expect recovering a checkpoint or starting a new run. Raises: ValueError: If resume expected but no log directory specified. RuntimeError: If no resume expected but a checkpoint was found. """ sess.run(tf.group( tf.local_variables_initializer(), tf.global_variables_initializer())) if resume and not (logdir or checkpoint): raise ValueError('Need to specify logdir to resume a checkpoint.') if logdir: state = tf.train.get_checkpoint_state(logdir) if checkpoint: checkpoint = os.path.join(logdir, checkpoint) if not checkpoint and state and state.model_checkpoint_path: checkpoint = state.model_checkpoint_path if checkpoint and resume is False: message = 'Found unexpected checkpoint when starting a new run.' raise RuntimeError(message) if checkpoint: saver.restore(sess, checkpoint)
python
def initialize_variables(sess, saver, logdir, checkpoint=None, resume=None): """Initialize or restore variables from a checkpoint if available. Args: sess: Session to initialize variables in. saver: Saver to restore variables. logdir: Directory to search for checkpoints. checkpoint: Specify what checkpoint name to use; defaults to most recent. resume: Whether to expect recovering a checkpoint or starting a new run. Raises: ValueError: If resume expected but no log directory specified. RuntimeError: If no resume expected but a checkpoint was found. """ sess.run(tf.group( tf.local_variables_initializer(), tf.global_variables_initializer())) if resume and not (logdir or checkpoint): raise ValueError('Need to specify logdir to resume a checkpoint.') if logdir: state = tf.train.get_checkpoint_state(logdir) if checkpoint: checkpoint = os.path.join(logdir, checkpoint) if not checkpoint and state and state.model_checkpoint_path: checkpoint = state.model_checkpoint_path if checkpoint and resume is False: message = 'Found unexpected checkpoint when starting a new run.' raise RuntimeError(message) if checkpoint: saver.restore(sess, checkpoint)
[ "def", "initialize_variables", "(", "sess", ",", "saver", ",", "logdir", ",", "checkpoint", "=", "None", ",", "resume", "=", "None", ")", ":", "sess", ".", "run", "(", "tf", ".", "group", "(", "tf", ".", "local_variables_initializer", "(", ")", ",", "tf", ".", "global_variables_initializer", "(", ")", ")", ")", "if", "resume", "and", "not", "(", "logdir", "or", "checkpoint", ")", ":", "raise", "ValueError", "(", "'Need to specify logdir to resume a checkpoint.'", ")", "if", "logdir", ":", "state", "=", "tf", ".", "train", ".", "get_checkpoint_state", "(", "logdir", ")", "if", "checkpoint", ":", "checkpoint", "=", "os", ".", "path", ".", "join", "(", "logdir", ",", "checkpoint", ")", "if", "not", "checkpoint", "and", "state", "and", "state", ".", "model_checkpoint_path", ":", "checkpoint", "=", "state", ".", "model_checkpoint_path", "if", "checkpoint", "and", "resume", "is", "False", ":", "message", "=", "'Found unexpected checkpoint when starting a new run.'", "raise", "RuntimeError", "(", "message", ")", "if", "checkpoint", ":", "saver", ".", "restore", "(", "sess", ",", "checkpoint", ")" ]
Initialize or restore variables from a checkpoint if available. Args: sess: Session to initialize variables in. saver: Saver to restore variables. logdir: Directory to search for checkpoints. checkpoint: Specify what checkpoint name to use; defaults to most recent. resume: Whether to expect recovering a checkpoint or starting a new run. Raises: ValueError: If resume expected but no log directory specified. RuntimeError: If no resume expected but a checkpoint was found.
[ "Initialize", "or", "restore", "variables", "from", "a", "checkpoint", "if", "available", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/utility.py#L100-L129
train
google-research/batch-ppo
agents/scripts/utility.py
save_config
def save_config(config, logdir=None): """Save a new configuration by name. If a logging directory is specified, is will be created and the configuration will be stored there. Otherwise, a log message will be printed. Args: config: Configuration object. logdir: Location for writing summaries and checkpoints if specified. Returns: Configuration object. """ if logdir: with config.unlocked: config.logdir = logdir message = 'Start a new run and write summaries and checkpoints to {}.' tf.logging.info(message.format(config.logdir)) tf.gfile.MakeDirs(config.logdir) config_path = os.path.join(config.logdir, 'config.yaml') with tf.gfile.FastGFile(config_path, 'w') as file_: yaml.dump(config, file_, default_flow_style=False) else: message = ( 'Start a new run without storing summaries and checkpoints since no ' 'logging directory was specified.') tf.logging.info(message) return config
python
def save_config(config, logdir=None): """Save a new configuration by name. If a logging directory is specified, is will be created and the configuration will be stored there. Otherwise, a log message will be printed. Args: config: Configuration object. logdir: Location for writing summaries and checkpoints if specified. Returns: Configuration object. """ if logdir: with config.unlocked: config.logdir = logdir message = 'Start a new run and write summaries and checkpoints to {}.' tf.logging.info(message.format(config.logdir)) tf.gfile.MakeDirs(config.logdir) config_path = os.path.join(config.logdir, 'config.yaml') with tf.gfile.FastGFile(config_path, 'w') as file_: yaml.dump(config, file_, default_flow_style=False) else: message = ( 'Start a new run without storing summaries and checkpoints since no ' 'logging directory was specified.') tf.logging.info(message) return config
[ "def", "save_config", "(", "config", ",", "logdir", "=", "None", ")", ":", "if", "logdir", ":", "with", "config", ".", "unlocked", ":", "config", ".", "logdir", "=", "logdir", "message", "=", "'Start a new run and write summaries and checkpoints to {}.'", "tf", ".", "logging", ".", "info", "(", "message", ".", "format", "(", "config", ".", "logdir", ")", ")", "tf", ".", "gfile", ".", "MakeDirs", "(", "config", ".", "logdir", ")", "config_path", "=", "os", ".", "path", ".", "join", "(", "config", ".", "logdir", ",", "'config.yaml'", ")", "with", "tf", ".", "gfile", ".", "FastGFile", "(", "config_path", ",", "'w'", ")", "as", "file_", ":", "yaml", ".", "dump", "(", "config", ",", "file_", ",", "default_flow_style", "=", "False", ")", "else", ":", "message", "=", "(", "'Start a new run without storing summaries and checkpoints since no '", "'logging directory was specified.'", ")", "tf", ".", "logging", ".", "info", "(", "message", ")", "return", "config" ]
Save a new configuration by name. If a logging directory is specified, is will be created and the configuration will be stored there. Otherwise, a log message will be printed. Args: config: Configuration object. logdir: Location for writing summaries and checkpoints if specified. Returns: Configuration object.
[ "Save", "a", "new", "configuration", "by", "name", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/utility.py#L132-L159
train
google-research/batch-ppo
agents/scripts/utility.py
load_config
def load_config(logdir): # pylint: disable=missing-raises-doc """Load a configuration from the log directory. Args: logdir: The logging directory containing the configuration file. Raises: IOError: The logging directory does not contain a configuration file. Returns: Configuration object. """ config_path = logdir and os.path.join(logdir, 'config.yaml') if not config_path or not tf.gfile.Exists(config_path): message = ( 'Cannot resume an existing run since the logging directory does not ' 'contain a configuration file.') raise IOError(message) with tf.gfile.FastGFile(config_path, 'r') as file_: config = yaml.load(file_, Loader=yaml.Loader) message = 'Resume run and write summaries and checkpoints to {}.' tf.logging.info(message.format(config.logdir)) return config
python
def load_config(logdir): # pylint: disable=missing-raises-doc """Load a configuration from the log directory. Args: logdir: The logging directory containing the configuration file. Raises: IOError: The logging directory does not contain a configuration file. Returns: Configuration object. """ config_path = logdir and os.path.join(logdir, 'config.yaml') if not config_path or not tf.gfile.Exists(config_path): message = ( 'Cannot resume an existing run since the logging directory does not ' 'contain a configuration file.') raise IOError(message) with tf.gfile.FastGFile(config_path, 'r') as file_: config = yaml.load(file_, Loader=yaml.Loader) message = 'Resume run and write summaries and checkpoints to {}.' tf.logging.info(message.format(config.logdir)) return config
[ "def", "load_config", "(", "logdir", ")", ":", "# pylint: disable=missing-raises-doc", "config_path", "=", "logdir", "and", "os", ".", "path", ".", "join", "(", "logdir", ",", "'config.yaml'", ")", "if", "not", "config_path", "or", "not", "tf", ".", "gfile", ".", "Exists", "(", "config_path", ")", ":", "message", "=", "(", "'Cannot resume an existing run since the logging directory does not '", "'contain a configuration file.'", ")", "raise", "IOError", "(", "message", ")", "with", "tf", ".", "gfile", ".", "FastGFile", "(", "config_path", ",", "'r'", ")", "as", "file_", ":", "config", "=", "yaml", ".", "load", "(", "file_", ",", "Loader", "=", "yaml", ".", "Loader", ")", "message", "=", "'Resume run and write summaries and checkpoints to {}.'", "tf", ".", "logging", ".", "info", "(", "message", ".", "format", "(", "config", ".", "logdir", ")", ")", "return", "config" ]
Load a configuration from the log directory. Args: logdir: The logging directory containing the configuration file. Raises: IOError: The logging directory does not contain a configuration file. Returns: Configuration object.
[ "Load", "a", "configuration", "from", "the", "log", "directory", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/utility.py#L162-L185
train
google-research/batch-ppo
agents/scripts/utility.py
set_up_logging
def set_up_logging(): """Configure the TensorFlow logger.""" tf.logging.set_verbosity(tf.logging.INFO) logging.getLogger('tensorflow').propagate = False
python
def set_up_logging(): """Configure the TensorFlow logger.""" tf.logging.set_verbosity(tf.logging.INFO) logging.getLogger('tensorflow').propagate = False
[ "def", "set_up_logging", "(", ")", ":", "tf", ".", "logging", ".", "set_verbosity", "(", "tf", ".", "logging", ".", "INFO", ")", "logging", ".", "getLogger", "(", "'tensorflow'", ")", ".", "propagate", "=", "False" ]
Configure the TensorFlow logger.
[ "Configure", "the", "TensorFlow", "logger", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/utility.py#L188-L191
train
google-research/batch-ppo
agents/scripts/visualize.py
_define_loop
def _define_loop(graph, eval_steps): """Create and configure an evaluation loop. Args: graph: Object providing graph elements via attributes. eval_steps: Number of evaluation steps per epoch. Returns: Loop object. """ loop = tools.Loop( None, graph.step, graph.should_log, graph.do_report, graph.force_reset) loop.add_phase( 'eval', graph.done, graph.score, graph.summary, eval_steps, report_every=eval_steps, log_every=None, checkpoint_every=None, feed={graph.is_training: False}) return loop
python
def _define_loop(graph, eval_steps): """Create and configure an evaluation loop. Args: graph: Object providing graph elements via attributes. eval_steps: Number of evaluation steps per epoch. Returns: Loop object. """ loop = tools.Loop( None, graph.step, graph.should_log, graph.do_report, graph.force_reset) loop.add_phase( 'eval', graph.done, graph.score, graph.summary, eval_steps, report_every=eval_steps, log_every=None, checkpoint_every=None, feed={graph.is_training: False}) return loop
[ "def", "_define_loop", "(", "graph", ",", "eval_steps", ")", ":", "loop", "=", "tools", ".", "Loop", "(", "None", ",", "graph", ".", "step", ",", "graph", ".", "should_log", ",", "graph", ".", "do_report", ",", "graph", ".", "force_reset", ")", "loop", ".", "add_phase", "(", "'eval'", ",", "graph", ".", "done", ",", "graph", ".", "score", ",", "graph", ".", "summary", ",", "eval_steps", ",", "report_every", "=", "eval_steps", ",", "log_every", "=", "None", ",", "checkpoint_every", "=", "None", ",", "feed", "=", "{", "graph", ".", "is_training", ":", "False", "}", ")", "return", "loop" ]
Create and configure an evaluation loop. Args: graph: Object providing graph elements via attributes. eval_steps: Number of evaluation steps per epoch. Returns: Loop object.
[ "Create", "and", "configure", "an", "evaluation", "loop", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/visualize.py#L74-L92
train
google-research/batch-ppo
agents/scripts/visualize.py
visualize
def visualize( logdir, outdir, num_agents, num_episodes, checkpoint=None, env_processes=True): """Recover checkpoint and render videos from it. Args: logdir: Logging directory of the trained algorithm. outdir: Directory to store rendered videos in. num_agents: Number of environments to simulate in parallel. num_episodes: Total number of episodes to simulate. checkpoint: Checkpoint name to load; defaults to most recent. env_processes: Whether to step environments in separate processes. """ config = utility.load_config(logdir) with tf.device('/cpu:0'): batch_env = utility.define_batch_env( lambda: _create_environment(config, outdir), num_agents, env_processes) graph = utility.define_simulation_graph( batch_env, config.algorithm, config) total_steps = num_episodes * config.max_length loop = _define_loop(graph, total_steps) saver = utility.define_saver( exclude=(r'.*_temporary.*', r'global_step')) sess_config = tf.ConfigProto(allow_soft_placement=True) sess_config.gpu_options.allow_growth = True with tf.Session(config=sess_config) as sess: utility.initialize_variables( sess, saver, config.logdir, checkpoint, resume=True) for unused_score in loop.run(sess, saver, total_steps): pass batch_env.close()
python
def visualize( logdir, outdir, num_agents, num_episodes, checkpoint=None, env_processes=True): """Recover checkpoint and render videos from it. Args: logdir: Logging directory of the trained algorithm. outdir: Directory to store rendered videos in. num_agents: Number of environments to simulate in parallel. num_episodes: Total number of episodes to simulate. checkpoint: Checkpoint name to load; defaults to most recent. env_processes: Whether to step environments in separate processes. """ config = utility.load_config(logdir) with tf.device('/cpu:0'): batch_env = utility.define_batch_env( lambda: _create_environment(config, outdir), num_agents, env_processes) graph = utility.define_simulation_graph( batch_env, config.algorithm, config) total_steps = num_episodes * config.max_length loop = _define_loop(graph, total_steps) saver = utility.define_saver( exclude=(r'.*_temporary.*', r'global_step')) sess_config = tf.ConfigProto(allow_soft_placement=True) sess_config.gpu_options.allow_growth = True with tf.Session(config=sess_config) as sess: utility.initialize_variables( sess, saver, config.logdir, checkpoint, resume=True) for unused_score in loop.run(sess, saver, total_steps): pass batch_env.close()
[ "def", "visualize", "(", "logdir", ",", "outdir", ",", "num_agents", ",", "num_episodes", ",", "checkpoint", "=", "None", ",", "env_processes", "=", "True", ")", ":", "config", "=", "utility", ".", "load_config", "(", "logdir", ")", "with", "tf", ".", "device", "(", "'/cpu:0'", ")", ":", "batch_env", "=", "utility", ".", "define_batch_env", "(", "lambda", ":", "_create_environment", "(", "config", ",", "outdir", ")", ",", "num_agents", ",", "env_processes", ")", "graph", "=", "utility", ".", "define_simulation_graph", "(", "batch_env", ",", "config", ".", "algorithm", ",", "config", ")", "total_steps", "=", "num_episodes", "*", "config", ".", "max_length", "loop", "=", "_define_loop", "(", "graph", ",", "total_steps", ")", "saver", "=", "utility", ".", "define_saver", "(", "exclude", "=", "(", "r'.*_temporary.*'", ",", "r'global_step'", ")", ")", "sess_config", "=", "tf", ".", "ConfigProto", "(", "allow_soft_placement", "=", "True", ")", "sess_config", ".", "gpu_options", ".", "allow_growth", "=", "True", "with", "tf", ".", "Session", "(", "config", "=", "sess_config", ")", "as", "sess", ":", "utility", ".", "initialize_variables", "(", "sess", ",", "saver", ",", "config", ".", "logdir", ",", "checkpoint", ",", "resume", "=", "True", ")", "for", "unused_score", "in", "loop", ".", "run", "(", "sess", ",", "saver", ",", "total_steps", ")", ":", "pass", "batch_env", ".", "close", "(", ")" ]
Recover checkpoint and render videos from it. Args: logdir: Logging directory of the trained algorithm. outdir: Directory to store rendered videos in. num_agents: Number of environments to simulate in parallel. num_episodes: Total number of episodes to simulate. checkpoint: Checkpoint name to load; defaults to most recent. env_processes: Whether to step environments in separate processes.
[ "Recover", "checkpoint", "and", "render", "videos", "from", "it", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/visualize.py#L95-L126
train
google-research/batch-ppo
agents/scripts/visualize.py
main
def main(_): """Load a trained algorithm and render videos.""" utility.set_up_logging() if not FLAGS.logdir or not FLAGS.outdir: raise KeyError('You must specify logging and outdirs directories.') FLAGS.logdir = os.path.expanduser(FLAGS.logdir) FLAGS.outdir = os.path.expanduser(FLAGS.outdir) visualize( FLAGS.logdir, FLAGS.outdir, FLAGS.num_agents, FLAGS.num_episodes, FLAGS.checkpoint, FLAGS.env_processes)
python
def main(_): """Load a trained algorithm and render videos.""" utility.set_up_logging() if not FLAGS.logdir or not FLAGS.outdir: raise KeyError('You must specify logging and outdirs directories.') FLAGS.logdir = os.path.expanduser(FLAGS.logdir) FLAGS.outdir = os.path.expanduser(FLAGS.outdir) visualize( FLAGS.logdir, FLAGS.outdir, FLAGS.num_agents, FLAGS.num_episodes, FLAGS.checkpoint, FLAGS.env_processes)
[ "def", "main", "(", "_", ")", ":", "utility", ".", "set_up_logging", "(", ")", "if", "not", "FLAGS", ".", "logdir", "or", "not", "FLAGS", ".", "outdir", ":", "raise", "KeyError", "(", "'You must specify logging and outdirs directories.'", ")", "FLAGS", ".", "logdir", "=", "os", ".", "path", ".", "expanduser", "(", "FLAGS", ".", "logdir", ")", "FLAGS", ".", "outdir", "=", "os", ".", "path", ".", "expanduser", "(", "FLAGS", ".", "outdir", ")", "visualize", "(", "FLAGS", ".", "logdir", ",", "FLAGS", ".", "outdir", ",", "FLAGS", ".", "num_agents", ",", "FLAGS", ".", "num_episodes", ",", "FLAGS", ".", "checkpoint", ",", "FLAGS", ".", "env_processes", ")" ]
Load a trained algorithm and render videos.
[ "Load", "a", "trained", "algorithm", "and", "render", "videos", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/visualize.py#L129-L138
train
google-research/batch-ppo
agents/algorithms/ppo/utility.py
reinit_nested_vars
def reinit_nested_vars(variables, indices=None): """Reset all variables in a nested tuple to zeros. Args: variables: Nested tuple or list of variables. indices: Batch indices to reset, defaults to all. Returns: Operation. """ if isinstance(variables, (tuple, list)): return tf.group(*[ reinit_nested_vars(variable, indices) for variable in variables]) if indices is None: return variables.assign(tf.zeros_like(variables)) else: zeros = tf.zeros([tf.shape(indices)[0]] + variables.shape[1:].as_list()) return tf.scatter_update(variables, indices, zeros)
python
def reinit_nested_vars(variables, indices=None): """Reset all variables in a nested tuple to zeros. Args: variables: Nested tuple or list of variables. indices: Batch indices to reset, defaults to all. Returns: Operation. """ if isinstance(variables, (tuple, list)): return tf.group(*[ reinit_nested_vars(variable, indices) for variable in variables]) if indices is None: return variables.assign(tf.zeros_like(variables)) else: zeros = tf.zeros([tf.shape(indices)[0]] + variables.shape[1:].as_list()) return tf.scatter_update(variables, indices, zeros)
[ "def", "reinit_nested_vars", "(", "variables", ",", "indices", "=", "None", ")", ":", "if", "isinstance", "(", "variables", ",", "(", "tuple", ",", "list", ")", ")", ":", "return", "tf", ".", "group", "(", "*", "[", "reinit_nested_vars", "(", "variable", ",", "indices", ")", "for", "variable", "in", "variables", "]", ")", "if", "indices", "is", "None", ":", "return", "variables", ".", "assign", "(", "tf", ".", "zeros_like", "(", "variables", ")", ")", "else", ":", "zeros", "=", "tf", ".", "zeros", "(", "[", "tf", ".", "shape", "(", "indices", ")", "[", "0", "]", "]", "+", "variables", ".", "shape", "[", "1", ":", "]", ".", "as_list", "(", ")", ")", "return", "tf", ".", "scatter_update", "(", "variables", ",", "indices", ",", "zeros", ")" ]
Reset all variables in a nested tuple to zeros. Args: variables: Nested tuple or list of variables. indices: Batch indices to reset, defaults to all. Returns: Operation.
[ "Reset", "all", "variables", "in", "a", "nested", "tuple", "to", "zeros", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L28-L45
train
google-research/batch-ppo
agents/algorithms/ppo/utility.py
assign_nested_vars
def assign_nested_vars(variables, tensors, indices=None): """Assign tensors to matching nested tuple of variables. Args: variables: Nested tuple or list of variables to update. tensors: Nested tuple or list of tensors to assign. indices: Batch indices to assign to; default to all. Returns: Operation. """ if isinstance(variables, (tuple, list)): return tf.group(*[ assign_nested_vars(variable, tensor) for variable, tensor in zip(variables, tensors)]) if indices is None: return variables.assign(tensors) else: return tf.scatter_update(variables, indices, tensors)
python
def assign_nested_vars(variables, tensors, indices=None): """Assign tensors to matching nested tuple of variables. Args: variables: Nested tuple or list of variables to update. tensors: Nested tuple or list of tensors to assign. indices: Batch indices to assign to; default to all. Returns: Operation. """ if isinstance(variables, (tuple, list)): return tf.group(*[ assign_nested_vars(variable, tensor) for variable, tensor in zip(variables, tensors)]) if indices is None: return variables.assign(tensors) else: return tf.scatter_update(variables, indices, tensors)
[ "def", "assign_nested_vars", "(", "variables", ",", "tensors", ",", "indices", "=", "None", ")", ":", "if", "isinstance", "(", "variables", ",", "(", "tuple", ",", "list", ")", ")", ":", "return", "tf", ".", "group", "(", "*", "[", "assign_nested_vars", "(", "variable", ",", "tensor", ")", "for", "variable", ",", "tensor", "in", "zip", "(", "variables", ",", "tensors", ")", "]", ")", "if", "indices", "is", "None", ":", "return", "variables", ".", "assign", "(", "tensors", ")", "else", ":", "return", "tf", ".", "scatter_update", "(", "variables", ",", "indices", ",", "tensors", ")" ]
Assign tensors to matching nested tuple of variables. Args: variables: Nested tuple or list of variables to update. tensors: Nested tuple or list of tensors to assign. indices: Batch indices to assign to; default to all. Returns: Operation.
[ "Assign", "tensors", "to", "matching", "nested", "tuple", "of", "variables", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L48-L66
train
google-research/batch-ppo
agents/algorithms/ppo/utility.py
discounted_return
def discounted_return(reward, length, discount): """Discounted Monte-Carlo returns.""" timestep = tf.range(reward.shape[1].value) mask = tf.cast(timestep[None, :] < length[:, None], tf.float32) return_ = tf.reverse(tf.transpose(tf.scan( lambda agg, cur: cur + discount * agg, tf.transpose(tf.reverse(mask * reward, [1]), [1, 0]), tf.zeros_like(reward[:, -1]), 1, False), [1, 0]), [1]) return tf.check_numerics(tf.stop_gradient(return_), 'return')
python
def discounted_return(reward, length, discount): """Discounted Monte-Carlo returns.""" timestep = tf.range(reward.shape[1].value) mask = tf.cast(timestep[None, :] < length[:, None], tf.float32) return_ = tf.reverse(tf.transpose(tf.scan( lambda agg, cur: cur + discount * agg, tf.transpose(tf.reverse(mask * reward, [1]), [1, 0]), tf.zeros_like(reward[:, -1]), 1, False), [1, 0]), [1]) return tf.check_numerics(tf.stop_gradient(return_), 'return')
[ "def", "discounted_return", "(", "reward", ",", "length", ",", "discount", ")", ":", "timestep", "=", "tf", ".", "range", "(", "reward", ".", "shape", "[", "1", "]", ".", "value", ")", "mask", "=", "tf", ".", "cast", "(", "timestep", "[", "None", ",", ":", "]", "<", "length", "[", ":", ",", "None", "]", ",", "tf", ".", "float32", ")", "return_", "=", "tf", ".", "reverse", "(", "tf", ".", "transpose", "(", "tf", ".", "scan", "(", "lambda", "agg", ",", "cur", ":", "cur", "+", "discount", "*", "agg", ",", "tf", ".", "transpose", "(", "tf", ".", "reverse", "(", "mask", "*", "reward", ",", "[", "1", "]", ")", ",", "[", "1", ",", "0", "]", ")", ",", "tf", ".", "zeros_like", "(", "reward", "[", ":", ",", "-", "1", "]", ")", ",", "1", ",", "False", ")", ",", "[", "1", ",", "0", "]", ")", ",", "[", "1", "]", ")", "return", "tf", ".", "check_numerics", "(", "tf", ".", "stop_gradient", "(", "return_", ")", ",", "'return'", ")" ]
Discounted Monte-Carlo returns.
[ "Discounted", "Monte", "-", "Carlo", "returns", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L69-L77
train
google-research/batch-ppo
agents/algorithms/ppo/utility.py
fixed_step_return
def fixed_step_return(reward, value, length, discount, window): """N-step discounted return.""" timestep = tf.range(reward.shape[1].value) mask = tf.cast(timestep[None, :] < length[:, None], tf.float32) return_ = tf.zeros_like(reward) for _ in range(window): return_ += reward reward = discount * tf.concat( [reward[:, 1:], tf.zeros_like(reward[:, -1:])], 1) return_ += discount ** window * tf.concat( [value[:, window:], tf.zeros_like(value[:, -window:])], 1) return tf.check_numerics(tf.stop_gradient(mask * return_), 'return')
python
def fixed_step_return(reward, value, length, discount, window): """N-step discounted return.""" timestep = tf.range(reward.shape[1].value) mask = tf.cast(timestep[None, :] < length[:, None], tf.float32) return_ = tf.zeros_like(reward) for _ in range(window): return_ += reward reward = discount * tf.concat( [reward[:, 1:], tf.zeros_like(reward[:, -1:])], 1) return_ += discount ** window * tf.concat( [value[:, window:], tf.zeros_like(value[:, -window:])], 1) return tf.check_numerics(tf.stop_gradient(mask * return_), 'return')
[ "def", "fixed_step_return", "(", "reward", ",", "value", ",", "length", ",", "discount", ",", "window", ")", ":", "timestep", "=", "tf", ".", "range", "(", "reward", ".", "shape", "[", "1", "]", ".", "value", ")", "mask", "=", "tf", ".", "cast", "(", "timestep", "[", "None", ",", ":", "]", "<", "length", "[", ":", ",", "None", "]", ",", "tf", ".", "float32", ")", "return_", "=", "tf", ".", "zeros_like", "(", "reward", ")", "for", "_", "in", "range", "(", "window", ")", ":", "return_", "+=", "reward", "reward", "=", "discount", "*", "tf", ".", "concat", "(", "[", "reward", "[", ":", ",", "1", ":", "]", ",", "tf", ".", "zeros_like", "(", "reward", "[", ":", ",", "-", "1", ":", "]", ")", "]", ",", "1", ")", "return_", "+=", "discount", "**", "window", "*", "tf", ".", "concat", "(", "[", "value", "[", ":", ",", "window", ":", "]", ",", "tf", ".", "zeros_like", "(", "value", "[", ":", ",", "-", "window", ":", "]", ")", "]", ",", "1", ")", "return", "tf", ".", "check_numerics", "(", "tf", ".", "stop_gradient", "(", "mask", "*", "return_", ")", ",", "'return'", ")" ]
N-step discounted return.
[ "N", "-", "step", "discounted", "return", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L80-L91
train
google-research/batch-ppo
agents/algorithms/ppo/utility.py
lambda_return
def lambda_return(reward, value, length, discount, lambda_): """TD-lambda returns.""" timestep = tf.range(reward.shape[1].value) mask = tf.cast(timestep[None, :] < length[:, None], tf.float32) sequence = mask * reward + discount * value * (1 - lambda_) discount = mask * discount * lambda_ sequence = tf.stack([sequence, discount], 2) return_ = tf.reverse(tf.transpose(tf.scan( lambda agg, cur: cur[0] + cur[1] * agg, tf.transpose(tf.reverse(sequence, [1]), [1, 2, 0]), tf.zeros_like(value[:, -1]), 1, False), [1, 0]), [1]) return tf.check_numerics(tf.stop_gradient(return_), 'return')
python
def lambda_return(reward, value, length, discount, lambda_): """TD-lambda returns.""" timestep = tf.range(reward.shape[1].value) mask = tf.cast(timestep[None, :] < length[:, None], tf.float32) sequence = mask * reward + discount * value * (1 - lambda_) discount = mask * discount * lambda_ sequence = tf.stack([sequence, discount], 2) return_ = tf.reverse(tf.transpose(tf.scan( lambda agg, cur: cur[0] + cur[1] * agg, tf.transpose(tf.reverse(sequence, [1]), [1, 2, 0]), tf.zeros_like(value[:, -1]), 1, False), [1, 0]), [1]) return tf.check_numerics(tf.stop_gradient(return_), 'return')
[ "def", "lambda_return", "(", "reward", ",", "value", ",", "length", ",", "discount", ",", "lambda_", ")", ":", "timestep", "=", "tf", ".", "range", "(", "reward", ".", "shape", "[", "1", "]", ".", "value", ")", "mask", "=", "tf", ".", "cast", "(", "timestep", "[", "None", ",", ":", "]", "<", "length", "[", ":", ",", "None", "]", ",", "tf", ".", "float32", ")", "sequence", "=", "mask", "*", "reward", "+", "discount", "*", "value", "*", "(", "1", "-", "lambda_", ")", "discount", "=", "mask", "*", "discount", "*", "lambda_", "sequence", "=", "tf", ".", "stack", "(", "[", "sequence", ",", "discount", "]", ",", "2", ")", "return_", "=", "tf", ".", "reverse", "(", "tf", ".", "transpose", "(", "tf", ".", "scan", "(", "lambda", "agg", ",", "cur", ":", "cur", "[", "0", "]", "+", "cur", "[", "1", "]", "*", "agg", ",", "tf", ".", "transpose", "(", "tf", ".", "reverse", "(", "sequence", ",", "[", "1", "]", ")", ",", "[", "1", ",", "2", ",", "0", "]", ")", ",", "tf", ".", "zeros_like", "(", "value", "[", ":", ",", "-", "1", "]", ")", ",", "1", ",", "False", ")", ",", "[", "1", ",", "0", "]", ")", ",", "[", "1", "]", ")", "return", "tf", ".", "check_numerics", "(", "tf", ".", "stop_gradient", "(", "return_", ")", ",", "'return'", ")" ]
TD-lambda returns.
[ "TD", "-", "lambda", "returns", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L94-L105
train
google-research/batch-ppo
agents/algorithms/ppo/utility.py
lambda_advantage
def lambda_advantage(reward, value, length, discount, gae_lambda): """Generalized Advantage Estimation.""" timestep = tf.range(reward.shape[1].value) mask = tf.cast(timestep[None, :] < length[:, None], tf.float32) next_value = tf.concat([value[:, 1:], tf.zeros_like(value[:, -1:])], 1) delta = reward + discount * next_value - value advantage = tf.reverse(tf.transpose(tf.scan( lambda agg, cur: cur + gae_lambda * discount * agg, tf.transpose(tf.reverse(mask * delta, [1]), [1, 0]), tf.zeros_like(delta[:, -1]), 1, False), [1, 0]), [1]) return tf.check_numerics(tf.stop_gradient(advantage), 'advantage')
python
def lambda_advantage(reward, value, length, discount, gae_lambda): """Generalized Advantage Estimation.""" timestep = tf.range(reward.shape[1].value) mask = tf.cast(timestep[None, :] < length[:, None], tf.float32) next_value = tf.concat([value[:, 1:], tf.zeros_like(value[:, -1:])], 1) delta = reward + discount * next_value - value advantage = tf.reverse(tf.transpose(tf.scan( lambda agg, cur: cur + gae_lambda * discount * agg, tf.transpose(tf.reverse(mask * delta, [1]), [1, 0]), tf.zeros_like(delta[:, -1]), 1, False), [1, 0]), [1]) return tf.check_numerics(tf.stop_gradient(advantage), 'advantage')
[ "def", "lambda_advantage", "(", "reward", ",", "value", ",", "length", ",", "discount", ",", "gae_lambda", ")", ":", "timestep", "=", "tf", ".", "range", "(", "reward", ".", "shape", "[", "1", "]", ".", "value", ")", "mask", "=", "tf", ".", "cast", "(", "timestep", "[", "None", ",", ":", "]", "<", "length", "[", ":", ",", "None", "]", ",", "tf", ".", "float32", ")", "next_value", "=", "tf", ".", "concat", "(", "[", "value", "[", ":", ",", "1", ":", "]", ",", "tf", ".", "zeros_like", "(", "value", "[", ":", ",", "-", "1", ":", "]", ")", "]", ",", "1", ")", "delta", "=", "reward", "+", "discount", "*", "next_value", "-", "value", "advantage", "=", "tf", ".", "reverse", "(", "tf", ".", "transpose", "(", "tf", ".", "scan", "(", "lambda", "agg", ",", "cur", ":", "cur", "+", "gae_lambda", "*", "discount", "*", "agg", ",", "tf", ".", "transpose", "(", "tf", ".", "reverse", "(", "mask", "*", "delta", ",", "[", "1", "]", ")", ",", "[", "1", ",", "0", "]", ")", ",", "tf", ".", "zeros_like", "(", "delta", "[", ":", ",", "-", "1", "]", ")", ",", "1", ",", "False", ")", ",", "[", "1", ",", "0", "]", ")", ",", "[", "1", "]", ")", "return", "tf", ".", "check_numerics", "(", "tf", ".", "stop_gradient", "(", "advantage", ")", ",", "'advantage'", ")" ]
Generalized Advantage Estimation.
[ "Generalized", "Advantage", "Estimation", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L108-L118
train
google-research/batch-ppo
agents/algorithms/ppo/utility.py
available_gpus
def available_gpus(): """List of GPU device names detected by TensorFlow.""" local_device_protos = device_lib.list_local_devices() return [x.name for x in local_device_protos if x.device_type == 'GPU']
python
def available_gpus(): """List of GPU device names detected by TensorFlow.""" local_device_protos = device_lib.list_local_devices() return [x.name for x in local_device_protos if x.device_type == 'GPU']
[ "def", "available_gpus", "(", ")", ":", "local_device_protos", "=", "device_lib", ".", "list_local_devices", "(", ")", "return", "[", "x", ".", "name", "for", "x", "in", "local_device_protos", "if", "x", ".", "device_type", "==", "'GPU'", "]" ]
List of GPU device names detected by TensorFlow.
[ "List", "of", "GPU", "device", "names", "detected", "by", "TensorFlow", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L121-L124
train
google-research/batch-ppo
agents/algorithms/ppo/utility.py
gradient_summaries
def gradient_summaries(grad_vars, groups=None, scope='gradients'): """Create histogram summaries of the gradient. Summaries can be grouped via regexes matching variables names. Args: grad_vars: List of (gradient, variable) tuples as returned by optimizers. groups: Mapping of name to regex for grouping summaries. scope: Name scope for this operation. Returns: Summary tensor. """ groups = groups or {r'all': r'.*'} grouped = collections.defaultdict(list) for grad, var in grad_vars: if grad is None: continue for name, pattern in groups.items(): if re.match(pattern, var.name): name = re.sub(pattern, name, var.name) grouped[name].append(grad) for name in groups: if name not in grouped: tf.logging.warn("No variables matching '{}' group.".format(name)) summaries = [] for name, grads in grouped.items(): grads = [tf.reshape(grad, [-1]) for grad in grads] grads = tf.concat(grads, 0) summaries.append(tf.summary.histogram(scope + '/' + name, grads)) return tf.summary.merge(summaries)
python
def gradient_summaries(grad_vars, groups=None, scope='gradients'): """Create histogram summaries of the gradient. Summaries can be grouped via regexes matching variables names. Args: grad_vars: List of (gradient, variable) tuples as returned by optimizers. groups: Mapping of name to regex for grouping summaries. scope: Name scope for this operation. Returns: Summary tensor. """ groups = groups or {r'all': r'.*'} grouped = collections.defaultdict(list) for grad, var in grad_vars: if grad is None: continue for name, pattern in groups.items(): if re.match(pattern, var.name): name = re.sub(pattern, name, var.name) grouped[name].append(grad) for name in groups: if name not in grouped: tf.logging.warn("No variables matching '{}' group.".format(name)) summaries = [] for name, grads in grouped.items(): grads = [tf.reshape(grad, [-1]) for grad in grads] grads = tf.concat(grads, 0) summaries.append(tf.summary.histogram(scope + '/' + name, grads)) return tf.summary.merge(summaries)
[ "def", "gradient_summaries", "(", "grad_vars", ",", "groups", "=", "None", ",", "scope", "=", "'gradients'", ")", ":", "groups", "=", "groups", "or", "{", "r'all'", ":", "r'.*'", "}", "grouped", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "grad", ",", "var", "in", "grad_vars", ":", "if", "grad", "is", "None", ":", "continue", "for", "name", ",", "pattern", "in", "groups", ".", "items", "(", ")", ":", "if", "re", ".", "match", "(", "pattern", ",", "var", ".", "name", ")", ":", "name", "=", "re", ".", "sub", "(", "pattern", ",", "name", ",", "var", ".", "name", ")", "grouped", "[", "name", "]", ".", "append", "(", "grad", ")", "for", "name", "in", "groups", ":", "if", "name", "not", "in", "grouped", ":", "tf", ".", "logging", ".", "warn", "(", "\"No variables matching '{}' group.\"", ".", "format", "(", "name", ")", ")", "summaries", "=", "[", "]", "for", "name", ",", "grads", "in", "grouped", ".", "items", "(", ")", ":", "grads", "=", "[", "tf", ".", "reshape", "(", "grad", ",", "[", "-", "1", "]", ")", "for", "grad", "in", "grads", "]", "grads", "=", "tf", ".", "concat", "(", "grads", ",", "0", ")", "summaries", ".", "append", "(", "tf", ".", "summary", ".", "histogram", "(", "scope", "+", "'/'", "+", "name", ",", "grads", ")", ")", "return", "tf", ".", "summary", ".", "merge", "(", "summaries", ")" ]
Create histogram summaries of the gradient. Summaries can be grouped via regexes matching variables names. Args: grad_vars: List of (gradient, variable) tuples as returned by optimizers. groups: Mapping of name to regex for grouping summaries. scope: Name scope for this operation. Returns: Summary tensor.
[ "Create", "histogram", "summaries", "of", "the", "gradient", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L127-L157
train
google-research/batch-ppo
agents/algorithms/ppo/utility.py
variable_summaries
def variable_summaries(vars_, groups=None, scope='weights'): """Create histogram summaries for the provided variables. Summaries can be grouped via regexes matching variables names. Args: vars_: List of variables to summarize. groups: Mapping of name to regex for grouping summaries. scope: Name scope for this operation. Returns: Summary tensor. """ groups = groups or {r'all': r'.*'} grouped = collections.defaultdict(list) for var in vars_: for name, pattern in groups.items(): if re.match(pattern, var.name): name = re.sub(pattern, name, var.name) grouped[name].append(var) for name in groups: if name not in grouped: tf.logging.warn("No variables matching '{}' group.".format(name)) summaries = [] # pylint: disable=redefined-argument-from-local for name, vars_ in grouped.items(): vars_ = [tf.reshape(var, [-1]) for var in vars_] vars_ = tf.concat(vars_, 0) summaries.append(tf.summary.histogram(scope + '/' + name, vars_)) return tf.summary.merge(summaries)
python
def variable_summaries(vars_, groups=None, scope='weights'): """Create histogram summaries for the provided variables. Summaries can be grouped via regexes matching variables names. Args: vars_: List of variables to summarize. groups: Mapping of name to regex for grouping summaries. scope: Name scope for this operation. Returns: Summary tensor. """ groups = groups or {r'all': r'.*'} grouped = collections.defaultdict(list) for var in vars_: for name, pattern in groups.items(): if re.match(pattern, var.name): name = re.sub(pattern, name, var.name) grouped[name].append(var) for name in groups: if name not in grouped: tf.logging.warn("No variables matching '{}' group.".format(name)) summaries = [] # pylint: disable=redefined-argument-from-local for name, vars_ in grouped.items(): vars_ = [tf.reshape(var, [-1]) for var in vars_] vars_ = tf.concat(vars_, 0) summaries.append(tf.summary.histogram(scope + '/' + name, vars_)) return tf.summary.merge(summaries)
[ "def", "variable_summaries", "(", "vars_", ",", "groups", "=", "None", ",", "scope", "=", "'weights'", ")", ":", "groups", "=", "groups", "or", "{", "r'all'", ":", "r'.*'", "}", "grouped", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "var", "in", "vars_", ":", "for", "name", ",", "pattern", "in", "groups", ".", "items", "(", ")", ":", "if", "re", ".", "match", "(", "pattern", ",", "var", ".", "name", ")", ":", "name", "=", "re", ".", "sub", "(", "pattern", ",", "name", ",", "var", ".", "name", ")", "grouped", "[", "name", "]", ".", "append", "(", "var", ")", "for", "name", "in", "groups", ":", "if", "name", "not", "in", "grouped", ":", "tf", ".", "logging", ".", "warn", "(", "\"No variables matching '{}' group.\"", ".", "format", "(", "name", ")", ")", "summaries", "=", "[", "]", "# pylint: disable=redefined-argument-from-local", "for", "name", ",", "vars_", "in", "grouped", ".", "items", "(", ")", ":", "vars_", "=", "[", "tf", ".", "reshape", "(", "var", ",", "[", "-", "1", "]", ")", "for", "var", "in", "vars_", "]", "vars_", "=", "tf", ".", "concat", "(", "vars_", ",", "0", ")", "summaries", ".", "append", "(", "tf", ".", "summary", ".", "histogram", "(", "scope", "+", "'/'", "+", "name", ",", "vars_", ")", ")", "return", "tf", ".", "summary", ".", "merge", "(", "summaries", ")" ]
Create histogram summaries for the provided variables. Summaries can be grouped via regexes matching variables names. Args: vars_: List of variables to summarize. groups: Mapping of name to regex for grouping summaries. scope: Name scope for this operation. Returns: Summary tensor.
[ "Create", "histogram", "summaries", "for", "the", "provided", "variables", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L160-L189
train
google-research/batch-ppo
agents/algorithms/ppo/utility.py
set_dimension
def set_dimension(tensor, axis, value): """Set the length of a tensor along the specified dimension. Args: tensor: Tensor to define shape of. axis: Dimension to set the static shape for. value: Integer holding the length. Raises: ValueError: When the tensor already has a different length specified. """ shape = tensor.shape.as_list() if shape[axis] not in (value, None): message = 'Cannot set dimension {} of tensor {} to {}; is already {}.' raise ValueError(message.format(axis, tensor.name, value, shape[axis])) shape[axis] = value tensor.set_shape(shape)
python
def set_dimension(tensor, axis, value): """Set the length of a tensor along the specified dimension. Args: tensor: Tensor to define shape of. axis: Dimension to set the static shape for. value: Integer holding the length. Raises: ValueError: When the tensor already has a different length specified. """ shape = tensor.shape.as_list() if shape[axis] not in (value, None): message = 'Cannot set dimension {} of tensor {} to {}; is already {}.' raise ValueError(message.format(axis, tensor.name, value, shape[axis])) shape[axis] = value tensor.set_shape(shape)
[ "def", "set_dimension", "(", "tensor", ",", "axis", ",", "value", ")", ":", "shape", "=", "tensor", ".", "shape", ".", "as_list", "(", ")", "if", "shape", "[", "axis", "]", "not", "in", "(", "value", ",", "None", ")", ":", "message", "=", "'Cannot set dimension {} of tensor {} to {}; is already {}.'", "raise", "ValueError", "(", "message", ".", "format", "(", "axis", ",", "tensor", ".", "name", ",", "value", ",", "shape", "[", "axis", "]", ")", ")", "shape", "[", "axis", "]", "=", "value", "tensor", ".", "set_shape", "(", "shape", ")" ]
Set the length of a tensor along the specified dimension. Args: tensor: Tensor to define shape of. axis: Dimension to set the static shape for. value: Integer holding the length. Raises: ValueError: When the tensor already has a different length specified.
[ "Set", "the", "length", "of", "a", "tensor", "along", "the", "specified", "dimension", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L192-L208
train
google-research/batch-ppo
agents/scripts/configs.py
default
def default(): """Default configuration for PPO.""" # General algorithm = algorithms.PPO num_agents = 30 eval_episodes = 30 use_gpu = False # Environment normalize_ranges = True # Network network = networks.feed_forward_gaussian weight_summaries = dict( all=r'.*', policy=r'.*/policy/.*', value=r'.*/value/.*') policy_layers = 200, 100 value_layers = 200, 100 init_output_factor = 0.1 init_std = 0.35 # Optimization update_every = 30 update_epochs = 25 optimizer = tf.train.AdamOptimizer learning_rate = 1e-4 # Losses discount = 0.995 kl_target = 1e-2 kl_cutoff_factor = 2 kl_cutoff_coef = 1000 kl_init_penalty = 1 return locals()
python
def default(): """Default configuration for PPO.""" # General algorithm = algorithms.PPO num_agents = 30 eval_episodes = 30 use_gpu = False # Environment normalize_ranges = True # Network network = networks.feed_forward_gaussian weight_summaries = dict( all=r'.*', policy=r'.*/policy/.*', value=r'.*/value/.*') policy_layers = 200, 100 value_layers = 200, 100 init_output_factor = 0.1 init_std = 0.35 # Optimization update_every = 30 update_epochs = 25 optimizer = tf.train.AdamOptimizer learning_rate = 1e-4 # Losses discount = 0.995 kl_target = 1e-2 kl_cutoff_factor = 2 kl_cutoff_coef = 1000 kl_init_penalty = 1 return locals()
[ "def", "default", "(", ")", ":", "# General", "algorithm", "=", "algorithms", ".", "PPO", "num_agents", "=", "30", "eval_episodes", "=", "30", "use_gpu", "=", "False", "# Environment", "normalize_ranges", "=", "True", "# Network", "network", "=", "networks", ".", "feed_forward_gaussian", "weight_summaries", "=", "dict", "(", "all", "=", "r'.*'", ",", "policy", "=", "r'.*/policy/.*'", ",", "value", "=", "r'.*/value/.*'", ")", "policy_layers", "=", "200", ",", "100", "value_layers", "=", "200", ",", "100", "init_output_factor", "=", "0.1", "init_std", "=", "0.35", "# Optimization", "update_every", "=", "30", "update_epochs", "=", "25", "optimizer", "=", "tf", ".", "train", ".", "AdamOptimizer", "learning_rate", "=", "1e-4", "# Losses", "discount", "=", "0.995", "kl_target", "=", "1e-2", "kl_cutoff_factor", "=", "2", "kl_cutoff_coef", "=", "1000", "kl_init_penalty", "=", "1", "return", "locals", "(", ")" ]
Default configuration for PPO.
[ "Default", "configuration", "for", "PPO", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/configs.py#L29-L57
train
google-research/batch-ppo
agents/scripts/configs.py
pendulum
def pendulum(): """Configuration for the pendulum classic control task.""" locals().update(default()) # Environment env = 'Pendulum-v0' max_length = 200 steps = 1e6 # 1M # Optimization batch_size = 20 chunk_length = 50 return locals()
python
def pendulum(): """Configuration for the pendulum classic control task.""" locals().update(default()) # Environment env = 'Pendulum-v0' max_length = 200 steps = 1e6 # 1M # Optimization batch_size = 20 chunk_length = 50 return locals()
[ "def", "pendulum", "(", ")", ":", "locals", "(", ")", ".", "update", "(", "default", "(", ")", ")", "# Environment", "env", "=", "'Pendulum-v0'", "max_length", "=", "200", "steps", "=", "1e6", "# 1M", "# Optimization", "batch_size", "=", "20", "chunk_length", "=", "50", "return", "locals", "(", ")" ]
Configuration for the pendulum classic control task.
[ "Configuration", "for", "the", "pendulum", "classic", "control", "task", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/configs.py#L60-L70
train
google-research/batch-ppo
agents/scripts/configs.py
cartpole
def cartpole(): """Configuration for the cart pole classic control task.""" locals().update(default()) # Environment env = 'CartPole-v1' max_length = 500 steps = 2e5 # 200k normalize_ranges = False # The env reports wrong ranges. # Network network = networks.feed_forward_categorical return locals()
python
def cartpole(): """Configuration for the cart pole classic control task.""" locals().update(default()) # Environment env = 'CartPole-v1' max_length = 500 steps = 2e5 # 200k normalize_ranges = False # The env reports wrong ranges. # Network network = networks.feed_forward_categorical return locals()
[ "def", "cartpole", "(", ")", ":", "locals", "(", ")", ".", "update", "(", "default", "(", ")", ")", "# Environment", "env", "=", "'CartPole-v1'", "max_length", "=", "500", "steps", "=", "2e5", "# 200k", "normalize_ranges", "=", "False", "# The env reports wrong ranges.", "# Network", "network", "=", "networks", ".", "feed_forward_categorical", "return", "locals", "(", ")" ]
Configuration for the cart pole classic control task.
[ "Configuration", "for", "the", "cart", "pole", "classic", "control", "task", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/configs.py#L73-L83
train
google-research/batch-ppo
agents/scripts/configs.py
reacher
def reacher(): """Configuration for MuJoCo's reacher task.""" locals().update(default()) # Environment env = 'Reacher-v2' max_length = 1000 steps = 5e6 # 5M discount = 0.985 update_every = 60 return locals()
python
def reacher(): """Configuration for MuJoCo's reacher task.""" locals().update(default()) # Environment env = 'Reacher-v2' max_length = 1000 steps = 5e6 # 5M discount = 0.985 update_every = 60 return locals()
[ "def", "reacher", "(", ")", ":", "locals", "(", ")", ".", "update", "(", "default", "(", ")", ")", "# Environment", "env", "=", "'Reacher-v2'", "max_length", "=", "1000", "steps", "=", "5e6", "# 5M", "discount", "=", "0.985", "update_every", "=", "60", "return", "locals", "(", ")" ]
Configuration for MuJoCo's reacher task.
[ "Configuration", "for", "MuJoCo", "s", "reacher", "task", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/configs.py#L86-L95
train
google-research/batch-ppo
agents/scripts/configs.py
bullet_ant
def bullet_ant(): """Configuration for PyBullet's ant task.""" locals().update(default()) # Environment import pybullet_envs # noqa pylint: disable=unused-import env = 'AntBulletEnv-v0' max_length = 1000 steps = 3e7 # 30M update_every = 60 return locals()
python
def bullet_ant(): """Configuration for PyBullet's ant task.""" locals().update(default()) # Environment import pybullet_envs # noqa pylint: disable=unused-import env = 'AntBulletEnv-v0' max_length = 1000 steps = 3e7 # 30M update_every = 60 return locals()
[ "def", "bullet_ant", "(", ")", ":", "locals", "(", ")", ".", "update", "(", "default", "(", ")", ")", "# Environment", "import", "pybullet_envs", "# noqa pylint: disable=unused-import", "env", "=", "'AntBulletEnv-v0'", "max_length", "=", "1000", "steps", "=", "3e7", "# 30M", "update_every", "=", "60", "return", "locals", "(", ")" ]
Configuration for PyBullet's ant task.
[ "Configuration", "for", "PyBullet", "s", "ant", "task", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/configs.py#L151-L160
train
google-research/batch-ppo
agents/tools/batch_env.py
BatchEnv.step
def step(self, actions): """Forward a batch of actions to the wrapped environments. Args: actions: Batched action to apply to the environment. Raises: ValueError: Invalid actions. Returns: Batch of observations, rewards, and done flags. """ for index, (env, action) in enumerate(zip(self._envs, actions)): if not env.action_space.contains(action): message = 'Invalid action at index {}: {}' raise ValueError(message.format(index, action)) if self._blocking: transitions = [ env.step(action) for env, action in zip(self._envs, actions)] else: transitions = [ env.step(action, blocking=False) for env, action in zip(self._envs, actions)] transitions = [transition() for transition in transitions] observs, rewards, dones, infos = zip(*transitions) observ = np.stack(observs) reward = np.stack(rewards) done = np.stack(dones) info = tuple(infos) return observ, reward, done, info
python
def step(self, actions): """Forward a batch of actions to the wrapped environments. Args: actions: Batched action to apply to the environment. Raises: ValueError: Invalid actions. Returns: Batch of observations, rewards, and done flags. """ for index, (env, action) in enumerate(zip(self._envs, actions)): if not env.action_space.contains(action): message = 'Invalid action at index {}: {}' raise ValueError(message.format(index, action)) if self._blocking: transitions = [ env.step(action) for env, action in zip(self._envs, actions)] else: transitions = [ env.step(action, blocking=False) for env, action in zip(self._envs, actions)] transitions = [transition() for transition in transitions] observs, rewards, dones, infos = zip(*transitions) observ = np.stack(observs) reward = np.stack(rewards) done = np.stack(dones) info = tuple(infos) return observ, reward, done, info
[ "def", "step", "(", "self", ",", "actions", ")", ":", "for", "index", ",", "(", "env", ",", "action", ")", "in", "enumerate", "(", "zip", "(", "self", ".", "_envs", ",", "actions", ")", ")", ":", "if", "not", "env", ".", "action_space", ".", "contains", "(", "action", ")", ":", "message", "=", "'Invalid action at index {}: {}'", "raise", "ValueError", "(", "message", ".", "format", "(", "index", ",", "action", ")", ")", "if", "self", ".", "_blocking", ":", "transitions", "=", "[", "env", ".", "step", "(", "action", ")", "for", "env", ",", "action", "in", "zip", "(", "self", ".", "_envs", ",", "actions", ")", "]", "else", ":", "transitions", "=", "[", "env", ".", "step", "(", "action", ",", "blocking", "=", "False", ")", "for", "env", ",", "action", "in", "zip", "(", "self", ".", "_envs", ",", "actions", ")", "]", "transitions", "=", "[", "transition", "(", ")", "for", "transition", "in", "transitions", "]", "observs", ",", "rewards", ",", "dones", ",", "infos", "=", "zip", "(", "*", "transitions", ")", "observ", "=", "np", ".", "stack", "(", "observs", ")", "reward", "=", "np", ".", "stack", "(", "rewards", ")", "done", "=", "np", ".", "stack", "(", "dones", ")", "info", "=", "tuple", "(", "infos", ")", "return", "observ", ",", "reward", ",", "done", ",", "info" ]
Forward a batch of actions to the wrapped environments. Args: actions: Batched action to apply to the environment. Raises: ValueError: Invalid actions. Returns: Batch of observations, rewards, and done flags.
[ "Forward", "a", "batch", "of", "actions", "to", "the", "wrapped", "environments", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/batch_env.py#L69-L99
train
google-research/batch-ppo
agents/tools/wrappers.py
ExternalProcess.call
def call(self, name, *args, **kwargs): """Asynchronously call a method of the external environment. Args: name: Name of the method to call. *args: Positional arguments to forward to the method. **kwargs: Keyword arguments to forward to the method. Returns: Promise object that blocks and provides the return value when called. """ payload = name, args, kwargs self._conn.send((self._CALL, payload)) return self._receive
python
def call(self, name, *args, **kwargs): """Asynchronously call a method of the external environment. Args: name: Name of the method to call. *args: Positional arguments to forward to the method. **kwargs: Keyword arguments to forward to the method. Returns: Promise object that blocks and provides the return value when called. """ payload = name, args, kwargs self._conn.send((self._CALL, payload)) return self._receive
[ "def", "call", "(", "self", ",", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "payload", "=", "name", ",", "args", ",", "kwargs", "self", ".", "_conn", ".", "send", "(", "(", "self", ".", "_CALL", ",", "payload", ")", ")", "return", "self", ".", "_receive" ]
Asynchronously call a method of the external environment. Args: name: Name of the method to call. *args: Positional arguments to forward to the method. **kwargs: Keyword arguments to forward to the method. Returns: Promise object that blocks and provides the return value when called.
[ "Asynchronously", "call", "a", "method", "of", "the", "external", "environment", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/wrappers.py#L363-L376
train
google-research/batch-ppo
agents/tools/wrappers.py
ExternalProcess.close
def close(self): """Send a close message to the external process and join it.""" try: self._conn.send((self._CLOSE, None)) self._conn.close() except IOError: # The connection was already closed. pass self._process.join()
python
def close(self): """Send a close message to the external process and join it.""" try: self._conn.send((self._CLOSE, None)) self._conn.close() except IOError: # The connection was already closed. pass self._process.join()
[ "def", "close", "(", "self", ")", ":", "try", ":", "self", ".", "_conn", ".", "send", "(", "(", "self", ".", "_CLOSE", ",", "None", ")", ")", "self", ".", "_conn", ".", "close", "(", ")", "except", "IOError", ":", "# The connection was already closed.", "pass", "self", ".", "_process", ".", "join", "(", ")" ]
Send a close message to the external process and join it.
[ "Send", "a", "close", "message", "to", "the", "external", "process", "and", "join", "it", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/wrappers.py#L378-L386
train
google-research/batch-ppo
agents/tools/wrappers.py
ExternalProcess.step
def step(self, action, blocking=True): """Step the environment. Args: action: The action to apply to the environment. blocking: Whether to wait for the result. Returns: Transition tuple when blocking, otherwise callable that returns the transition tuple. """ promise = self.call('step', action) if blocking: return promise() else: return promise
python
def step(self, action, blocking=True): """Step the environment. Args: action: The action to apply to the environment. blocking: Whether to wait for the result. Returns: Transition tuple when blocking, otherwise callable that returns the transition tuple. """ promise = self.call('step', action) if blocking: return promise() else: return promise
[ "def", "step", "(", "self", ",", "action", ",", "blocking", "=", "True", ")", ":", "promise", "=", "self", ".", "call", "(", "'step'", ",", "action", ")", "if", "blocking", ":", "return", "promise", "(", ")", "else", ":", "return", "promise" ]
Step the environment. Args: action: The action to apply to the environment. blocking: Whether to wait for the result. Returns: Transition tuple when blocking, otherwise callable that returns the transition tuple.
[ "Step", "the", "environment", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/wrappers.py#L388-L403
train
google-research/batch-ppo
agents/tools/wrappers.py
ExternalProcess._receive
def _receive(self): """Wait for a message from the worker process and return its payload. Raises: Exception: An exception was raised inside the worker process. KeyError: The received message is of an unknown type. Returns: Payload object of the message. """ message, payload = self._conn.recv() # Re-raise exceptions in the main process. if message == self._EXCEPTION: stacktrace = payload raise Exception(stacktrace) if message == self._RESULT: return payload raise KeyError('Received message of unexpected type {}'.format(message))
python
def _receive(self): """Wait for a message from the worker process and return its payload. Raises: Exception: An exception was raised inside the worker process. KeyError: The received message is of an unknown type. Returns: Payload object of the message. """ message, payload = self._conn.recv() # Re-raise exceptions in the main process. if message == self._EXCEPTION: stacktrace = payload raise Exception(stacktrace) if message == self._RESULT: return payload raise KeyError('Received message of unexpected type {}'.format(message))
[ "def", "_receive", "(", "self", ")", ":", "message", ",", "payload", "=", "self", ".", "_conn", ".", "recv", "(", ")", "# Re-raise exceptions in the main process.", "if", "message", "==", "self", ".", "_EXCEPTION", ":", "stacktrace", "=", "payload", "raise", "Exception", "(", "stacktrace", ")", "if", "message", "==", "self", ".", "_RESULT", ":", "return", "payload", "raise", "KeyError", "(", "'Received message of unexpected type {}'", ".", "format", "(", "message", ")", ")" ]
Wait for a message from the worker process and return its payload. Raises: Exception: An exception was raised inside the worker process. KeyError: The received message is of an unknown type. Returns: Payload object of the message.
[ "Wait", "for", "a", "message", "from", "the", "worker", "process", "and", "return", "its", "payload", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/wrappers.py#L421-L438
train
google-research/batch-ppo
agents/tools/wrappers.py
ExternalProcess._worker
def _worker(self, constructor, conn): """The process waits for actions and sends back environment results. Args: constructor: Constructor for the OpenAI Gym environment. conn: Connection for communication to the main process. Raises: KeyError: When receiving a message of unknown type. """ try: env = constructor() while True: try: # Only block for short times to have keyboard exceptions be raised. if not conn.poll(0.1): continue message, payload = conn.recv() except (EOFError, KeyboardInterrupt): break if message == self._ACCESS: name = payload result = getattr(env, name) conn.send((self._RESULT, result)) continue if message == self._CALL: name, args, kwargs = payload result = getattr(env, name)(*args, **kwargs) conn.send((self._RESULT, result)) continue if message == self._CLOSE: assert payload is None break raise KeyError('Received message of unknown type {}'.format(message)) except Exception: # pylint: disable=broad-except stacktrace = ''.join(traceback.format_exception(*sys.exc_info())) tf.logging.error('Error in environment process: {}'.format(stacktrace)) conn.send((self._EXCEPTION, stacktrace)) conn.close()
python
def _worker(self, constructor, conn): """The process waits for actions and sends back environment results. Args: constructor: Constructor for the OpenAI Gym environment. conn: Connection for communication to the main process. Raises: KeyError: When receiving a message of unknown type. """ try: env = constructor() while True: try: # Only block for short times to have keyboard exceptions be raised. if not conn.poll(0.1): continue message, payload = conn.recv() except (EOFError, KeyboardInterrupt): break if message == self._ACCESS: name = payload result = getattr(env, name) conn.send((self._RESULT, result)) continue if message == self._CALL: name, args, kwargs = payload result = getattr(env, name)(*args, **kwargs) conn.send((self._RESULT, result)) continue if message == self._CLOSE: assert payload is None break raise KeyError('Received message of unknown type {}'.format(message)) except Exception: # pylint: disable=broad-except stacktrace = ''.join(traceback.format_exception(*sys.exc_info())) tf.logging.error('Error in environment process: {}'.format(stacktrace)) conn.send((self._EXCEPTION, stacktrace)) conn.close()
[ "def", "_worker", "(", "self", ",", "constructor", ",", "conn", ")", ":", "try", ":", "env", "=", "constructor", "(", ")", "while", "True", ":", "try", ":", "# Only block for short times to have keyboard exceptions be raised.", "if", "not", "conn", ".", "poll", "(", "0.1", ")", ":", "continue", "message", ",", "payload", "=", "conn", ".", "recv", "(", ")", "except", "(", "EOFError", ",", "KeyboardInterrupt", ")", ":", "break", "if", "message", "==", "self", ".", "_ACCESS", ":", "name", "=", "payload", "result", "=", "getattr", "(", "env", ",", "name", ")", "conn", ".", "send", "(", "(", "self", ".", "_RESULT", ",", "result", ")", ")", "continue", "if", "message", "==", "self", ".", "_CALL", ":", "name", ",", "args", ",", "kwargs", "=", "payload", "result", "=", "getattr", "(", "env", ",", "name", ")", "(", "*", "args", ",", "*", "*", "kwargs", ")", "conn", ".", "send", "(", "(", "self", ".", "_RESULT", ",", "result", ")", ")", "continue", "if", "message", "==", "self", ".", "_CLOSE", ":", "assert", "payload", "is", "None", "break", "raise", "KeyError", "(", "'Received message of unknown type {}'", ".", "format", "(", "message", ")", ")", "except", "Exception", ":", "# pylint: disable=broad-except", "stacktrace", "=", "''", ".", "join", "(", "traceback", ".", "format_exception", "(", "*", "sys", ".", "exc_info", "(", ")", ")", ")", "tf", ".", "logging", ".", "error", "(", "'Error in environment process: {}'", ".", "format", "(", "stacktrace", ")", ")", "conn", ".", "send", "(", "(", "self", ".", "_EXCEPTION", ",", "stacktrace", ")", ")", "conn", ".", "close", "(", ")" ]
The process waits for actions and sends back environment results. Args: constructor: Constructor for the OpenAI Gym environment. conn: Connection for communication to the main process. Raises: KeyError: When receiving a message of unknown type.
[ "The", "process", "waits", "for", "actions", "and", "sends", "back", "environment", "results", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/wrappers.py#L440-L478
train
google-research/batch-ppo
agents/tools/wrappers.py
ConvertTo32Bit.step
def step(self, action): """Forward action to the wrapped environment. Args: action: Action to apply to the environment. Raises: ValueError: Invalid action. Returns: Converted observation, converted reward, done flag, and info object. """ observ, reward, done, info = self._env.step(action) observ = self._convert_observ(observ) reward = self._convert_reward(reward) return observ, reward, done, info
python
def step(self, action): """Forward action to the wrapped environment. Args: action: Action to apply to the environment. Raises: ValueError: Invalid action. Returns: Converted observation, converted reward, done flag, and info object. """ observ, reward, done, info = self._env.step(action) observ = self._convert_observ(observ) reward = self._convert_reward(reward) return observ, reward, done, info
[ "def", "step", "(", "self", ",", "action", ")", ":", "observ", ",", "reward", ",", "done", ",", "info", "=", "self", ".", "_env", ".", "step", "(", "action", ")", "observ", "=", "self", ".", "_convert_observ", "(", "observ", ")", "reward", "=", "self", ".", "_convert_reward", "(", "reward", ")", "return", "observ", ",", "reward", ",", "done", ",", "info" ]
Forward action to the wrapped environment. Args: action: Action to apply to the environment. Raises: ValueError: Invalid action. Returns: Converted observation, converted reward, done flag, and info object.
[ "Forward", "action", "to", "the", "wrapped", "environment", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/wrappers.py#L503-L518
train
google-research/batch-ppo
agents/tools/wrappers.py
ConvertTo32Bit._convert_observ
def _convert_observ(self, observ): """Convert the observation to 32 bits. Args: observ: Numpy observation. Raises: ValueError: Observation contains infinite values. Returns: Numpy observation with 32-bit data type. """ if not np.isfinite(observ).all(): raise ValueError('Infinite observation encountered.') if observ.dtype == np.float64: return observ.astype(np.float32) if observ.dtype == np.int64: return observ.astype(np.int32) return observ
python
def _convert_observ(self, observ): """Convert the observation to 32 bits. Args: observ: Numpy observation. Raises: ValueError: Observation contains infinite values. Returns: Numpy observation with 32-bit data type. """ if not np.isfinite(observ).all(): raise ValueError('Infinite observation encountered.') if observ.dtype == np.float64: return observ.astype(np.float32) if observ.dtype == np.int64: return observ.astype(np.int32) return observ
[ "def", "_convert_observ", "(", "self", ",", "observ", ")", ":", "if", "not", "np", ".", "isfinite", "(", "observ", ")", ".", "all", "(", ")", ":", "raise", "ValueError", "(", "'Infinite observation encountered.'", ")", "if", "observ", ".", "dtype", "==", "np", ".", "float64", ":", "return", "observ", ".", "astype", "(", "np", ".", "float32", ")", "if", "observ", ".", "dtype", "==", "np", ".", "int64", ":", "return", "observ", ".", "astype", "(", "np", ".", "int32", ")", "return", "observ" ]
Convert the observation to 32 bits. Args: observ: Numpy observation. Raises: ValueError: Observation contains infinite values. Returns: Numpy observation with 32-bit data type.
[ "Convert", "the", "observation", "to", "32", "bits", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/wrappers.py#L530-L548
train
google-research/batch-ppo
agents/tools/wrappers.py
ConvertTo32Bit._convert_reward
def _convert_reward(self, reward): """Convert the reward to 32 bits. Args: reward: Numpy reward. Raises: ValueError: Rewards contain infinite values. Returns: Numpy reward with 32-bit data type. """ if not np.isfinite(reward).all(): raise ValueError('Infinite reward encountered.') return np.array(reward, dtype=np.float32)
python
def _convert_reward(self, reward): """Convert the reward to 32 bits. Args: reward: Numpy reward. Raises: ValueError: Rewards contain infinite values. Returns: Numpy reward with 32-bit data type. """ if not np.isfinite(reward).all(): raise ValueError('Infinite reward encountered.') return np.array(reward, dtype=np.float32)
[ "def", "_convert_reward", "(", "self", ",", "reward", ")", ":", "if", "not", "np", ".", "isfinite", "(", "reward", ")", ".", "all", "(", ")", ":", "raise", "ValueError", "(", "'Infinite reward encountered.'", ")", "return", "np", ".", "array", "(", "reward", ",", "dtype", "=", "np", ".", "float32", ")" ]
Convert the reward to 32 bits. Args: reward: Numpy reward. Raises: ValueError: Rewards contain infinite values. Returns: Numpy reward with 32-bit data type.
[ "Convert", "the", "reward", "to", "32", "bits", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/wrappers.py#L550-L564
train
google-research/batch-ppo
agents/tools/streaming_mean.py
StreamingMean.value
def value(self): """The current value of the mean.""" return self._sum / tf.cast(self._count, self._dtype)
python
def value(self): """The current value of the mean.""" return self._sum / tf.cast(self._count, self._dtype)
[ "def", "value", "(", "self", ")", ":", "return", "self", ".", "_sum", "/", "tf", ".", "cast", "(", "self", ".", "_count", ",", "self", ".", "_dtype", ")" ]
The current value of the mean.
[ "The", "current", "value", "of", "the", "mean", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/streaming_mean.py#L42-L44
train
google-research/batch-ppo
agents/tools/streaming_mean.py
StreamingMean.submit
def submit(self, value): """Submit a single or batch tensor to refine the streaming mean.""" # Add a batch dimension if necessary. if value.shape.ndims == self._sum.shape.ndims: value = value[None, ...] return tf.group( self._sum.assign_add(tf.reduce_sum(value, 0)), self._count.assign_add(tf.shape(value)[0]))
python
def submit(self, value): """Submit a single or batch tensor to refine the streaming mean.""" # Add a batch dimension if necessary. if value.shape.ndims == self._sum.shape.ndims: value = value[None, ...] return tf.group( self._sum.assign_add(tf.reduce_sum(value, 0)), self._count.assign_add(tf.shape(value)[0]))
[ "def", "submit", "(", "self", ",", "value", ")", ":", "# Add a batch dimension if necessary.", "if", "value", ".", "shape", ".", "ndims", "==", "self", ".", "_sum", ".", "shape", ".", "ndims", ":", "value", "=", "value", "[", "None", ",", "...", "]", "return", "tf", ".", "group", "(", "self", ".", "_sum", ".", "assign_add", "(", "tf", ".", "reduce_sum", "(", "value", ",", "0", ")", ")", ",", "self", ".", "_count", ".", "assign_add", "(", "tf", ".", "shape", "(", "value", ")", "[", "0", "]", ")", ")" ]
Submit a single or batch tensor to refine the streaming mean.
[ "Submit", "a", "single", "or", "batch", "tensor", "to", "refine", "the", "streaming", "mean", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/streaming_mean.py#L51-L58
train
google-research/batch-ppo
agents/tools/streaming_mean.py
StreamingMean.clear
def clear(self): """Return the mean estimate and reset the streaming statistics.""" value = self._sum / tf.cast(self._count, self._dtype) with tf.control_dependencies([value]): reset_value = self._sum.assign(tf.zeros_like(self._sum)) reset_count = self._count.assign(0) with tf.control_dependencies([reset_value, reset_count]): return tf.identity(value)
python
def clear(self): """Return the mean estimate and reset the streaming statistics.""" value = self._sum / tf.cast(self._count, self._dtype) with tf.control_dependencies([value]): reset_value = self._sum.assign(tf.zeros_like(self._sum)) reset_count = self._count.assign(0) with tf.control_dependencies([reset_value, reset_count]): return tf.identity(value)
[ "def", "clear", "(", "self", ")", ":", "value", "=", "self", ".", "_sum", "/", "tf", ".", "cast", "(", "self", ".", "_count", ",", "self", ".", "_dtype", ")", "with", "tf", ".", "control_dependencies", "(", "[", "value", "]", ")", ":", "reset_value", "=", "self", ".", "_sum", ".", "assign", "(", "tf", ".", "zeros_like", "(", "self", ".", "_sum", ")", ")", "reset_count", "=", "self", ".", "_count", ".", "assign", "(", "0", ")", "with", "tf", ".", "control_dependencies", "(", "[", "reset_value", ",", "reset_count", "]", ")", ":", "return", "tf", ".", "identity", "(", "value", ")" ]
Return the mean estimate and reset the streaming statistics.
[ "Return", "the", "mean", "estimate", "and", "reset", "the", "streaming", "statistics", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/streaming_mean.py#L60-L67
train
google-research/batch-ppo
agents/tools/nested.py
zip_
def zip_(*structures, **kwargs): # pylint: disable=differing-param-doc,missing-param-doc """Combine corresponding elements in multiple nested structure to tuples. The nested structures can consist of any combination of lists, tuples, and dicts. All provided structures must have the same nesting. Args: *structures: Nested structures. flatten: Whether to flatten the resulting structure into a tuple. Keys of dictionaries will be discarded. Returns: Nested structure. """ # Named keyword arguments are not allowed after *args in Python 2. flatten = kwargs.pop('flatten', False) assert not kwargs, 'zip() got unexpected keyword arguments.' return map( lambda *x: x if len(x) > 1 else x[0], *structures, flatten=flatten)
python
def zip_(*structures, **kwargs): # pylint: disable=differing-param-doc,missing-param-doc """Combine corresponding elements in multiple nested structure to tuples. The nested structures can consist of any combination of lists, tuples, and dicts. All provided structures must have the same nesting. Args: *structures: Nested structures. flatten: Whether to flatten the resulting structure into a tuple. Keys of dictionaries will be discarded. Returns: Nested structure. """ # Named keyword arguments are not allowed after *args in Python 2. flatten = kwargs.pop('flatten', False) assert not kwargs, 'zip() got unexpected keyword arguments.' return map( lambda *x: x if len(x) > 1 else x[0], *structures, flatten=flatten)
[ "def", "zip_", "(", "*", "structures", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=differing-param-doc,missing-param-doc", "# Named keyword arguments are not allowed after *args in Python 2.", "flatten", "=", "kwargs", ".", "pop", "(", "'flatten'", ",", "False", ")", "assert", "not", "kwargs", ",", "'zip() got unexpected keyword arguments.'", "return", "map", "(", "lambda", "*", "x", ":", "x", "if", "len", "(", "x", ")", ">", "1", "else", "x", "[", "0", "]", ",", "*", "structures", ",", "flatten", "=", "flatten", ")" ]
Combine corresponding elements in multiple nested structure to tuples. The nested structures can consist of any combination of lists, tuples, and dicts. All provided structures must have the same nesting. Args: *structures: Nested structures. flatten: Whether to flatten the resulting structure into a tuple. Keys of dictionaries will be discarded. Returns: Nested structure.
[ "Combine", "corresponding", "elements", "in", "multiple", "nested", "structure", "to", "tuples", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/nested.py#L29-L50
train
google-research/batch-ppo
agents/tools/nested.py
map_
def map_(function, *structures, **kwargs): # pylint: disable=differing-param-doc,missing-param-doc """Apply a function to every element in a nested structure. If multiple structures are provided as input, their structure must match and the function will be applied to corresponding groups of elements. The nested structure can consist of any combination of lists, tuples, and dicts. Args: function: The function to apply to the elements of the structure. Receives one argument for every structure that is provided. *structures: One of more nested structures. flatten: Whether to flatten the resulting structure into a tuple. Keys of dictionaries will be discarded. Returns: Nested structure. """ # Named keyword arguments are not allowed after *args in Python 2. flatten = kwargs.pop('flatten', False) assert not kwargs, 'map() got unexpected keyword arguments.' def impl(function, *structures): if len(structures) == 0: # pylint: disable=len-as-condition return structures if all(isinstance(s, (tuple, list)) for s in structures): if len(set(len(x) for x in structures)) > 1: raise ValueError('Cannot merge tuples or lists of different length.') args = tuple((impl(function, *x) for x in _builtin_zip(*structures))) if hasattr(structures[0], '_fields'): # namedtuple return type(structures[0])(*args) else: # tuple, list return type(structures[0])(args) if all(isinstance(s, dict) for s in structures): if len(set(frozenset(x.keys()) for x in structures)) > 1: raise ValueError('Cannot merge dicts with different keys.') merged = { k: impl(function, *(s[k] for s in structures)) for k in structures[0]} return type(structures[0])(merged) return function(*structures) result = impl(function, *structures) if flatten: result = flatten_(result) return result
python
def map_(function, *structures, **kwargs): # pylint: disable=differing-param-doc,missing-param-doc """Apply a function to every element in a nested structure. If multiple structures are provided as input, their structure must match and the function will be applied to corresponding groups of elements. The nested structure can consist of any combination of lists, tuples, and dicts. Args: function: The function to apply to the elements of the structure. Receives one argument for every structure that is provided. *structures: One of more nested structures. flatten: Whether to flatten the resulting structure into a tuple. Keys of dictionaries will be discarded. Returns: Nested structure. """ # Named keyword arguments are not allowed after *args in Python 2. flatten = kwargs.pop('flatten', False) assert not kwargs, 'map() got unexpected keyword arguments.' def impl(function, *structures): if len(structures) == 0: # pylint: disable=len-as-condition return structures if all(isinstance(s, (tuple, list)) for s in structures): if len(set(len(x) for x in structures)) > 1: raise ValueError('Cannot merge tuples or lists of different length.') args = tuple((impl(function, *x) for x in _builtin_zip(*structures))) if hasattr(structures[0], '_fields'): # namedtuple return type(structures[0])(*args) else: # tuple, list return type(structures[0])(args) if all(isinstance(s, dict) for s in structures): if len(set(frozenset(x.keys()) for x in structures)) > 1: raise ValueError('Cannot merge dicts with different keys.') merged = { k: impl(function, *(s[k] for s in structures)) for k in structures[0]} return type(structures[0])(merged) return function(*structures) result = impl(function, *structures) if flatten: result = flatten_(result) return result
[ "def", "map_", "(", "function", ",", "*", "structures", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=differing-param-doc,missing-param-doc", "# Named keyword arguments are not allowed after *args in Python 2.", "flatten", "=", "kwargs", ".", "pop", "(", "'flatten'", ",", "False", ")", "assert", "not", "kwargs", ",", "'map() got unexpected keyword arguments.'", "def", "impl", "(", "function", ",", "*", "structures", ")", ":", "if", "len", "(", "structures", ")", "==", "0", ":", "# pylint: disable=len-as-condition", "return", "structures", "if", "all", "(", "isinstance", "(", "s", ",", "(", "tuple", ",", "list", ")", ")", "for", "s", "in", "structures", ")", ":", "if", "len", "(", "set", "(", "len", "(", "x", ")", "for", "x", "in", "structures", ")", ")", ">", "1", ":", "raise", "ValueError", "(", "'Cannot merge tuples or lists of different length.'", ")", "args", "=", "tuple", "(", "(", "impl", "(", "function", ",", "*", "x", ")", "for", "x", "in", "_builtin_zip", "(", "*", "structures", ")", ")", ")", "if", "hasattr", "(", "structures", "[", "0", "]", ",", "'_fields'", ")", ":", "# namedtuple", "return", "type", "(", "structures", "[", "0", "]", ")", "(", "*", "args", ")", "else", ":", "# tuple, list", "return", "type", "(", "structures", "[", "0", "]", ")", "(", "args", ")", "if", "all", "(", "isinstance", "(", "s", ",", "dict", ")", "for", "s", "in", "structures", ")", ":", "if", "len", "(", "set", "(", "frozenset", "(", "x", ".", "keys", "(", ")", ")", "for", "x", "in", "structures", ")", ")", ">", "1", ":", "raise", "ValueError", "(", "'Cannot merge dicts with different keys.'", ")", "merged", "=", "{", "k", ":", "impl", "(", "function", ",", "*", "(", "s", "[", "k", "]", "for", "s", "in", "structures", ")", ")", "for", "k", "in", "structures", "[", "0", "]", "}", "return", "type", "(", "structures", "[", "0", "]", ")", "(", "merged", ")", "return", "function", "(", "*", "structures", ")", "result", "=", "impl", "(", "function", ",", "*", 
"structures", ")", "if", "flatten", ":", "result", "=", "flatten_", "(", "result", ")", "return", "result" ]
Apply a function to every element in a nested structure. If multiple structures are provided as input, their structure must match and the function will be applied to corresponding groups of elements. The nested structure can consist of any combination of lists, tuples, and dicts. Args: function: The function to apply to the elements of the structure. Receives one argument for every structure that is provided. *structures: One of more nested structures. flatten: Whether to flatten the resulting structure into a tuple. Keys of dictionaries will be discarded. Returns: Nested structure.
[ "Apply", "a", "function", "to", "every", "element", "in", "a", "nested", "structure", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/nested.py#L53-L98
train
google-research/batch-ppo
agents/tools/nested.py
flatten_
def flatten_(structure): """Combine all leaves of a nested structure into a tuple. The nested structure can consist of any combination of tuples, lists, and dicts. Dictionary keys will be discarded but values will ordered by the sorting of the keys. Args: structure: Nested structure. Returns: Flat tuple. """ if isinstance(structure, dict): if structure: structure = zip(*sorted(structure.items(), key=lambda x: x[0]))[1] else: # Zip doesn't work on an the items of an empty dictionary. structure = () if isinstance(structure, (tuple, list)): result = [] for element in structure: result += flatten_(element) return tuple(result) return (structure,)
python
def flatten_(structure): """Combine all leaves of a nested structure into a tuple. The nested structure can consist of any combination of tuples, lists, and dicts. Dictionary keys will be discarded but values will ordered by the sorting of the keys. Args: structure: Nested structure. Returns: Flat tuple. """ if isinstance(structure, dict): if structure: structure = zip(*sorted(structure.items(), key=lambda x: x[0]))[1] else: # Zip doesn't work on an the items of an empty dictionary. structure = () if isinstance(structure, (tuple, list)): result = [] for element in structure: result += flatten_(element) return tuple(result) return (structure,)
[ "def", "flatten_", "(", "structure", ")", ":", "if", "isinstance", "(", "structure", ",", "dict", ")", ":", "if", "structure", ":", "structure", "=", "zip", "(", "*", "sorted", "(", "structure", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ")", ")", "[", "1", "]", "else", ":", "# Zip doesn't work on an the items of an empty dictionary.", "structure", "=", "(", ")", "if", "isinstance", "(", "structure", ",", "(", "tuple", ",", "list", ")", ")", ":", "result", "=", "[", "]", "for", "element", "in", "structure", ":", "result", "+=", "flatten_", "(", "element", ")", "return", "tuple", "(", "result", ")", "return", "(", "structure", ",", ")" ]
Combine all leaves of a nested structure into a tuple. The nested structure can consist of any combination of tuples, lists, and dicts. Dictionary keys will be discarded but values will ordered by the sorting of the keys. Args: structure: Nested structure. Returns: Flat tuple.
[ "Combine", "all", "leaves", "of", "a", "nested", "structure", "into", "a", "tuple", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/nested.py#L101-L125
train
google-research/batch-ppo
agents/tools/nested.py
filter_
def filter_(predicate, *structures, **kwargs): # pylint: disable=differing-param-doc,missing-param-doc, too-many-branches """Select elements of a nested structure based on a predicate function. If multiple structures are provided as input, their structure must match and the function will be applied to corresponding groups of elements. The nested structure can consist of any combination of lists, tuples, and dicts. Args: predicate: The function to determine whether an element should be kept. Receives one argument for every structure that is provided. *structures: One of more nested structures. flatten: Whether to flatten the resulting structure into a tuple. Keys of dictionaries will be discarded. Returns: Nested structure. """ # Named keyword arguments are not allowed after *args in Python 2. flatten = kwargs.pop('flatten', False) assert not kwargs, 'filter() got unexpected keyword arguments.' def impl(predicate, *structures): if len(structures) == 0: # pylint: disable=len-as-condition return structures if all(isinstance(s, (tuple, list)) for s in structures): if len(set(len(x) for x in structures)) > 1: raise ValueError('Cannot merge tuples or lists of different length.') # Only wrap in tuples if more than one structure provided. if len(structures) > 1: filtered = (impl(predicate, *x) for x in _builtin_zip(*structures)) else: filtered = (impl(predicate, x) for x in structures[0]) # Remove empty containers and construct result structure. if hasattr(structures[0], '_fields'): # namedtuple filtered = (x if x != () else None for x in filtered) return type(structures[0])(*filtered) else: # tuple, list filtered = ( x for x in filtered if not isinstance(x, (tuple, list, dict)) or x) return type(structures[0])(filtered) if all(isinstance(s, dict) for s in structures): if len(set(frozenset(x.keys()) for x in structures)) > 1: raise ValueError('Cannot merge dicts with different keys.') # Only wrap in tuples if more than one structure provided. 
if len(structures) > 1: filtered = { k: impl(predicate, *(s[k] for s in structures)) for k in structures[0]} else: filtered = {k: impl(predicate, v) for k, v in structures[0].items()} # Remove empty containers and construct result structure. filtered = { k: v for k, v in filtered.items() if not isinstance(v, (tuple, list, dict)) or v} return type(structures[0])(filtered) if len(structures) > 1: return structures if predicate(*structures) else () else: return structures[0] if predicate(structures[0]) else () result = impl(predicate, *structures) if flatten: result = flatten_(result) return result
python
def filter_(predicate, *structures, **kwargs): # pylint: disable=differing-param-doc,missing-param-doc, too-many-branches """Select elements of a nested structure based on a predicate function. If multiple structures are provided as input, their structure must match and the function will be applied to corresponding groups of elements. The nested structure can consist of any combination of lists, tuples, and dicts. Args: predicate: The function to determine whether an element should be kept. Receives one argument for every structure that is provided. *structures: One of more nested structures. flatten: Whether to flatten the resulting structure into a tuple. Keys of dictionaries will be discarded. Returns: Nested structure. """ # Named keyword arguments are not allowed after *args in Python 2. flatten = kwargs.pop('flatten', False) assert not kwargs, 'filter() got unexpected keyword arguments.' def impl(predicate, *structures): if len(structures) == 0: # pylint: disable=len-as-condition return structures if all(isinstance(s, (tuple, list)) for s in structures): if len(set(len(x) for x in structures)) > 1: raise ValueError('Cannot merge tuples or lists of different length.') # Only wrap in tuples if more than one structure provided. if len(structures) > 1: filtered = (impl(predicate, *x) for x in _builtin_zip(*structures)) else: filtered = (impl(predicate, x) for x in structures[0]) # Remove empty containers and construct result structure. if hasattr(structures[0], '_fields'): # namedtuple filtered = (x if x != () else None for x in filtered) return type(structures[0])(*filtered) else: # tuple, list filtered = ( x for x in filtered if not isinstance(x, (tuple, list, dict)) or x) return type(structures[0])(filtered) if all(isinstance(s, dict) for s in structures): if len(set(frozenset(x.keys()) for x in structures)) > 1: raise ValueError('Cannot merge dicts with different keys.') # Only wrap in tuples if more than one structure provided. 
if len(structures) > 1: filtered = { k: impl(predicate, *(s[k] for s in structures)) for k in structures[0]} else: filtered = {k: impl(predicate, v) for k, v in structures[0].items()} # Remove empty containers and construct result structure. filtered = { k: v for k, v in filtered.items() if not isinstance(v, (tuple, list, dict)) or v} return type(structures[0])(filtered) if len(structures) > 1: return structures if predicate(*structures) else () else: return structures[0] if predicate(structures[0]) else () result = impl(predicate, *structures) if flatten: result = flatten_(result) return result
[ "def", "filter_", "(", "predicate", ",", "*", "structures", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=differing-param-doc,missing-param-doc, too-many-branches", "# Named keyword arguments are not allowed after *args in Python 2.", "flatten", "=", "kwargs", ".", "pop", "(", "'flatten'", ",", "False", ")", "assert", "not", "kwargs", ",", "'filter() got unexpected keyword arguments.'", "def", "impl", "(", "predicate", ",", "*", "structures", ")", ":", "if", "len", "(", "structures", ")", "==", "0", ":", "# pylint: disable=len-as-condition", "return", "structures", "if", "all", "(", "isinstance", "(", "s", ",", "(", "tuple", ",", "list", ")", ")", "for", "s", "in", "structures", ")", ":", "if", "len", "(", "set", "(", "len", "(", "x", ")", "for", "x", "in", "structures", ")", ")", ">", "1", ":", "raise", "ValueError", "(", "'Cannot merge tuples or lists of different length.'", ")", "# Only wrap in tuples if more than one structure provided.", "if", "len", "(", "structures", ")", ">", "1", ":", "filtered", "=", "(", "impl", "(", "predicate", ",", "*", "x", ")", "for", "x", "in", "_builtin_zip", "(", "*", "structures", ")", ")", "else", ":", "filtered", "=", "(", "impl", "(", "predicate", ",", "x", ")", "for", "x", "in", "structures", "[", "0", "]", ")", "# Remove empty containers and construct result structure.", "if", "hasattr", "(", "structures", "[", "0", "]", ",", "'_fields'", ")", ":", "# namedtuple", "filtered", "=", "(", "x", "if", "x", "!=", "(", ")", "else", "None", "for", "x", "in", "filtered", ")", "return", "type", "(", "structures", "[", "0", "]", ")", "(", "*", "filtered", ")", "else", ":", "# tuple, list", "filtered", "=", "(", "x", "for", "x", "in", "filtered", "if", "not", "isinstance", "(", "x", ",", "(", "tuple", ",", "list", ",", "dict", ")", ")", "or", "x", ")", "return", "type", "(", "structures", "[", "0", "]", ")", "(", "filtered", ")", "if", "all", "(", "isinstance", "(", "s", ",", "dict", ")", "for", "s", "in", "structures", ")", 
":", "if", "len", "(", "set", "(", "frozenset", "(", "x", ".", "keys", "(", ")", ")", "for", "x", "in", "structures", ")", ")", ">", "1", ":", "raise", "ValueError", "(", "'Cannot merge dicts with different keys.'", ")", "# Only wrap in tuples if more than one structure provided.", "if", "len", "(", "structures", ")", ">", "1", ":", "filtered", "=", "{", "k", ":", "impl", "(", "predicate", ",", "*", "(", "s", "[", "k", "]", "for", "s", "in", "structures", ")", ")", "for", "k", "in", "structures", "[", "0", "]", "}", "else", ":", "filtered", "=", "{", "k", ":", "impl", "(", "predicate", ",", "v", ")", "for", "k", ",", "v", "in", "structures", "[", "0", "]", ".", "items", "(", ")", "}", "# Remove empty containers and construct result structure.", "filtered", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "filtered", ".", "items", "(", ")", "if", "not", "isinstance", "(", "v", ",", "(", "tuple", ",", "list", ",", "dict", ")", ")", "or", "v", "}", "return", "type", "(", "structures", "[", "0", "]", ")", "(", "filtered", ")", "if", "len", "(", "structures", ")", ">", "1", ":", "return", "structures", "if", "predicate", "(", "*", "structures", ")", "else", "(", ")", "else", ":", "return", "structures", "[", "0", "]", "if", "predicate", "(", "structures", "[", "0", "]", ")", "else", "(", ")", "result", "=", "impl", "(", "predicate", ",", "*", "structures", ")", "if", "flatten", ":", "result", "=", "flatten_", "(", "result", ")", "return", "result" ]
Select elements of a nested structure based on a predicate function. If multiple structures are provided as input, their structure must match and the function will be applied to corresponding groups of elements. The nested structure can consist of any combination of lists, tuples, and dicts. Args: predicate: The function to determine whether an element should be kept. Receives one argument for every structure that is provided. *structures: One of more nested structures. flatten: Whether to flatten the resulting structure into a tuple. Keys of dictionaries will be discarded. Returns: Nested structure.
[ "Select", "elements", "of", "a", "nested", "structure", "based", "on", "a", "predicate", "function", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/nested.py#L128-L192
train
google-research/batch-ppo
agents/tools/loop.py
Loop.add_phase
def add_phase( self, name, done, score, summary, steps, report_every=None, log_every=None, checkpoint_every=None, feed=None): """Add a phase to the loop protocol. If the model breaks long computation into multiple steps, the done tensor indicates whether the current score should be added to the mean counter. For example, in reinforcement learning we only have a valid score at the end of the episode. Score and done tensors can either be scalars or vectors, to support single and batched computations. Args: name: Name for the phase, used for the summary writer. done: Tensor indicating whether current score can be used. score: Tensor holding the current, possibly intermediate, score. summary: Tensor holding summary string to write if not an empty string. steps: Duration of the phase in steps. report_every: Yield mean score every this number of steps. log_every: Request summaries via `log` tensor every this number of steps. checkpoint_every: Write checkpoint every this number of steps. feed: Additional feed dictionary for the session run call. Raises: ValueError: Unknown rank for done or score tensors. """ done = tf.convert_to_tensor(done, tf.bool) score = tf.convert_to_tensor(score, tf.float32) summary = tf.convert_to_tensor(summary, tf.string) feed = feed or {} if done.shape.ndims is None or score.shape.ndims is None: raise ValueError("Rank of 'done' and 'score' tensors must be known.") writer = self._logdir and tf.summary.FileWriter( os.path.join(self._logdir, name), tf.get_default_graph(), flush_secs=60) op = self._define_step(done, score, summary) batch = 1 if score.shape.ndims == 0 else score.shape[0].value self._phases.append(_Phase( name, writer, op, batch, int(steps), feed, report_every, log_every, checkpoint_every))
python
def add_phase( self, name, done, score, summary, steps, report_every=None, log_every=None, checkpoint_every=None, feed=None): """Add a phase to the loop protocol. If the model breaks long computation into multiple steps, the done tensor indicates whether the current score should be added to the mean counter. For example, in reinforcement learning we only have a valid score at the end of the episode. Score and done tensors can either be scalars or vectors, to support single and batched computations. Args: name: Name for the phase, used for the summary writer. done: Tensor indicating whether current score can be used. score: Tensor holding the current, possibly intermediate, score. summary: Tensor holding summary string to write if not an empty string. steps: Duration of the phase in steps. report_every: Yield mean score every this number of steps. log_every: Request summaries via `log` tensor every this number of steps. checkpoint_every: Write checkpoint every this number of steps. feed: Additional feed dictionary for the session run call. Raises: ValueError: Unknown rank for done or score tensors. """ done = tf.convert_to_tensor(done, tf.bool) score = tf.convert_to_tensor(score, tf.float32) summary = tf.convert_to_tensor(summary, tf.string) feed = feed or {} if done.shape.ndims is None or score.shape.ndims is None: raise ValueError("Rank of 'done' and 'score' tensors must be known.") writer = self._logdir and tf.summary.FileWriter( os.path.join(self._logdir, name), tf.get_default_graph(), flush_secs=60) op = self._define_step(done, score, summary) batch = 1 if score.shape.ndims == 0 else score.shape[0].value self._phases.append(_Phase( name, writer, op, batch, int(steps), feed, report_every, log_every, checkpoint_every))
[ "def", "add_phase", "(", "self", ",", "name", ",", "done", ",", "score", ",", "summary", ",", "steps", ",", "report_every", "=", "None", ",", "log_every", "=", "None", ",", "checkpoint_every", "=", "None", ",", "feed", "=", "None", ")", ":", "done", "=", "tf", ".", "convert_to_tensor", "(", "done", ",", "tf", ".", "bool", ")", "score", "=", "tf", ".", "convert_to_tensor", "(", "score", ",", "tf", ".", "float32", ")", "summary", "=", "tf", ".", "convert_to_tensor", "(", "summary", ",", "tf", ".", "string", ")", "feed", "=", "feed", "or", "{", "}", "if", "done", ".", "shape", ".", "ndims", "is", "None", "or", "score", ".", "shape", ".", "ndims", "is", "None", ":", "raise", "ValueError", "(", "\"Rank of 'done' and 'score' tensors must be known.\"", ")", "writer", "=", "self", ".", "_logdir", "and", "tf", ".", "summary", ".", "FileWriter", "(", "os", ".", "path", ".", "join", "(", "self", ".", "_logdir", ",", "name", ")", ",", "tf", ".", "get_default_graph", "(", ")", ",", "flush_secs", "=", "60", ")", "op", "=", "self", ".", "_define_step", "(", "done", ",", "score", ",", "summary", ")", "batch", "=", "1", "if", "score", ".", "shape", ".", "ndims", "==", "0", "else", "score", ".", "shape", "[", "0", "]", ".", "value", "self", ".", "_phases", ".", "append", "(", "_Phase", "(", "name", ",", "writer", ",", "op", ",", "batch", ",", "int", "(", "steps", ")", ",", "feed", ",", "report_every", ",", "log_every", ",", "checkpoint_every", ")", ")" ]
Add a phase to the loop protocol. If the model breaks long computation into multiple steps, the done tensor indicates whether the current score should be added to the mean counter. For example, in reinforcement learning we only have a valid score at the end of the episode. Score and done tensors can either be scalars or vectors, to support single and batched computations. Args: name: Name for the phase, used for the summary writer. done: Tensor indicating whether current score can be used. score: Tensor holding the current, possibly intermediate, score. summary: Tensor holding summary string to write if not an empty string. steps: Duration of the phase in steps. report_every: Yield mean score every this number of steps. log_every: Request summaries via `log` tensor every this number of steps. checkpoint_every: Write checkpoint every this number of steps. feed: Additional feed dictionary for the session run call. Raises: ValueError: Unknown rank for done or score tensors.
[ "Add", "a", "phase", "to", "the", "loop", "protocol", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/loop.py#L66-L106
train
google-research/batch-ppo
agents/tools/loop.py
Loop.run
def run(self, sess, saver, max_step=None): """Run the loop schedule for a specified number of steps. Call the operation of the current phase until the global step reaches the specified maximum step. Phases are repeated over and over in the order they were added. Args: sess: Session to use to run the phase operation. saver: Saver used for checkpointing. max_step: Run the operations until the step reaches this limit. Yields: Reported mean scores. """ global_step = sess.run(self._step) steps_made = 1 while True: if max_step and global_step >= max_step: break phase, epoch, steps_in = self._find_current_phase(global_step) phase_step = epoch * phase.steps + steps_in if steps_in % phase.steps < steps_made: message = '\n' + ('-' * 50) + '\n' message += 'Phase {} (phase step {}, global step {}).' tf.logging.info(message.format(phase.name, phase_step, global_step)) # Populate book keeping tensors. phase.feed[self._reset] = (steps_in < steps_made) phase.feed[self._log] = ( phase.writer and self._is_every_steps(phase_step, phase.batch, phase.log_every)) phase.feed[self._report] = ( self._is_every_steps(phase_step, phase.batch, phase.report_every)) summary, mean_score, global_step, steps_made = sess.run( phase.op, phase.feed) if self._is_every_steps(phase_step, phase.batch, phase.checkpoint_every): self._store_checkpoint(sess, saver, global_step) if self._is_every_steps(phase_step, phase.batch, phase.report_every): yield mean_score if summary and phase.writer: # We want smaller phases to catch up at the beginnig of each epoch so # that their graphs are aligned. longest_phase = max(phase.steps for phase in self._phases) summary_step = epoch * longest_phase + steps_in phase.writer.add_summary(summary, summary_step)
python
def run(self, sess, saver, max_step=None): """Run the loop schedule for a specified number of steps. Call the operation of the current phase until the global step reaches the specified maximum step. Phases are repeated over and over in the order they were added. Args: sess: Session to use to run the phase operation. saver: Saver used for checkpointing. max_step: Run the operations until the step reaches this limit. Yields: Reported mean scores. """ global_step = sess.run(self._step) steps_made = 1 while True: if max_step and global_step >= max_step: break phase, epoch, steps_in = self._find_current_phase(global_step) phase_step = epoch * phase.steps + steps_in if steps_in % phase.steps < steps_made: message = '\n' + ('-' * 50) + '\n' message += 'Phase {} (phase step {}, global step {}).' tf.logging.info(message.format(phase.name, phase_step, global_step)) # Populate book keeping tensors. phase.feed[self._reset] = (steps_in < steps_made) phase.feed[self._log] = ( phase.writer and self._is_every_steps(phase_step, phase.batch, phase.log_every)) phase.feed[self._report] = ( self._is_every_steps(phase_step, phase.batch, phase.report_every)) summary, mean_score, global_step, steps_made = sess.run( phase.op, phase.feed) if self._is_every_steps(phase_step, phase.batch, phase.checkpoint_every): self._store_checkpoint(sess, saver, global_step) if self._is_every_steps(phase_step, phase.batch, phase.report_every): yield mean_score if summary and phase.writer: # We want smaller phases to catch up at the beginnig of each epoch so # that their graphs are aligned. longest_phase = max(phase.steps for phase in self._phases) summary_step = epoch * longest_phase + steps_in phase.writer.add_summary(summary, summary_step)
[ "def", "run", "(", "self", ",", "sess", ",", "saver", ",", "max_step", "=", "None", ")", ":", "global_step", "=", "sess", ".", "run", "(", "self", ".", "_step", ")", "steps_made", "=", "1", "while", "True", ":", "if", "max_step", "and", "global_step", ">=", "max_step", ":", "break", "phase", ",", "epoch", ",", "steps_in", "=", "self", ".", "_find_current_phase", "(", "global_step", ")", "phase_step", "=", "epoch", "*", "phase", ".", "steps", "+", "steps_in", "if", "steps_in", "%", "phase", ".", "steps", "<", "steps_made", ":", "message", "=", "'\\n'", "+", "(", "'-'", "*", "50", ")", "+", "'\\n'", "message", "+=", "'Phase {} (phase step {}, global step {}).'", "tf", ".", "logging", ".", "info", "(", "message", ".", "format", "(", "phase", ".", "name", ",", "phase_step", ",", "global_step", ")", ")", "# Populate book keeping tensors.", "phase", ".", "feed", "[", "self", ".", "_reset", "]", "=", "(", "steps_in", "<", "steps_made", ")", "phase", ".", "feed", "[", "self", ".", "_log", "]", "=", "(", "phase", ".", "writer", "and", "self", ".", "_is_every_steps", "(", "phase_step", ",", "phase", ".", "batch", ",", "phase", ".", "log_every", ")", ")", "phase", ".", "feed", "[", "self", ".", "_report", "]", "=", "(", "self", ".", "_is_every_steps", "(", "phase_step", ",", "phase", ".", "batch", ",", "phase", ".", "report_every", ")", ")", "summary", ",", "mean_score", ",", "global_step", ",", "steps_made", "=", "sess", ".", "run", "(", "phase", ".", "op", ",", "phase", ".", "feed", ")", "if", "self", ".", "_is_every_steps", "(", "phase_step", ",", "phase", ".", "batch", ",", "phase", ".", "checkpoint_every", ")", ":", "self", ".", "_store_checkpoint", "(", "sess", ",", "saver", ",", "global_step", ")", "if", "self", ".", "_is_every_steps", "(", "phase_step", ",", "phase", ".", "batch", ",", "phase", ".", "report_every", ")", ":", "yield", "mean_score", "if", "summary", "and", "phase", ".", "writer", ":", "# We want smaller phases to catch up at the beginnig of each epoch 
so", "# that their graphs are aligned.", "longest_phase", "=", "max", "(", "phase", ".", "steps", "for", "phase", "in", "self", ".", "_phases", ")", "summary_step", "=", "epoch", "*", "longest_phase", "+", "steps_in", "phase", ".", "writer", ".", "add_summary", "(", "summary", ",", "summary_step", ")" ]
Run the loop schedule for a specified number of steps. Call the operation of the current phase until the global step reaches the specified maximum step. Phases are repeated over and over in the order they were added. Args: sess: Session to use to run the phase operation. saver: Saver used for checkpointing. max_step: Run the operations until the step reaches this limit. Yields: Reported mean scores.
[ "Run", "the", "loop", "schedule", "for", "a", "specified", "number", "of", "steps", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/loop.py#L108-L152
train
google-research/batch-ppo
agents/tools/loop.py
Loop._is_every_steps
def _is_every_steps(self, phase_step, batch, every): """Determine whether a periodic event should happen at this step. Args: phase_step: The incrementing step. batch: The number of steps progressed at once. every: The interval of the period. Returns: Boolean of whether the event should happen. """ if not every: return False covered_steps = range(phase_step, phase_step + batch) return any((step + 1) % every == 0 for step in covered_steps)
python
def _is_every_steps(self, phase_step, batch, every): """Determine whether a periodic event should happen at this step. Args: phase_step: The incrementing step. batch: The number of steps progressed at once. every: The interval of the period. Returns: Boolean of whether the event should happen. """ if not every: return False covered_steps = range(phase_step, phase_step + batch) return any((step + 1) % every == 0 for step in covered_steps)
[ "def", "_is_every_steps", "(", "self", ",", "phase_step", ",", "batch", ",", "every", ")", ":", "if", "not", "every", ":", "return", "False", "covered_steps", "=", "range", "(", "phase_step", ",", "phase_step", "+", "batch", ")", "return", "any", "(", "(", "step", "+", "1", ")", "%", "every", "==", "0", "for", "step", "in", "covered_steps", ")" ]
Determine whether a periodic event should happen at this step. Args: phase_step: The incrementing step. batch: The number of steps progressed at once. every: The interval of the period. Returns: Boolean of whether the event should happen.
[ "Determine", "whether", "a", "periodic", "event", "should", "happen", "at", "this", "step", "." ]
3d09705977bae4e7c3eb20339a3b384d2a5531e4
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/loop.py#L154-L168
train