Dataset columns:

repo              string (lengths 7 to 54)
path              string (lengths 4 to 192)
url               string (lengths 87 to 284)
code              string (lengths 78 to 104k)
code_tokens       list
docstring         string (lengths 1 to 46.9k)
docstring_tokens  list
language          string class (1 value)
partition         string class (3 values)
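The field names above match the CodeSearchNet-style JSONL layout (one JSON object per function, with paired code and docstring token lists). Below is a minimal sketch of how such records could be consumed, assuming they are stored as JSON lines; the file name is a hypothetical placeholder.

import json

def iter_records(path):
    # Yield one dict per line; each record carries keys such as 'repo',
    # 'path', 'url', 'code', 'code_tokens', 'docstring', 'docstring_tokens',
    # 'language' and 'partition', as described by the schema above.
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            yield json.loads(line)

# Example usage (hypothetical file name):
for record in iter_records("python_train_0.jsonl"):
    print(record["repo"], record["language"], record["partition"])
    break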
infothrill/python-launchd
launchd/launchctl.py
https://github.com/infothrill/python-launchd/blob/2cd50579e808851b116f5a26f9b871a32b65ce0e/launchd/launchctl.py#L55-L70
def properties(self):
    '''
    This is a lazily loaded dictionary containing the launchd runtime
    information of the job in question. Internally, this is retrieved
    using ServiceManagement.SMJobCopyDictionary().

    Keep in mind that some dictionary keys are not always present
    (for example 'PID').

    If the job specified by the label cannot be found in launchd, then
    this method raises a ValueError exception.
    '''
    if hasattr(self, '_nsproperties'):
        self._properties = convert_NSDictionary_to_dict(self._nsproperties)
        del self._nsproperties  #self._nsproperties = None
    if self._properties is None:
        self.refresh()
    return self._properties
[ "def", "properties", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'_nsproperties'", ")", ":", "self", ".", "_properties", "=", "convert_NSDictionary_to_dict", "(", "self", ".", "_nsproperties", ")", "del", "self", ".", "_nsproperties", "#self._nsproperties = None", "if", "self", ".", "_properties", "is", "None", ":", "self", ".", "refresh", "(", ")", "return", "self", ".", "_properties" ]
This is a lazily loaded dictionary containing the launchd runtime information of the job in question. Internally, this is retrieved using ServiceManagement.SMJobCopyDictionary(). Keep in mind that some dictionary keys are not always present (for example 'PID'). If the job specified by the label cannot be found in launchd, then this method raises a ValueError exception.
[ "This", "is", "a", "lazily", "loaded", "dictionary", "containing", "the", "launchd", "runtime", "information", "of", "the", "job", "in", "question", ".", "Internally", "this", "is", "retrieved", "using", "ServiceManagement", ".", "SMJobCopyDictionary", "()", ".", "Keep", "in", "mind", "that", "some", "dictionary", "keys", "are", "not", "always", "present", "(", "for", "example", "PID", ")", ".", "If", "the", "job", "specified", "by", "the", "label", "cannot", "be", "found", "in", "launchd", "then", "this", "method", "raises", "a", "ValueError", "exception", "." ]
python
train
dopefishh/pympi
pympi/Elan.py
https://github.com/dopefishh/pympi/blob/79c747cde45b5ba203ed93154d8c123ac9c3ef56/pympi/Elan.py#L185-L196
def add_external_ref(self, eid, etype, value):
    """Add an external reference.

    :param str eid: Name of the external reference.
    :param str etype: Type of the external reference, has to be in
        ``['iso12620', 'ecv', 'cve_id', 'lexen_id', 'resource_url']``.
    :param str value: Value of the external reference.
    :throws KeyError: if etype is not in the list of possible types.
    """
    if etype not in self.ETYPES:
        raise KeyError('etype not in {}'.format(self.ETYPES))
    self.external_refs[eid] = (etype, value)
[ "def", "add_external_ref", "(", "self", ",", "eid", ",", "etype", ",", "value", ")", ":", "if", "etype", "not", "in", "self", ".", "ETYPES", ":", "raise", "KeyError", "(", "'etype not in {}'", ".", "format", "(", "self", ".", "ETYPES", ")", ")", "self", ".", "external_refs", "[", "eid", "]", "=", "(", "etype", ",", "value", ")" ]
Add an external reference. :param str eid: Name of the external reference. :param str etype: Type of the external reference, has to be in ``['iso12620', 'ecv', 'cve_id', 'lexen_id', 'resource_url']``. :param str value: Value of the external reference. :throws KeyError: if etype is not in the list of possible types.
[ "Add", "an", "external", "reference", "." ]
python
test
andresriancho/w3af-api-client
w3af_api_client/scan.py
https://github.com/andresriancho/w3af-api-client/blob/adeb79bad75264d754de69f0bb981b366da96f32/w3af_api_client/scan.py#L56-L91
def stop(self, timeout=None):
    """
    Send the GET request required to stop the scan

    If timeout is not specified we just send the request and return. When
    it is the method will wait for (at most) :timeout: seconds until the
    scan changes it's status/stops. If the timeout is reached then an
    exception is raised.

    :param timeout: The timeout in seconds
    :return: None, an exception is raised if the timeout is exceeded
    """
    assert self.scan_id is not None, 'No scan_id has been set'

    #
    # Simple stop
    #
    if timeout is None:
        url = '/scans/%s/stop' % self.scan_id
        self.conn.send_request(url, method='GET')
        return

    #
    # Stop with timeout
    #
    self.stop()

    for _ in xrange(timeout):
        time.sleep(1)

        is_running = self.get_status()['is_running']
        if not is_running:
            return

    msg = 'Failed to stop the scan in %s seconds'
    raise ScanStopTimeoutException(msg % timeout)
[ "def", "stop", "(", "self", ",", "timeout", "=", "None", ")", ":", "assert", "self", ".", "scan_id", "is", "not", "None", ",", "'No scan_id has been set'", "#", "# Simple stop", "#", "if", "timeout", "is", "None", ":", "url", "=", "'/scans/%s/stop'", "%", "self", ".", "scan_id", "self", ".", "conn", ".", "send_request", "(", "url", ",", "method", "=", "'GET'", ")", "return", "#", "# Stop with timeout", "#", "self", ".", "stop", "(", ")", "for", "_", "in", "xrange", "(", "timeout", ")", ":", "time", ".", "sleep", "(", "1", ")", "is_running", "=", "self", ".", "get_status", "(", ")", "[", "'is_running'", "]", "if", "not", "is_running", ":", "return", "msg", "=", "'Failed to stop the scan in %s seconds'", "raise", "ScanStopTimeoutException", "(", "msg", "%", "timeout", ")" ]
Send the GET request required to stop the scan If timeout is not specified we just send the request and return. When it is the method will wait for (at most) :timeout: seconds until the scan changes it's status/stops. If the timeout is reached then an exception is raised. :param timeout: The timeout in seconds :return: None, an exception is raised if the timeout is exceeded
[ "Send", "the", "GET", "request", "required", "to", "stop", "the", "scan" ]
python
train
welbornprod/colr
colr/controls.py
https://github.com/welbornprod/colr/blob/417117fdbddbc53142096685ac2af006b2bd0220/colr/controls.py#L85-L106
def erase_display(method=EraseMethod.ALL_MOVE, file=sys.stdout):
    """ Clear the screen or part of the screen, and possibly moves the cursor
        to the "home" position (1, 1). See `method` argument below.

        Esc[<method>J

        Arguments:
            method: One of these possible values:
                        EraseMethod.END or 0:
                            Clear from cursor to the end of the screen.
                        EraseMethod.START or 1:
                            Clear from cursor to the start of the screen.
                        EraseMethod.ALL_MOVE or 2:
                            Clear all, and move home.
                        EraseMethod.ALL_ERASE or 3:
                            Clear all, and erase scrollback buffer.
                        EraseMethod.ALL_MOVE_ERASE or 4:
                            Like doing 2 and 3 in succession.
                            This is a feature of Colr. It is not standard.
                    Default: EraseMethod.ALL_MOVE (2)
    """
    erase.display(method).write(file=file)
[ "def", "erase_display", "(", "method", "=", "EraseMethod", ".", "ALL_MOVE", ",", "file", "=", "sys", ".", "stdout", ")", ":", "erase", ".", "display", "(", "method", ")", ".", "write", "(", "file", "=", "file", ")" ]
Clear the screen or part of the screen, and possibly moves the cursor to the "home" position (1, 1). See `method` argument below. Esc[<method>J Arguments: method: One of these possible values: EraseMethod.END or 0: Clear from cursor to the end of the screen. EraseMethod.START or 1: Clear from cursor to the start of the screen. EraseMethod.ALL_MOVE or 2: Clear all, and move home. EraseMethod.ALL_ERASE or 3: Clear all, and erase scrollback buffer. EraseMethod.ALL_MOVE_ERASE or 4: Like doing 2 and 3 in succession. This is a feature of Colr. It is not standard. Default: EraseMethod.ALL_MOVE (2)
[ "Clear", "the", "screen", "or", "part", "of", "the", "screen", "and", "possibly", "moves", "the", "cursor", "to", "the", "home", "position", "(", "1", "1", ")", ".", "See", "method", "argument", "below", "." ]
python
train
ellisonleao/pyshorteners
pyshorteners/shorteners/bitly.py
https://github.com/ellisonleao/pyshorteners/blob/116155751c943f8d875c819d5a41db10515db18d/pyshorteners/shorteners/bitly.py#L77-L105
def total_clicks(self, url):
    """Total clicks implementation for Bit.ly
    Args:
        url: the URL you want to get the total clicks count
    Returns:
        An int containing the total clicks count
    Raises:
        BadAPIResponseException: If the API Returns an error as response
    """
    url = self.clean_url(url)
    clicks_url = f'{self.api_url}v3/link/clicks'
    params = {
        'link': url,
        'access_token': self.api_key,
        'format': 'txt'
    }

    response = self._get(clicks_url, params=params)
    if not response.ok:
        raise BadAPIResponseException(response.content)

    try:
        total_clicks = int(response.text)
    except (KeyError, TypeError) as e:
        logger.warning('Bad value from total_clicks response: %s', e)
        return 0

    return total_clicks
[ "def", "total_clicks", "(", "self", ",", "url", ")", ":", "url", "=", "self", ".", "clean_url", "(", "url", ")", "clicks_url", "=", "f'{self.api_url}v3/link/clicks'", "params", "=", "{", "'link'", ":", "url", ",", "'access_token'", ":", "self", ".", "api_key", ",", "'format'", ":", "'txt'", "}", "response", "=", "self", ".", "_get", "(", "clicks_url", ",", "params", "=", "params", ")", "if", "not", "response", ".", "ok", ":", "raise", "BadAPIResponseException", "(", "response", ".", "content", ")", "try", ":", "total_clicks", "=", "int", "(", "response", ".", "text", ")", "except", "(", "KeyError", ",", "TypeError", ")", "as", "e", ":", "logger", ".", "warning", "(", "'Bad value from total_clicks response: %s'", ",", "e", ")", "return", "0", "return", "total_clicks" ]
Total clicks implementation for Bit.ly Args: url: the URL you want to get the total clicks count Returns: An int containing the total clicks count Raises: BadAPIResponseException: If the API Returns an error as response
[ "Total", "clicks", "implementation", "for", "Bit", ".", "ly", "Args", ":", "url", ":", "the", "URL", "you", "want", "to", "get", "the", "total", "clicks", "count" ]
python
train
SpriteLink/NIPAP
pynipap/pynipap.py
https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/pynipap/pynipap.py#L1260-L1285
def remove(self, recursive = False):
    """ Remove the prefix.

        Maps to the function :py:func:`nipap.backend.Nipap.remove_prefix`
        in the backend. Please see the documentation for the backend
        function for information regarding input arguments and return
        values.
    """
    xmlrpc = XMLRPCConnection()
    try:
        xmlrpc.connection.remove_prefix(
            {
                'prefix': { 'id': self.id },
                'recursive': recursive,
                'auth': self._auth_opts.options
            })
    except xmlrpclib.Fault as xml_fault:
        raise _fault_to_exception(xml_fault)

    # update cache
    if self.id in _cache['Prefix']:
        del(_cache['Prefix'][self.id])
    if self.pool is not None:
        if self.pool.id in _cache['Pool']:
            del _cache['Pool'][self.pool.id]
[ "def", "remove", "(", "self", ",", "recursive", "=", "False", ")", ":", "xmlrpc", "=", "XMLRPCConnection", "(", ")", "try", ":", "xmlrpc", ".", "connection", ".", "remove_prefix", "(", "{", "'prefix'", ":", "{", "'id'", ":", "self", ".", "id", "}", ",", "'recursive'", ":", "recursive", ",", "'auth'", ":", "self", ".", "_auth_opts", ".", "options", "}", ")", "except", "xmlrpclib", ".", "Fault", "as", "xml_fault", ":", "raise", "_fault_to_exception", "(", "xml_fault", ")", "# update cache", "if", "self", ".", "id", "in", "_cache", "[", "'Prefix'", "]", ":", "del", "(", "_cache", "[", "'Prefix'", "]", "[", "self", ".", "id", "]", ")", "if", "self", ".", "pool", "is", "not", "None", ":", "if", "self", ".", "pool", ".", "id", "in", "_cache", "[", "'Pool'", "]", ":", "del", "_cache", "[", "'Pool'", "]", "[", "self", ".", "pool", ".", "id", "]" ]
Remove the prefix. Maps to the function :py:func:`nipap.backend.Nipap.remove_prefix` in the backend. Please see the documentation for the backend function for information regarding input arguments and return values.
[ "Remove", "the", "prefix", "." ]
python
train
mattjj/pyslds
pyslds/states.py
https://github.com/mattjj/pyslds/blob/c505c2bd05a5549d450b518f02493b68ed12e590/pyslds/states.py#L730-L748
def mf_aBl(self):
    """
    These are the expected log likelihoods (node potentials)
    as seen from the discrete states.
    """
    mf_aBl = self._mf_aBl = np.zeros((self.T, self.num_states))
    ids, dds, eds = self.init_dynamics_distns, self.dynamics_distns, \
        self.emission_distns

    for idx, (d1, d2, d3) in enumerate(zip(ids, dds, eds)):
        mf_aBl[0,idx] = d1.expected_log_likelihood(
            stats=self.E_init_stats)
        mf_aBl[:-1,idx] += d2.expected_log_likelihood(
            stats=self.E_dynamics_stats)
        mf_aBl[:,idx] += d3.expected_log_likelihood(
            stats=self.E_emission_stats)

    mf_aBl[np.isnan(mf_aBl).any(1)] = 0.
    return mf_aBl
[ "def", "mf_aBl", "(", "self", ")", ":", "mf_aBl", "=", "self", ".", "_mf_aBl", "=", "np", ".", "zeros", "(", "(", "self", ".", "T", ",", "self", ".", "num_states", ")", ")", "ids", ",", "dds", ",", "eds", "=", "self", ".", "init_dynamics_distns", ",", "self", ".", "dynamics_distns", ",", "self", ".", "emission_distns", "for", "idx", ",", "(", "d1", ",", "d2", ",", "d3", ")", "in", "enumerate", "(", "zip", "(", "ids", ",", "dds", ",", "eds", ")", ")", ":", "mf_aBl", "[", "0", ",", "idx", "]", "=", "d1", ".", "expected_log_likelihood", "(", "stats", "=", "self", ".", "E_init_stats", ")", "mf_aBl", "[", ":", "-", "1", ",", "idx", "]", "+=", "d2", ".", "expected_log_likelihood", "(", "stats", "=", "self", ".", "E_dynamics_stats", ")", "mf_aBl", "[", ":", ",", "idx", "]", "+=", "d3", ".", "expected_log_likelihood", "(", "stats", "=", "self", ".", "E_emission_stats", ")", "mf_aBl", "[", "np", ".", "isnan", "(", "mf_aBl", ")", ".", "any", "(", "1", ")", "]", "=", "0.", "return", "mf_aBl" ]
These are the expected log likelihoods (node potentials) as seen from the discrete states.
[ "These", "are", "the", "expected", "log", "likelihoods", "(", "node", "potentials", ")", "as", "seen", "from", "the", "discrete", "states", "." ]
python
train
symphonyoss/python-symphony
examples/hello_world.py
https://github.com/symphonyoss/python-symphony/blob/b939f35fbda461183ec0c01790c754f89a295be0/examples/hello_world.py#L19-L29
def main():
    ''' main program loop '''
    conn = symphony.Config('example-bot.cfg')
    # connect to pod
    agent, pod, symphony_sid = conn.connect()
    agent.test_echo('test')
    # main loop
    msgFormat = 'MESSAGEML'
    message = '<messageML> hello world. </messageML>'
    # send message
    agent.send_message(symphony_sid, msgFormat, message)
[ "def", "main", "(", ")", ":", "conn", "=", "symphony", ".", "Config", "(", "'example-bot.cfg'", ")", "# connect to pod", "agent", ",", "pod", ",", "symphony_sid", "=", "conn", ".", "connect", "(", ")", "agent", ".", "test_echo", "(", "'test'", ")", "# main loop", "msgFormat", "=", "'MESSAGEML'", "message", "=", "'<messageML> hello world. </messageML>'", "# send message", "agent", ".", "send_message", "(", "symphony_sid", ",", "msgFormat", ",", "message", ")" ]
main program loop
[ "main", "program", "loop" ]
python
train
MacHu-GWU/pyknackhq-project
pyknackhq/client.py
https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/client.py#L278-L292
def delete_one(self, id_):
    """Delete one record.

    Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#delete

    :param id_: record id_

    **中文文档**

    删除一条记录
    """
    url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
        self.key, id_)
    res = self.delete(url)
    return res
[ "def", "delete_one", "(", "self", ",", "id_", ")", ":", "url", "=", "\"https://api.knackhq.com/v1/objects/%s/records/%s\"", "%", "(", "self", ".", "key", ",", "id_", ")", "res", "=", "self", ".", "delete", "(", "url", ")", "return", "res" ]
Delete one record. Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#delete :param id_: record id_ **中文文档** 删除一条记录
[ "Delete", "one", "record", ".", "Ref", ":", "http", ":", "//", "helpdesk", ".", "knackhq", ".", "com", "/", "support", "/", "solutions", "/", "articles", "/", "5000446111", "-", "api", "-", "reference", "-", "root", "-", "access#delete", ":", "param", "id_", ":", "record", "id_", "**", "中文文档", "**", "删除一条记录" ]
python
train
The-Politico/politico-civic-election
election/models/election.py
https://github.com/The-Politico/politico-civic-election/blob/44c6872c419909df616e997e1990c4d295b25eda/election/models/election.py#L150-L160
def get_delegates(self):
    """
    Get all pledged delegates for any candidate in this election.
    """
    candidate_elections = CandidateElection.objects.filter(election=self)

    delegates = None
    for ce in candidate_elections:
        delegates = delegates | ce.delegates.all()

    return delegates
[ "def", "get_delegates", "(", "self", ")", ":", "candidate_elections", "=", "CandidateElection", ".", "objects", ".", "filter", "(", "election", "=", "self", ")", "delegates", "=", "None", "for", "ce", "in", "candidate_elections", ":", "delegates", "=", "delegates", "|", "ce", ".", "delegates", ".", "all", "(", ")", "return", "delegates" ]
Get all pledged delegates for any candidate in this election.
[ "Get", "all", "pledged", "delegates", "for", "any", "candidate", "in", "this", "election", "." ]
python
train
pyviz/holoviews
holoviews/plotting/util.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/util.py#L207-L237
def split_dmap_overlay(obj, depth=0):
    """
    Splits a DynamicMap into the original component layers
    it was constructed from by traversing the graph to search for
    dynamically overlaid components (i.e. constructed by using * on
    a DynamicMap). Useful for assigning subplots of an OverlayPlot
    the streams that are responsible for driving their updates.
    Allows the OverlayPlot to determine if a stream update should
    redraw a particular subplot.
    """
    layers = []
    if isinstance(obj, DynamicMap):
        if issubclass(obj.type, NdOverlay) and not depth:
            for v in obj.last.values():
                layers.append(obj)
        elif issubclass(obj.type, Overlay):
            if obj.callback.inputs and is_dynamic_overlay(obj):
                for inp in obj.callback.inputs:
                    layers += split_dmap_overlay(inp, depth+1)
            else:
                for v in obj.last.values():
                    layers.append(obj)
        else:
            layers.append(obj)
        return layers
    if isinstance(obj, Overlay):
        for k, v in obj.items():
            layers.append(v)
    else:
        layers.append(obj)
    return layers
[ "def", "split_dmap_overlay", "(", "obj", ",", "depth", "=", "0", ")", ":", "layers", "=", "[", "]", "if", "isinstance", "(", "obj", ",", "DynamicMap", ")", ":", "if", "issubclass", "(", "obj", ".", "type", ",", "NdOverlay", ")", "and", "not", "depth", ":", "for", "v", "in", "obj", ".", "last", ".", "values", "(", ")", ":", "layers", ".", "append", "(", "obj", ")", "elif", "issubclass", "(", "obj", ".", "type", ",", "Overlay", ")", ":", "if", "obj", ".", "callback", ".", "inputs", "and", "is_dynamic_overlay", "(", "obj", ")", ":", "for", "inp", "in", "obj", ".", "callback", ".", "inputs", ":", "layers", "+=", "split_dmap_overlay", "(", "inp", ",", "depth", "+", "1", ")", "else", ":", "for", "v", "in", "obj", ".", "last", ".", "values", "(", ")", ":", "layers", ".", "append", "(", "obj", ")", "else", ":", "layers", ".", "append", "(", "obj", ")", "return", "layers", "if", "isinstance", "(", "obj", ",", "Overlay", ")", ":", "for", "k", ",", "v", "in", "obj", ".", "items", "(", ")", ":", "layers", ".", "append", "(", "v", ")", "else", ":", "layers", ".", "append", "(", "obj", ")", "return", "layers" ]
Splits a DynamicMap into the original component layers it was constructed from by traversing the graph to search for dynamically overlaid components (i.e. constructed by using * on a DynamicMap). Useful for assigning subplots of an OverlayPlot the streams that are responsible for driving their updates. Allows the OverlayPlot to determine if a stream update should redraw a particular subplot.
[ "Splits", "a", "DynamicMap", "into", "the", "original", "component", "layers", "it", "was", "constructed", "from", "by", "traversing", "the", "graph", "to", "search", "for", "dynamically", "overlaid", "components", "(", "i", ".", "e", ".", "constructed", "by", "using", "*", "on", "a", "DynamicMap", ")", ".", "Useful", "for", "assigning", "subplots", "of", "an", "OverlayPlot", "the", "streams", "that", "are", "responsible", "for", "driving", "their", "updates", ".", "Allows", "the", "OverlayPlot", "to", "determine", "if", "a", "stream", "update", "should", "redraw", "a", "particular", "subplot", "." ]
python
train
bpython/curtsies
examples/tttplaybitboard.py
https://github.com/bpython/curtsies/blob/223e42b97fbf6c86b479ed4f0963a067333c5a63/examples/tttplaybitboard.py#L139-L142
def is_won(grid):
    "Did the latest move win the game?"
    p, q = grid
    return any(way == (way & q) for way in ways_to_win)
[ "def", "is_won", "(", "grid", ")", ":", "p", ",", "q", "=", "grid", "return", "any", "(", "way", "==", "(", "way", "&", "q", ")", "for", "way", "in", "ways_to_win", ")" ]
Did the latest move win the game?
[ "Did", "the", "latest", "move", "win", "the", "game?" ]
python
train
PmagPy/PmagPy
pmagpy/ipmag.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L2542-L2563
def equi(map_axis, centerlon, centerlat, radius, color, alpha=1.0):
    """
    This function enables A95 error ellipses to be drawn in cartopy
    around paleomagnetic poles in conjunction with shoot
    (modified from: http://www.geophysique.be/2011/02/20/matplotlib-basemap-tutorial-09-drawing-circles/).
    """
    if not has_cartopy:
        print('-W- cartopy must be installed to run ipmag.equi')
        return
    glon1 = centerlon
    glat1 = centerlat
    X = []
    Y = []
    for azimuth in range(0, 360):
        glon2, glat2, baz = shoot(glon1, glat1, azimuth, radius)
        X.append(glon2)
        Y.append(glat2)
    X.append(X[0])
    Y.append(Y[0])
    plt.plot(X[::-1], Y[::-1], color=color,
             transform=ccrs.Geodetic(), alpha=alpha)
[ "def", "equi", "(", "map_axis", ",", "centerlon", ",", "centerlat", ",", "radius", ",", "color", ",", "alpha", "=", "1.0", ")", ":", "if", "not", "has_cartopy", ":", "print", "(", "'-W- cartopy must be installed to run ipmag.equi'", ")", "return", "glon1", "=", "centerlon", "glat1", "=", "centerlat", "X", "=", "[", "]", "Y", "=", "[", "]", "for", "azimuth", "in", "range", "(", "0", ",", "360", ")", ":", "glon2", ",", "glat2", ",", "baz", "=", "shoot", "(", "glon1", ",", "glat1", ",", "azimuth", ",", "radius", ")", "X", ".", "append", "(", "glon2", ")", "Y", ".", "append", "(", "glat2", ")", "X", ".", "append", "(", "X", "[", "0", "]", ")", "Y", ".", "append", "(", "Y", "[", "0", "]", ")", "plt", ".", "plot", "(", "X", "[", ":", ":", "-", "1", "]", ",", "Y", "[", ":", ":", "-", "1", "]", ",", "color", "=", "color", ",", "transform", "=", "ccrs", ".", "Geodetic", "(", ")", ",", "alpha", "=", "alpha", ")" ]
This function enables A95 error ellipses to be drawn in cartopy around paleomagnetic poles in conjunction with shoot (modified from: http://www.geophysique.be/2011/02/20/matplotlib-basemap-tutorial-09-drawing-circles/).
[ "This", "function", "enables", "A95", "error", "ellipses", "to", "be", "drawn", "in", "cartopy", "around", "paleomagnetic", "poles", "in", "conjunction", "with", "shoot", "(", "modified", "from", ":", "http", ":", "//", "www", ".", "geophysique", ".", "be", "/", "2011", "/", "02", "/", "20", "/", "matplotlib", "-", "basemap", "-", "tutorial", "-", "09", "-", "drawing", "-", "circles", "/", ")", "." ]
python
train
HazyResearch/fonduer
src/fonduer/learning/disc_models/modules/sparse_linear.py
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/learning/disc_models/modules/sparse_linear.py#L42-L51
def reset_parameters(self):
    """Reinitiate the weight parameters.
    """
    stdv = 1.0 / math.sqrt(self.num_features)
    self.weight.weight.data.uniform_(-stdv, stdv)
    if self.bias is not None:
        self.bias.data.uniform_(-stdv, stdv)
    if self.padding_idx is not None:
        self.weight.weight.data[self.padding_idx].fill_(0)
[ "def", "reset_parameters", "(", "self", ")", ":", "stdv", "=", "1.0", "/", "math", ".", "sqrt", "(", "self", ".", "num_features", ")", "self", ".", "weight", ".", "weight", ".", "data", ".", "uniform_", "(", "-", "stdv", ",", "stdv", ")", "if", "self", ".", "bias", "is", "not", "None", ":", "self", ".", "bias", ".", "data", ".", "uniform_", "(", "-", "stdv", ",", "stdv", ")", "if", "self", ".", "padding_idx", "is", "not", "None", ":", "self", ".", "weight", ".", "weight", ".", "data", "[", "self", ".", "padding_idx", "]", ".", "fill_", "(", "0", ")" ]
Reinitiate the weight parameters.
[ "Reinitiate", "the", "weight", "parameters", "." ]
python
train
PmagPy/PmagPy
pmagpy/nlt.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/nlt.py#L52-L70
def TRMinv(m,a,b):
    WARN = True  # Warn, rather than stop if I encounter a NaN...
    """
    Calculate applied field from TRM using tanh relationship
    TRMinv(m)=(1/b)*atanh(m/a)
    """
    if float(a)==0:
        print('ERROR: TRMinv: a==0.')
        if not WARN : sys.exit()
    if float(b)==0:
        print('ERROR: TRMinv: b==0.')
        if not WARN : sys.exit()
    x = (old_div(float(m), float(a)))
    if (1-x)<=0:
        print('ERROR: TRMinv: (1-x)==0.')
        return -1
        if not WARN : sys.exit()
    f = (old_div(1.,float(b))) * 0.5 * math.log (old_div((1+x), (1-x)))
    return float(f)
[ "def", "TRMinv", "(", "m", ",", "a", ",", "b", ")", ":", "WARN", "=", "True", "# Warn, rather than stop if I encounter a NaN...", "if", "float", "(", "a", ")", "==", "0", ":", "print", "(", "'ERROR: TRMinv: a==0.'", ")", "if", "not", "WARN", ":", "sys", ".", "exit", "(", ")", "if", "float", "(", "b", ")", "==", "0", ":", "print", "(", "'ERROR: TRMinv: b==0.'", ")", "if", "not", "WARN", ":", "sys", ".", "exit", "(", ")", "x", "=", "(", "old_div", "(", "float", "(", "m", ")", ",", "float", "(", "a", ")", ")", ")", "if", "(", "1", "-", "x", ")", "<=", "0", ":", "print", "(", "'ERROR: TRMinv: (1-x)==0.'", ")", "return", "-", "1", "if", "not", "WARN", ":", "sys", ".", "exit", "(", ")", "f", "=", "(", "old_div", "(", "1.", ",", "float", "(", "b", ")", ")", ")", "*", "0.5", "*", "math", ".", "log", "(", "old_div", "(", "(", "1", "+", "x", ")", ",", "(", "1", "-", "x", ")", ")", ")", "return", "float", "(", "f", ")" ]
Calculate applied field from TRM using tanh relationship TRMinv(m)=(1/b)*atanh(m/a)
[ "Calculate", "applied", "field", "from", "TRM", "using", "tanh", "relationship", "TRMinv", "(", "m", ")", "=", "(", "1", "/", "b", ")", "*", "atanh", "(", "m", "/", "a", ")" ]
python
train
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/app/canvas.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/app/canvas.py#L520-L541
def render(self):
    """ Render the canvas to an offscreen buffer and return the image
        array.

        Returns
        -------
        image : array
            Numpy array of type ubyte and shape (h, w, 4). Index [0, 0] is the
            upper-left corner of the rendered region.
    """
    self.set_current()
    size = self.physical_size
    fbo = FrameBuffer(color=RenderBuffer(size[::-1]),
                      depth=RenderBuffer(size[::-1]))

    try:
        fbo.activate()
        self.events.draw()
        return fbo.read()
    finally:
        fbo.deactivate()
[ "def", "render", "(", "self", ")", ":", "self", ".", "set_current", "(", ")", "size", "=", "self", ".", "physical_size", "fbo", "=", "FrameBuffer", "(", "color", "=", "RenderBuffer", "(", "size", "[", ":", ":", "-", "1", "]", ")", ",", "depth", "=", "RenderBuffer", "(", "size", "[", ":", ":", "-", "1", "]", ")", ")", "try", ":", "fbo", ".", "activate", "(", ")", "self", ".", "events", ".", "draw", "(", ")", "return", "fbo", ".", "read", "(", ")", "finally", ":", "fbo", ".", "deactivate", "(", ")" ]
Render the canvas to an offscreen buffer and return the image array. Returns ------- image : array Numpy array of type ubyte and shape (h, w, 4). Index [0, 0] is the upper-left corner of the rendered region.
[ "Render", "the", "canvas", "to", "an", "offscreen", "buffer", "and", "return", "the", "image", "array", "." ]
python
train
gem/oq-engine
openquake/calculators/base.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/base.py#L534-L566
def read_exposure(self, haz_sitecol=None):  # after load_risk_model
    """
    Read the exposure, the riskmodel and update the attributes
    .sitecol, .assetcol
    """
    with self.monitor('reading exposure', autoflush=True):
        self.sitecol, self.assetcol, discarded = (
            readinput.get_sitecol_assetcol(
                self.oqparam, haz_sitecol, self.riskmodel.loss_types))
        if len(discarded):
            self.datastore['discarded'] = discarded
            if hasattr(self, 'rup'):
                # this is normal for the case of scenario from rupture
                logging.info('%d assets were discarded because too far '
                             'from the rupture; use `oq show discarded` '
                             'to show them and `oq plot_assets` to plot '
                             'them' % len(discarded))
            elif not self.oqparam.discard_assets:  # raise an error
                self.datastore['sitecol'] = self.sitecol
                self.datastore['assetcol'] = self.assetcol
                raise RuntimeError(
                    '%d assets were discarded; use `oq show discarded` to'
                    ' show them and `oq plot_assets` to plot them' %
                    len(discarded))

    # reduce the riskmodel to the relevant taxonomies
    taxonomies = set(taxo for taxo in self.assetcol.tagcol.taxonomy
                     if taxo != '?')
    if len(self.riskmodel.taxonomies) > len(taxonomies):
        logging.info('Reducing risk model from %d to %d taxonomies',
                     len(self.riskmodel.taxonomies), len(taxonomies))
        self.riskmodel = self.riskmodel.reduce(taxonomies)
    return readinput.exposure
[ "def", "read_exposure", "(", "self", ",", "haz_sitecol", "=", "None", ")", ":", "# after load_risk_model", "with", "self", ".", "monitor", "(", "'reading exposure'", ",", "autoflush", "=", "True", ")", ":", "self", ".", "sitecol", ",", "self", ".", "assetcol", ",", "discarded", "=", "(", "readinput", ".", "get_sitecol_assetcol", "(", "self", ".", "oqparam", ",", "haz_sitecol", ",", "self", ".", "riskmodel", ".", "loss_types", ")", ")", "if", "len", "(", "discarded", ")", ":", "self", ".", "datastore", "[", "'discarded'", "]", "=", "discarded", "if", "hasattr", "(", "self", ",", "'rup'", ")", ":", "# this is normal for the case of scenario from rupture", "logging", ".", "info", "(", "'%d assets were discarded because too far '", "'from the rupture; use `oq show discarded` '", "'to show them and `oq plot_assets` to plot '", "'them'", "%", "len", "(", "discarded", ")", ")", "elif", "not", "self", ".", "oqparam", ".", "discard_assets", ":", "# raise an error", "self", ".", "datastore", "[", "'sitecol'", "]", "=", "self", ".", "sitecol", "self", ".", "datastore", "[", "'assetcol'", "]", "=", "self", ".", "assetcol", "raise", "RuntimeError", "(", "'%d assets were discarded; use `oq show discarded` to'", "' show them and `oq plot_assets` to plot them'", "%", "len", "(", "discarded", ")", ")", "# reduce the riskmodel to the relevant taxonomies", "taxonomies", "=", "set", "(", "taxo", "for", "taxo", "in", "self", ".", "assetcol", ".", "tagcol", ".", "taxonomy", "if", "taxo", "!=", "'?'", ")", "if", "len", "(", "self", ".", "riskmodel", ".", "taxonomies", ")", ">", "len", "(", "taxonomies", ")", ":", "logging", ".", "info", "(", "'Reducing risk model from %d to %d taxonomies'", ",", "len", "(", "self", ".", "riskmodel", ".", "taxonomies", ")", ",", "len", "(", "taxonomies", ")", ")", "self", ".", "riskmodel", "=", "self", ".", "riskmodel", ".", "reduce", "(", "taxonomies", ")", "return", "readinput", ".", "exposure" ]
Read the exposure, the riskmodel and update the attributes .sitecol, .assetcol
[ "Read", "the", "exposure", "the", "riskmodel", "and", "update", "the", "attributes", ".", "sitecol", ".", "assetcol" ]
python
train
MatterMiners/cobald
cobald/daemon/config/mapping.py
https://github.com/MatterMiners/cobald/blob/264138de4382d1c9b53fabcbc6660e10b33a914d/cobald/daemon/config/mapping.py#L55-L67
def construct(self, mapping: dict, **kwargs):
    """
    Construct an object from a mapping

    :param mapping: the constructor definition, with ``__type__`` name and keyword arguments
    :param kwargs: additional keyword arguments to pass to the constructor
    """
    assert '__type__' not in kwargs and '__args__' not in kwargs
    mapping = {**mapping, **kwargs}
    factory_fqdn = mapping.pop('__type__')
    factory = self.load_name(factory_fqdn)
    args = mapping.pop('__args__', [])
    return factory(*args, **mapping)
[ "def", "construct", "(", "self", ",", "mapping", ":", "dict", ",", "*", "*", "kwargs", ")", ":", "assert", "'__type__'", "not", "in", "kwargs", "and", "'__args__'", "not", "in", "kwargs", "mapping", "=", "{", "*", "*", "mapping", ",", "*", "*", "kwargs", "}", "factory_fqdn", "=", "mapping", ".", "pop", "(", "'__type__'", ")", "factory", "=", "self", ".", "load_name", "(", "factory_fqdn", ")", "args", "=", "mapping", ".", "pop", "(", "'__args__'", ",", "[", "]", ")", "return", "factory", "(", "*", "args", ",", "*", "*", "mapping", ")" ]
Construct an object from a mapping :param mapping: the constructor definition, with ``__type__`` name and keyword arguments :param kwargs: additional keyword arguments to pass to the constructor
[ "Construct", "an", "object", "from", "a", "mapping" ]
python
train
totalgood/pugnlp
src/pugnlp/detector_morse.py
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/detector_morse.py#L198-L212
def segments(self, text):
    """
    Given a string of `text`, return a generator yielding each
    hypothesized sentence string
    """
    start = 0
    for (L, P, R, B, end) in Detector.candidates(text):
        # if there's already a newline there, we have nothing to do
        if B:
            continue
        if self.predict(L, P, R):
            yield text[start:end].rstrip()
            start = end
        # otherwise, there's probably not a sentence boundary here
    yield text[start:].rstrip()
[ "def", "segments", "(", "self", ",", "text", ")", ":", "start", "=", "0", "for", "(", "L", ",", "P", ",", "R", ",", "B", ",", "end", ")", "in", "Detector", ".", "candidates", "(", "text", ")", ":", "# if there's already a newline there, we have nothing to do", "if", "B", ":", "continue", "if", "self", ".", "predict", "(", "L", ",", "P", ",", "R", ")", ":", "yield", "text", "[", "start", ":", "end", "]", ".", "rstrip", "(", ")", "start", "=", "end", "# otherwise, there's probably not a sentence boundary here", "yield", "text", "[", "start", ":", "]", ".", "rstrip", "(", ")" ]
Given a string of `text`, return a generator yielding each hypothesized sentence string
[ "Given", "a", "string", "of", "text", "return", "a", "generator", "yielding", "each", "hypothesized", "sentence", "string" ]
python
train
shoebot/shoebot
lib/graph/__init__.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/graph/__init__.py#L512-L518
def betweenness_centrality(self, normalized=True):
    """ Calculates betweenness centrality and returns an node id -> weight dictionary.
    Node betweenness weights are updated in the process.
    """
    bc = proximity.brandes_betweenness_centrality(self, normalized)
    for id, w in bc.iteritems():
        self[id]._betweenness = w
    return bc
[ "def", "betweenness_centrality", "(", "self", ",", "normalized", "=", "True", ")", ":", "bc", "=", "proximity", ".", "brandes_betweenness_centrality", "(", "self", ",", "normalized", ")", "for", "id", ",", "w", "in", "bc", ".", "iteritems", "(", ")", ":", "self", "[", "id", "]", ".", "_betweenness", "=", "w", "return", "bc" ]
Calculates betweenness centrality and returns an node id -> weight dictionary. Node betweenness weights are updated in the process.
[ "Calculates", "betweenness", "centrality", "and", "returns", "an", "node", "id", "-", ">", "weight", "dictionary", ".", "Node", "betweenness", "weights", "are", "updated", "in", "the", "process", "." ]
python
valid
fastai/fastai
fastai/text/transform.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/text/transform.py#L132-L134
def textify(self, nums:Collection[int], sep=' ') -> List[str]:
    "Convert a list of `nums` to their tokens."
    return sep.join([self.itos[i] for i in nums]) if sep is not None else [self.itos[i] for i in nums]
[ "def", "textify", "(", "self", ",", "nums", ":", "Collection", "[", "int", "]", ",", "sep", "=", "' '", ")", "->", "List", "[", "str", "]", ":", "return", "sep", ".", "join", "(", "[", "self", ".", "itos", "[", "i", "]", "for", "i", "in", "nums", "]", ")", "if", "sep", "is", "not", "None", "else", "[", "self", ".", "itos", "[", "i", "]", "for", "i", "in", "nums", "]" ]
Convert a list of `nums` to their tokens.
[ "Convert", "a", "list", "of", "nums", "to", "their", "tokens", "." ]
python
train
nfcpy/nfcpy
src/nfc/clf/acr122.py
https://github.com/nfcpy/nfcpy/blob/6649146d1afdd5e82b2b6b1ea00aa58d50785117/src/nfc/clf/acr122.py#L189-L196
def set_buzzer_and_led_to_active(self, duration_in_ms=300):
    """Turn on buzzer and set LED to red only. The timeout here must
    exceed the total buzzer/flash duration defined in bytes 5-8.
    """
    duration_in_tenths_of_second = min(duration_in_ms / 100, 255)
    timeout_in_seconds = (duration_in_tenths_of_second + 1) / 10.0
    data = "FF00400D04{:02X}000101".format(duration_in_tenths_of_second)
    self.ccid_xfr_block(bytearray.fromhex(data),
                        timeout=timeout_in_seconds)
[ "def", "set_buzzer_and_led_to_active", "(", "self", ",", "duration_in_ms", "=", "300", ")", ":", "duration_in_tenths_of_second", "=", "min", "(", "duration_in_ms", "/", "100", ",", "255", ")", "timeout_in_seconds", "=", "(", "duration_in_tenths_of_second", "+", "1", ")", "/", "10.0", "data", "=", "\"FF00400D04{:02X}000101\"", ".", "format", "(", "duration_in_tenths_of_second", ")", "self", ".", "ccid_xfr_block", "(", "bytearray", ".", "fromhex", "(", "data", ")", ",", "timeout", "=", "timeout_in_seconds", ")" ]
Turn on buzzer and set LED to red only. The timeout here must exceed the total buzzer/flash duration defined in bytes 5-8.
[ "Turn", "on", "buzzer", "and", "set", "LED", "to", "red", "only", ".", "The", "timeout", "here", "must", "exceed", "the", "total", "buzzer", "/", "flash", "duration", "defined", "in", "bytes", "5", "-", "8", "." ]
python
train
polyaxon/polyaxon-cli
polyaxon_cli/cli/experiment_group.py
https://github.com/polyaxon/polyaxon-cli/blob/a7f5eed74d4d909cad79059f3c21c58606881449/polyaxon_cli/cli/experiment_group.py#L116-L160
def update(ctx, name, description, tags):
    """Update experiment group.

    Uses [Caching](/references/polyaxon-cli/#caching)

    Example:

    \b
    ```bash
    $ polyaxon group -g 2 update --description="new description for this group"
    ```

    \b
    ```bash
    $ polyaxon update --tags="foo, bar"
    ```
    """
    user, project_name, _group = get_project_group_or_local(ctx.obj.get('project'),
                                                            ctx.obj.get('group'))
    update_dict = {}

    if name:
        update_dict['name'] = name

    if description:
        update_dict['description'] = description

    tags = validate_tags(tags)
    if tags:
        update_dict['tags'] = tags

    if not update_dict:
        Printer.print_warning('No argument was provided to update the experiment group.')
        sys.exit(0)

    try:
        response = PolyaxonClient().experiment_group.update_experiment_group(
            user, project_name, _group, update_dict)
    except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
        Printer.print_error('Could not update experiment group `{}`.'.format(_group))
        Printer.print_error('Error message `{}`.'.format(e))
        sys.exit(1)

    Printer.print_success("Experiment group updated.")
    get_group_details(response)
[ "def", "update", "(", "ctx", ",", "name", ",", "description", ",", "tags", ")", ":", "user", ",", "project_name", ",", "_group", "=", "get_project_group_or_local", "(", "ctx", ".", "obj", ".", "get", "(", "'project'", ")", ",", "ctx", ".", "obj", ".", "get", "(", "'group'", ")", ")", "update_dict", "=", "{", "}", "if", "name", ":", "update_dict", "[", "'name'", "]", "=", "name", "if", "description", ":", "update_dict", "[", "'description'", "]", "=", "description", "tags", "=", "validate_tags", "(", "tags", ")", "if", "tags", ":", "update_dict", "[", "'tags'", "]", "=", "tags", "if", "not", "update_dict", ":", "Printer", ".", "print_warning", "(", "'No argument was provided to update the experiment group.'", ")", "sys", ".", "exit", "(", "0", ")", "try", ":", "response", "=", "PolyaxonClient", "(", ")", ".", "experiment_group", ".", "update_experiment_group", "(", "user", ",", "project_name", ",", "_group", ",", "update_dict", ")", "except", "(", "PolyaxonHTTPError", ",", "PolyaxonShouldExitError", ",", "PolyaxonClientException", ")", "as", "e", ":", "Printer", ".", "print_error", "(", "'Could not update experiment group `{}`.'", ".", "format", "(", "_group", ")", ")", "Printer", ".", "print_error", "(", "'Error message `{}`.'", ".", "format", "(", "e", ")", ")", "sys", ".", "exit", "(", "1", ")", "Printer", ".", "print_success", "(", "\"Experiment group updated.\"", ")", "get_group_details", "(", "response", ")" ]
Update experiment group. Uses [Caching](/references/polyaxon-cli/#caching) Example: \b ```bash $ polyaxon group -g 2 update --description="new description for this group" ``` \b ```bash $ polyaxon update --tags="foo, bar" ```
[ "Update", "experiment", "group", "." ]
python
valid
sqlalchemy-redshift/sqlalchemy-redshift
sqlalchemy_redshift/dialect.py
https://github.com/sqlalchemy-redshift/sqlalchemy-redshift/blob/b1a24872da0c8151aa60da4524605b6243d8d765/sqlalchemy_redshift/dialect.py#L419-L437
def get_columns(self, connection, table_name, schema=None, **kw):
    """
    Return information about columns in `table_name`.

    Overrides interface
    :meth:`~sqlalchemy.engine.interfaces.Dialect.get_columns`.
    """
    cols = self._get_redshift_columns(connection, table_name, schema, **kw)

    if not self._domains:
        self._domains = self._load_domains(connection)

    domains = self._domains
    columns = []
    for col in cols:
        column_info = self._get_column_info(
            name=col.name, format_type=col.format_type,
            default=col.default, notnull=col.notnull, domains=domains,
            enums=[], schema=col.schema, encode=col.encode)
        columns.append(column_info)
    return columns
[ "def", "get_columns", "(", "self", ",", "connection", ",", "table_name", ",", "schema", "=", "None", ",", "*", "*", "kw", ")", ":", "cols", "=", "self", ".", "_get_redshift_columns", "(", "connection", ",", "table_name", ",", "schema", ",", "*", "*", "kw", ")", "if", "not", "self", ".", "_domains", ":", "self", ".", "_domains", "=", "self", ".", "_load_domains", "(", "connection", ")", "domains", "=", "self", ".", "_domains", "columns", "=", "[", "]", "for", "col", "in", "cols", ":", "column_info", "=", "self", ".", "_get_column_info", "(", "name", "=", "col", ".", "name", ",", "format_type", "=", "col", ".", "format_type", ",", "default", "=", "col", ".", "default", ",", "notnull", "=", "col", ".", "notnull", ",", "domains", "=", "domains", ",", "enums", "=", "[", "]", ",", "schema", "=", "col", ".", "schema", ",", "encode", "=", "col", ".", "encode", ")", "columns", ".", "append", "(", "column_info", ")", "return", "columns" ]
Return information about columns in `table_name`. Overrides interface :meth:`~sqlalchemy.engine.interfaces.Dialect.get_columns`.
[ "Return", "information", "about", "columns", "in", "table_name", "." ]
python
train
apache/incubator-mxnet
example/gluon/audio/urban_sounds/train.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/audio/urban_sounds/train.py#L29-L37
def evaluate_accuracy(data_iterator, net):
    """Function to evaluate accuracy of any data iterator passed to it as an argument"""
    acc = mx.metric.Accuracy()
    for data, label in data_iterator:
        output = net(data)
        predictions = nd.argmax(output, axis=1)
        predictions = predictions.reshape((-1, 1))
        acc.update(preds=predictions, labels=label)
    return acc.get()[1]
[ "def", "evaluate_accuracy", "(", "data_iterator", ",", "net", ")", ":", "acc", "=", "mx", ".", "metric", ".", "Accuracy", "(", ")", "for", "data", ",", "label", "in", "data_iterator", ":", "output", "=", "net", "(", "data", ")", "predictions", "=", "nd", ".", "argmax", "(", "output", ",", "axis", "=", "1", ")", "predictions", "=", "predictions", ".", "reshape", "(", "(", "-", "1", ",", "1", ")", ")", "acc", ".", "update", "(", "preds", "=", "predictions", ",", "labels", "=", "label", ")", "return", "acc", ".", "get", "(", ")", "[", "1", "]" ]
Function to evaluate accuracy of any data iterator passed to it as an argument
[ "Function", "to", "evaluate", "accuracy", "of", "any", "data", "iterator", "passed", "to", "it", "as", "an", "argument" ]
python
train
pkgw/pwkit
pwkit/astutil.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/astutil.py#L718-L742
def load_skyfield_data():
    """Load data files used in Skyfield. This will download files from the
    internet if they haven't been downloaded before.

    Skyfield downloads files to the current directory by default, which is
    not ideal. Here we abuse astropy and use its cache directory to cache
    the data files per-user. If we start downloading files in other places
    in pwkit we should maybe make this system more generic. And the dep on
    astropy is not at all necessary.

    Skyfield will print out a progress bar as it downloads things.

    Returns ``(planets, ts)``, the standard Skyfield ephemeris and timescale
    data files.
    """
    import os.path
    from astropy.config import paths
    from skyfield.api import Loader

    cache_dir = os.path.join(paths.get_cache_dir(), 'pwkit')
    loader = Loader(cache_dir)
    planets = loader('de421.bsp')
    ts = loader.timescale()
    return planets, ts
[ "def", "load_skyfield_data", "(", ")", ":", "import", "os", ".", "path", "from", "astropy", ".", "config", "import", "paths", "from", "skyfield", ".", "api", "import", "Loader", "cache_dir", "=", "os", ".", "path", ".", "join", "(", "paths", ".", "get_cache_dir", "(", ")", ",", "'pwkit'", ")", "loader", "=", "Loader", "(", "cache_dir", ")", "planets", "=", "loader", "(", "'de421.bsp'", ")", "ts", "=", "loader", ".", "timescale", "(", ")", "return", "planets", ",", "ts" ]
Load data files used in Skyfield. This will download files from the internet if they haven't been downloaded before. Skyfield downloads files to the current directory by default, which is not ideal. Here we abuse astropy and use its cache directory to cache the data files per-user. If we start downloading files in other places in pwkit we should maybe make this system more generic. And the dep on astropy is not at all necessary. Skyfield will print out a progress bar as it downloads things. Returns ``(planets, ts)``, the standard Skyfield ephemeris and timescale data files.
[ "Load", "data", "files", "used", "in", "Skyfield", ".", "This", "will", "download", "files", "from", "the", "internet", "if", "they", "haven", "t", "been", "downloaded", "before", "." ]
python
train
KelSolaar/Manager
manager/components_manager.py
https://github.com/KelSolaar/Manager/blob/39c8153fc021fc8a76e345a6e336ec2644f089d1/manager/components_manager.py#L509-L520
def author(self, value):
    """
    Setter for **self.__author** attribute.

    :param value: Attribute value.
    :type value: unicode
    """
    if value is not None:
        assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
            "author", value)
    self.__author = value
[ "def", "author", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "assert", "type", "(", "value", ")", "is", "unicode", ",", "\"'{0}' attribute: '{1}' type is not 'unicode'!\"", ".", "format", "(", "\"author\"", ",", "value", ")", "self", ".", "__author", "=", "value" ]
Setter for **self.__author** attribute. :param value: Attribute value. :type value: unicode
[ "Setter", "for", "**", "self", ".", "__author", "**", "attribute", "." ]
python
train
binux/pyspider
pyspider/processor/project_module.py
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/processor/project_module.py#L108-L116
def _check_projects(self):
    '''Check projects by last update time'''
    for project in self.projectdb.check_update(self.last_check_projects,
                                               ['name', 'updatetime']):
        if project['name'] not in self.projects:
            continue
        if project['updatetime'] > self.projects[project['name']]['info'].get('updatetime', 0):
            self._update_project(project['name'])
    self.last_check_projects = time.time()
[ "def", "_check_projects", "(", "self", ")", ":", "for", "project", "in", "self", ".", "projectdb", ".", "check_update", "(", "self", ".", "last_check_projects", ",", "[", "'name'", ",", "'updatetime'", "]", ")", ":", "if", "project", "[", "'name'", "]", "not", "in", "self", ".", "projects", ":", "continue", "if", "project", "[", "'updatetime'", "]", ">", "self", ".", "projects", "[", "project", "[", "'name'", "]", "]", "[", "'info'", "]", ".", "get", "(", "'updatetime'", ",", "0", ")", ":", "self", ".", "_update_project", "(", "project", "[", "'name'", "]", ")", "self", ".", "last_check_projects", "=", "time", ".", "time", "(", ")" ]
Check projects by last update time
[ "Check", "projects", "by", "last", "update", "time" ]
python
train
broadinstitute/fiss
firecloud/api.py
https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/api.py#L1408-L1420
def add_user_to_group(group, role, email):
    """Add a user to a group the caller owns

    Args:
        group (str): Group name
        role (str) : Role of user for group; either 'member' or 'admin'
        email (str): Email of user or group to add

    Swagger:
        https://api.firecloud.org/#!/Groups/addUserToGroup
    """
    uri = "groups/{0}/{1}/{2}".format(group, role, email)
    return __put(uri)
[ "def", "add_user_to_group", "(", "group", ",", "role", ",", "email", ")", ":", "uri", "=", "\"groups/{0}/{1}/{2}\"", ".", "format", "(", "group", ",", "role", ",", "email", ")", "return", "__put", "(", "uri", ")" ]
Add a user to a group the caller owns Args: group (str): Group name role (str) : Role of user for group; either 'member' or 'admin' email (str): Email of user or group to add Swagger: https://api.firecloud.org/#!/Groups/addUserToGroup
[ "Add", "a", "user", "to", "a", "group", "the", "caller", "owns" ]
python
train
saltstack/salt
salt/utils/minions.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/minions.py#L50-L64
def parse_target(target_expression):
    '''Parse `target_expressing` splitting it into `engine`, `delimiter`,
    `pattern` - returns a dict'''
    match = TARGET_REX.match(target_expression)
    if not match:
        log.warning('Unable to parse target "%s"', target_expression)
        ret = {
            'engine': None,
            'delimiter': None,
            'pattern': target_expression,
        }
    else:
        ret = match.groupdict()
    return ret
[ "def", "parse_target", "(", "target_expression", ")", ":", "match", "=", "TARGET_REX", ".", "match", "(", "target_expression", ")", "if", "not", "match", ":", "log", ".", "warning", "(", "'Unable to parse target \"%s\"'", ",", "target_expression", ")", "ret", "=", "{", "'engine'", ":", "None", ",", "'delimiter'", ":", "None", ",", "'pattern'", ":", "target_expression", ",", "}", "else", ":", "ret", "=", "match", ".", "groupdict", "(", ")", "return", "ret" ]
Parse `target_expressing` splitting it into `engine`, `delimiter`, `pattern` - returns a dict
[ "Parse", "target_expressing", "splitting", "it", "into", "engine", "delimiter", "pattern", "-", "returns", "a", "dict" ]
python
train
spotify/snakebite
snakebite/client.py
https://github.com/spotify/snakebite/blob/6a456e6100b0c1be66cc1f7f9d7f50494f369da3/snakebite/client.py#L1174-L1246
def _find_items(self, paths, processor, include_toplevel=False,
                include_children=False, recurse=False, check_nonexistence=False):
    ''' Request file info from the NameNode and call the processor on the node(s) returned

    :param paths: A list of paths that need to be processed
    :param processor: Method that is called on an node. Method signature should be
        foo(path, node). For additional (static) params, use a lambda.
    :param include_toplevel: Boolean to enable the inclusion of the first node found.
        Example: listing a directory should not include the toplevel, but chmod should
        only operate on the path that is input, so it should include the toplevel.
    :param include_children: Include children (when the path is a directory) in
        processing. Recurse will always include children.
        Example: listing a directory should include children, but chmod shouldn't.
    :param recurse: Recurse into children if they are directories.
    '''
    if not paths:
        paths = [posixpath.join("/user", get_current_username())]

    # Expand paths if necessary (/foo/{bar,baz} --> ['/foo/bar', '/foo/baz'])
    paths = glob.expand_paths(paths)

    for path in paths:
        if not path.startswith("/"):
            path = self._join_user_path(path)
        # Normalize path (remove double /, handle '..', remove trailing /, etc)
        path = self._normalize_path(path)

        log.debug("Trying to find path %s" % path)

        if glob.has_magic(path):
            log.debug("Dealing with globs in %s" % path)
            for item in self._glob_find(path, processor, include_toplevel):
                yield item
        else:
            fileinfo = self._get_file_info(path)
            if not fileinfo and not check_nonexistence:
                raise FileNotFoundException("`%s': No such file or directory" % path)
            elif not fileinfo and check_nonexistence:
                yield processor(path, None)
                continue
            elif fileinfo and check_nonexistence:
                yield {"path": path, "result": False,
                       "error": "File already exists"}
                continue

            if (include_toplevel and fileinfo) or not self._is_dir(fileinfo.fs):
                # Construct the full path before processing
                full_path = self._get_full_path(path, fileinfo.fs)
                log.debug("Added %s to to result set" % full_path)
                entry = processor(full_path, fileinfo.fs)
                yield entry

            if self._is_dir(fileinfo.fs) and (include_children or recurse):
                for node in self._get_dir_listing(path):
                    full_path = self._get_full_path(path, node)
                    entry = processor(full_path, node)
                    yield entry

                    # Recurse into directories
                    if recurse and self._is_dir(node):
                        # Construct the full path before processing
                        full_path = posixpath.join(path, node.path)
                        for item in self._find_items([full_path],
                                                     processor,
                                                     include_toplevel=False,
                                                     include_children=False,
                                                     recurse=recurse):
                            yield item
[ "def", "_find_items", "(", "self", ",", "paths", ",", "processor", ",", "include_toplevel", "=", "False", ",", "include_children", "=", "False", ",", "recurse", "=", "False", ",", "check_nonexistence", "=", "False", ")", ":", "if", "not", "paths", ":", "paths", "=", "[", "posixpath", ".", "join", "(", "\"/user\"", ",", "get_current_username", "(", ")", ")", "]", "# Expand paths if necessary (/foo/{bar,baz} --> ['/foo/bar', '/foo/baz'])", "paths", "=", "glob", ".", "expand_paths", "(", "paths", ")", "for", "path", "in", "paths", ":", "if", "not", "path", ".", "startswith", "(", "\"/\"", ")", ":", "path", "=", "self", ".", "_join_user_path", "(", "path", ")", "# Normalize path (remove double /, handle '..', remove trailing /, etc)", "path", "=", "self", ".", "_normalize_path", "(", "path", ")", "log", ".", "debug", "(", "\"Trying to find path %s\"", "%", "path", ")", "if", "glob", ".", "has_magic", "(", "path", ")", ":", "log", ".", "debug", "(", "\"Dealing with globs in %s\"", "%", "path", ")", "for", "item", "in", "self", ".", "_glob_find", "(", "path", ",", "processor", ",", "include_toplevel", ")", ":", "yield", "item", "else", ":", "fileinfo", "=", "self", ".", "_get_file_info", "(", "path", ")", "if", "not", "fileinfo", "and", "not", "check_nonexistence", ":", "raise", "FileNotFoundException", "(", "\"`%s': No such file or directory\"", "%", "path", ")", "elif", "not", "fileinfo", "and", "check_nonexistence", ":", "yield", "processor", "(", "path", ",", "None", ")", "continue", "elif", "fileinfo", "and", "check_nonexistence", ":", "yield", "{", "\"path\"", ":", "path", ",", "\"result\"", ":", "False", ",", "\"error\"", ":", "\"File already exists\"", "}", "continue", "if", "(", "include_toplevel", "and", "fileinfo", ")", "or", "not", "self", ".", "_is_dir", "(", "fileinfo", ".", "fs", ")", ":", "# Construct the full path before processing", "full_path", "=", "self", ".", "_get_full_path", "(", "path", ",", "fileinfo", ".", "fs", ")", "log", ".", "debug", "(", "\"Added %s to to result set\"", "%", "full_path", ")", "entry", "=", "processor", "(", "full_path", ",", "fileinfo", ".", "fs", ")", "yield", "entry", "if", "self", ".", "_is_dir", "(", "fileinfo", ".", "fs", ")", "and", "(", "include_children", "or", "recurse", ")", ":", "for", "node", "in", "self", ".", "_get_dir_listing", "(", "path", ")", ":", "full_path", "=", "self", ".", "_get_full_path", "(", "path", ",", "node", ")", "entry", "=", "processor", "(", "full_path", ",", "node", ")", "yield", "entry", "# Recurse into directories", "if", "recurse", "and", "self", ".", "_is_dir", "(", "node", ")", ":", "# Construct the full path before processing", "full_path", "=", "posixpath", ".", "join", "(", "path", ",", "node", ".", "path", ")", "for", "item", "in", "self", ".", "_find_items", "(", "[", "full_path", "]", ",", "processor", ",", "include_toplevel", "=", "False", ",", "include_children", "=", "False", ",", "recurse", "=", "recurse", ")", ":", "yield", "item" ]
Request file info from the NameNode and call the processor on the node(s) returned :param paths: A list of paths that need to be processed :param processor: Method that is called on an node. Method signature should be foo(path, node). For additional (static) params, use a lambda. :param include_toplevel: Boolean to enable the inclusion of the first node found. Example: listing a directory should not include the toplevel, but chmod should only operate on the path that is input, so it should include the toplevel. :param include_children: Include children (when the path is a directory) in processing. Recurse will always include children. Example: listing a directory should include children, but chmod shouldn't. :param recurse: Recurse into children if they are directories.
[ "Request", "file", "info", "from", "the", "NameNode", "and", "call", "the", "processor", "on", "the", "node", "(", "s", ")", "returned" ]
python
train
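The callback-driven traversal above is easier to picture in a small local-filesystem analogue of the same processor pattern. This sketch uses os.scandir rather than a NameNode, so everything here is illustrative, not the original client's API:

import os

def find_items(path, processor, include_toplevel=False, recurse=False):
    """Yield processor(path, stat) results, mirroring the callback pattern."""
    if include_toplevel:
        yield processor(path, os.stat(path))
    for entry in os.scandir(path):
        yield processor(entry.path, entry.stat())
        if recurse and entry.is_dir():
            # Recurse into directories, just as _find_items does
            yield from find_items(entry.path, processor, recurse=True)

for item in find_items('.', lambda p, st: (p, st.st_size)):
    print(item)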
PyHDI/Pyverilog
pyverilog/vparser/parser.py
https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L1203-L1207
def p_partselect_pointer_plus(self, p): 'partselect : pointer LBRACKET expression PLUSCOLON expression RBRACKET' p[0] = Partselect(p[1], p[3], Plus( p[3], p[5], lineno=p.lineno(1)), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
[ "def", "p_partselect_pointer_plus", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "Partselect", "(", "p", "[", "1", "]", ",", "p", "[", "3", "]", ",", "Plus", "(", "p", "[", "3", "]", ",", "p", "[", "5", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "p", ".", "set_lineno", "(", "0", ",", "p", ".", "lineno", "(", "1", ")", ")" ]
partselect : pointer LBRACKET expression PLUSCOLON expression RBRACKET
[ "partselect", ":", "pointer", "LBRACKET", "expression", "PLUSCOLON", "expression", "RBRACKET" ]
python
train
bcbio/bcbio-nextgen
bcbio/variation/strelka2.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/strelka2.py#L272-L307
def _postprocess_somatic(in_file, paired): """Post-process somatic calls to provide standard output. - Converts SGT and NT into standard VCF GT fields - Replace generic TUMOR NORMAL names in VCF with sample names. """ out_file = in_file.replace(".vcf.gz", "-fixed.vcf") if not utils.file_exists(out_file) and not utils.file_exists(out_file + ".gz"): with file_transaction(paired.tumor_data, out_file) as tx_out_file: with utils.open_gzipsafe(in_file) as in_handle: with open(tx_out_file, "w") as out_handle: added_gt = False normal_index, tumor_index = (None, None) for line in in_handle: if line.startswith("##FORMAT") and not added_gt: added_gt = True out_handle.write('##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n') out_handle.write(line) elif line.startswith("#CHROM"): assert added_gt parts = line.strip().split("\t") normal_index = parts.index("NORMAL") tumor_index = parts.index("TUMOR") line = line.replace("NORMAL", paired.normal_name).replace("TUMOR", paired.tumor_name) out_handle.write(line) elif line.startswith("#"): out_handle.write(line) else: parts = line.rstrip().split("\t") tumor_gt, normal_gt = _tumor_normal_genotypes(parts[3], parts[4].split(","), parts[7].split(";"), in_file, parts[:2]) parts[8] = "GT:%s" % parts[8] parts[normal_index] = "%s:%s" % (normal_gt, parts[normal_index]) parts[tumor_index] = "%s:%s" % (tumor_gt, parts[tumor_index]) out_handle.write("\t".join(parts) + "\n") return vcfutils.bgzip_and_index(out_file, paired.tumor_data["config"])
[ "def", "_postprocess_somatic", "(", "in_file", ",", "paired", ")", ":", "out_file", "=", "in_file", ".", "replace", "(", "\".vcf.gz\"", ",", "\"-fixed.vcf\"", ")", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", "and", "not", "utils", ".", "file_exists", "(", "out_file", "+", "\".gz\"", ")", ":", "with", "file_transaction", "(", "paired", ".", "tumor_data", ",", "out_file", ")", "as", "tx_out_file", ":", "with", "utils", ".", "open_gzipsafe", "(", "in_file", ")", "as", "in_handle", ":", "with", "open", "(", "tx_out_file", ",", "\"w\"", ")", "as", "out_handle", ":", "added_gt", "=", "False", "normal_index", ",", "tumor_index", "=", "(", "None", ",", "None", ")", "for", "line", "in", "in_handle", ":", "if", "line", ".", "startswith", "(", "\"##FORMAT\"", ")", "and", "not", "added_gt", ":", "added_gt", "=", "True", "out_handle", ".", "write", "(", "'##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">\\n'", ")", "out_handle", ".", "write", "(", "line", ")", "elif", "line", ".", "startswith", "(", "\"#CHROM\"", ")", ":", "assert", "added_gt", "parts", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "\"\\t\"", ")", "normal_index", "=", "parts", ".", "index", "(", "\"NORMAL\"", ")", "tumor_index", "=", "parts", ".", "index", "(", "\"TUMOR\"", ")", "line", "=", "line", ".", "replace", "(", "\"NORMAL\"", ",", "paired", ".", "normal_name", ")", ".", "replace", "(", "\"TUMOR\"", ",", "paired", ".", "tumor_name", ")", "out_handle", ".", "write", "(", "line", ")", "elif", "line", ".", "startswith", "(", "\"#\"", ")", ":", "out_handle", ".", "write", "(", "line", ")", "else", ":", "parts", "=", "line", ".", "rstrip", "(", ")", ".", "split", "(", "\"\\t\"", ")", "tumor_gt", ",", "normal_gt", "=", "_tumor_normal_genotypes", "(", "parts", "[", "3", "]", ",", "parts", "[", "4", "]", ".", "split", "(", "\",\"", ")", ",", "parts", "[", "7", "]", ".", "split", "(", "\";\"", ")", ",", "in_file", ",", "parts", "[", ":", "2", "]", ")", "parts", "[", "8", "]", "=", "\"GT:%s\"", "%", "parts", "[", "8", "]", "parts", "[", "normal_index", "]", "=", "\"%s:%s\"", "%", "(", "normal_gt", ",", "parts", "[", "normal_index", "]", ")", "parts", "[", "tumor_index", "]", "=", "\"%s:%s\"", "%", "(", "tumor_gt", ",", "parts", "[", "tumor_index", "]", ")", "out_handle", ".", "write", "(", "\"\\t\"", ".", "join", "(", "parts", ")", "+", "\"\\n\"", ")", "return", "vcfutils", ".", "bgzip_and_index", "(", "out_file", ",", "paired", ".", "tumor_data", "[", "\"config\"", "]", ")" ]
Post-process somatic calls to provide standard output. - Converts SGT and NT into standard VCF GT fields - Replace generic TUMOR NORMAL names in VCF with sample names.
[ "Post", "-", "process", "somatic", "calls", "to", "provide", "standard", "output", "." ]
python
train
fictorial/pygameui
pygameui/kvc.py
https://github.com/fictorial/pygameui/blob/af6a35f347d6fafa66c4255bbbe38736d842ff65/pygameui/kvc.py#L77-L107
def set_value_for_keypath(obj, path, new_value, preserve_child = False): """Set attribute value new_value at key path of start object obj. """ parts = path.split('.') last_part = len(parts) - 1 dst = obj for i, part in enumerate(parts): match = re.match(list_index_re, part) if match is not None: dst = _extract(dst, match.group(1)) if not isinstance(dst, list) and not isinstance(dst, tuple): raise TypeError('expected list/tuple') index = int(match.group(2)) if i == last_part: dst[index] = new_value else: dst = dst[index] else: if i != last_part: dst = _extract(dst, part) else: if isinstance(dst, dict): dst[part] = new_value else: if not preserve_child: setattr(dst, part, new_value) else: try: v = getattr(dst, part) except AttributeError: setattr(dst, part, new_value)
[ "def", "set_value_for_keypath", "(", "obj", ",", "path", ",", "new_value", ",", "preserve_child", "=", "False", ")", ":", "parts", "=", "path", ".", "split", "(", "'.'", ")", "last_part", "=", "len", "(", "parts", ")", "-", "1", "dst", "=", "obj", "for", "i", ",", "part", "in", "enumerate", "(", "parts", ")", ":", "match", "=", "re", ".", "match", "(", "list_index_re", ",", "part", ")", "if", "match", "is", "not", "None", ":", "dst", "=", "_extract", "(", "dst", ",", "match", ".", "group", "(", "1", ")", ")", "if", "not", "isinstance", "(", "dst", ",", "list", ")", "and", "not", "isinstance", "(", "dst", ",", "tuple", ")", ":", "raise", "TypeError", "(", "'expected list/tuple'", ")", "index", "=", "int", "(", "match", ".", "group", "(", "2", ")", ")", "if", "i", "==", "last_part", ":", "dst", "[", "index", "]", "=", "new_value", "else", ":", "dst", "=", "dst", "[", "index", "]", "else", ":", "if", "i", "!=", "last_part", ":", "dst", "=", "_extract", "(", "dst", ",", "part", ")", "else", ":", "if", "isinstance", "(", "dst", ",", "dict", ")", ":", "dst", "[", "part", "]", "=", "new_value", "else", ":", "if", "not", "preserve_child", ":", "setattr", "(", "dst", ",", "part", ",", "new_value", ")", "else", ":", "try", ":", "v", "=", "getattr", "(", "dst", ",", "part", ")", "except", "AttributeError", ":", "setattr", "(", "dst", ",", "part", ",", "new_value", ")" ]
Set attribute value new_value at key path of start object obj.
[ "Set", "attribute", "value", "new_value", "at", "key", "path", "of", "start", "object", "obj", "." ]
python
train
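The keypath setter above mixes attribute access, dict keys, and list indices. A stripped-down sketch of the same idea that handles only dicts, lists, and `name[index]` segments (helper names are hypothetical, not pygameui's):

import re

LIST_INDEX_RE = re.compile(r'^(\w+)\[(\d+)\]$')  # e.g. "items[2]"

def set_keypath(obj, path, value):
    """Set a value at a dotted key path; supports dict keys and list indices."""
    parts = path.split('.')
    for i, part in enumerate(parts):
        last = (i == len(parts) - 1)
        m = LIST_INDEX_RE.match(part)
        if m:
            seq = obj[m.group(1)]
            idx = int(m.group(2))
            if last:
                seq[idx] = value
            else:
                obj = seq[idx]
        elif last:
            obj[part] = value
        else:
            obj = obj[part]

data = {'user': {'tags': ['a', 'b']}}
set_keypath(data, 'user.tags[1]', 'z')
print(data)  # {'user': {'tags': ['a', 'z']}}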
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/client.py#L1062-L1129
def load_table_from_uri( self, source_uris, destination, job_id=None, job_id_prefix=None, location=None, project=None, job_config=None, retry=DEFAULT_RETRY, ): """Starts a job for loading data into a table from CloudStorage. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load Arguments: source_uris (Union[str, Sequence[str]]): URIs of data files to be loaded; in format ``gs://<bucket_name>/<object_name_or_glob>``. destination (Union[ \ :class:`~google.cloud.bigquery.table.Table`, \ :class:`~google.cloud.bigquery.table.TableReference`, \ str, \ ]): Table into which data is to be loaded. If a string is passed in, this method attempts to create a table reference from a string using :func:`google.cloud.bigquery.table.TableReference.from_string`. Keyword Arguments: job_id (str): (Optional) Name of the job. job_id_prefix (str): (Optional) the user-provided prefix for a randomly generated job ID. This parameter will be ignored if a ``job_id`` is also given. location (str): Location where to run the job. Must match the location of the destination table. project (str): Project ID of the project of where to run the job. Defaults to the client's project. job_config (google.cloud.bigquery.job.LoadJobConfig): (Optional) Extra configuration options for the job. retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. Returns: google.cloud.bigquery.job.LoadJob: A new load job. """ job_id = _make_job_id(job_id, job_id_prefix) if project is None: project = self.project if location is None: location = self.location job_ref = job._JobReference(job_id, project=project, location=location) if isinstance(source_uris, six.string_types): source_uris = [source_uris] destination = _table_arg_to_table_ref(destination, default_project=self.project) load_job = job.LoadJob(job_ref, source_uris, destination, self, job_config) load_job._begin(retry=retry) return load_job
[ "def", "load_table_from_uri", "(", "self", ",", "source_uris", ",", "destination", ",", "job_id", "=", "None", ",", "job_id_prefix", "=", "None", ",", "location", "=", "None", ",", "project", "=", "None", ",", "job_config", "=", "None", ",", "retry", "=", "DEFAULT_RETRY", ",", ")", ":", "job_id", "=", "_make_job_id", "(", "job_id", ",", "job_id_prefix", ")", "if", "project", "is", "None", ":", "project", "=", "self", ".", "project", "if", "location", "is", "None", ":", "location", "=", "self", ".", "location", "job_ref", "=", "job", ".", "_JobReference", "(", "job_id", ",", "project", "=", "project", ",", "location", "=", "location", ")", "if", "isinstance", "(", "source_uris", ",", "six", ".", "string_types", ")", ":", "source_uris", "=", "[", "source_uris", "]", "destination", "=", "_table_arg_to_table_ref", "(", "destination", ",", "default_project", "=", "self", ".", "project", ")", "load_job", "=", "job", ".", "LoadJob", "(", "job_ref", ",", "source_uris", ",", "destination", ",", "self", ",", "job_config", ")", "load_job", ".", "_begin", "(", "retry", "=", "retry", ")", "return", "load_job" ]
Starts a job for loading data into a table from CloudStorage. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load Arguments: source_uris (Union[str, Sequence[str]]): URIs of data files to be loaded; in format ``gs://<bucket_name>/<object_name_or_glob>``. destination (Union[ \ :class:`~google.cloud.bigquery.table.Table`, \ :class:`~google.cloud.bigquery.table.TableReference`, \ str, \ ]): Table into which data is to be loaded. If a string is passed in, this method attempts to create a table reference from a string using :func:`google.cloud.bigquery.table.TableReference.from_string`. Keyword Arguments: job_id (str): (Optional) Name of the job. job_id_prefix (str): (Optional) the user-provided prefix for a randomly generated job ID. This parameter will be ignored if a ``job_id`` is also given. location (str): Location where to run the job. Must match the location of the destination table. project (str): Project ID of the project of where to run the job. Defaults to the client's project. job_config (google.cloud.bigquery.job.LoadJobConfig): (Optional) Extra configuration options for the job. retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. Returns: google.cloud.bigquery.job.LoadJob: A new load job.
[ "Starts", "a", "job", "for", "loading", "data", "into", "a", "table", "from", "CloudStorage", "." ]
python
train
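A typical call pattern for load_table_from_uri, sketched from the docstring above; the bucket, project, dataset, and table names are placeholders:

from google.cloud import bigquery

client = bigquery.Client()
job_config = bigquery.LoadJobConfig()
job_config.source_format = bigquery.SourceFormat.CSV
job_config.skip_leading_rows = 1
job_config.autodetect = True

# The destination may be given as a "project.dataset.table" string
load_job = client.load_table_from_uri(
    'gs://example-bucket/data.csv',
    'example-project.example_dataset.example_table',
    job_config=job_config)
load_job.result()  # block until the load job finishes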
CyberReboot/vent
vent/extras/rmq_es_connector/rmq_es_connector.py
https://github.com/CyberReboot/vent/blob/9956a09146b11a89a0eabab3bc7ce8906d124885/vent/extras/rmq_es_connector/rmq_es_connector.py#L33-L58
def connections(self, wait): """ wait for connections to both rabbitmq and elasticsearch to be made before binding a routing key to a channel and sending messages to elasticsearch """ while wait: try: params = pika.ConnectionParameters(host=self.rmq_host, port=self.rmq_port) connection = pika.BlockingConnection(params) self.channel = connection.channel() self.channel.exchange_declare(exchange='topic_recs', exchange_type='topic') result = self.channel.queue_declare() self.queue_name = result.method.queue self.es_conn = Elasticsearch([{'host': self.es_host, 'port': self.es_port}]) wait = False print('connected to rabbitmq and elasticsearch...') except Exception as e: # pragma: no cover print(str(e)) print('waiting for connection to rabbitmq...' + str(e)) time.sleep(2) wait = True
[ "def", "connections", "(", "self", ",", "wait", ")", ":", "while", "wait", ":", "try", ":", "params", "=", "pika", ".", "ConnectionParameters", "(", "host", "=", "self", ".", "rmq_host", ",", "port", "=", "self", ".", "rmq_port", ")", "connection", "=", "pika", ".", "BlockingConnection", "(", "params", ")", "self", ".", "channel", "=", "connection", ".", "channel", "(", ")", "self", ".", "channel", ".", "exchange_declare", "(", "exchange", "=", "'topic_recs'", ",", "exchange_type", "=", "'topic'", ")", "result", "=", "self", ".", "channel", ".", "queue_declare", "(", ")", "self", ".", "queue_name", "=", "result", ".", "method", ".", "queue", "self", ".", "es_conn", "=", "Elasticsearch", "(", "[", "{", "'host'", ":", "self", ".", "es_host", ",", "'port'", ":", "self", ".", "es_port", "}", "]", ")", "wait", "=", "False", "print", "(", "'connected to rabbitmq and elasticsearch...'", ")", "except", "Exception", "as", "e", ":", "# pragma: no cover", "print", "(", "str", "(", "e", ")", ")", "print", "(", "'waiting for connection to rabbitmq...'", "+", "str", "(", "e", ")", ")", "time", ".", "sleep", "(", "2", ")", "wait", "=", "True" ]
wait for connections to both rabbitmq and elasticsearch to be made before binding a routing key to a channel and sending messages to elasticsearch
[ "wait", "for", "connections", "to", "both", "rabbitmq", "and", "elasticsearch", "to", "be", "made", "before", "binding", "a", "routing", "key", "to", "a", "channel", "and", "sending", "messages", "to", "elasticsearch" ]
python
train
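The connect-and-retry loop above generalizes to a small helper. A sketch of the RabbitMQ half, assuming pika is available (the host and port defaults are placeholders):

import time
import pika

def connect_with_retry(host='rabbitmq', port=5672, delay=2):
    """Block until a RabbitMQ connection succeeds, retrying on failure."""
    while True:
        try:
            params = pika.ConnectionParameters(host=host, port=port)
            return pika.BlockingConnection(params)
        except Exception as e:
            print('waiting for connection to rabbitmq... ' + str(e))
            time.sleep(delay)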
projecthamster/hamster
src/hamster/storage/db.py
https://github.com/projecthamster/hamster/blob/ca5254eff53172796ddafc72226c394ed1858245/src/hamster/storage/db.py#L342-L357
def __get_category_id(self, name):
    """returns a category id by its name"""
    query = """
               SELECT id from categories
                WHERE lower(name) = lower(?)
             ORDER BY id desc
                LIMIT 1
            """
    res = self.fetchone(query, (name, ))
    if res:
        return res['id']
    return None
[ "def", "__get_category_id", "(", "self", ",", "name", ")", ":", "query", "=", "\"\"\"\n SELECT id from categories\n WHERE lower(name) = lower(?)\n ORDER BY id desc\n LIMIT 1\n \"\"\"", "res", "=", "self", ".", "fetchone", "(", "query", ",", "(", "name", ",", ")", ")", "if", "res", ":", "return", "res", "[", "'id'", "]", "return", "None" ]
returns a category id by its name
[ "returns", "category", "by", "it", "s", "name" ]
python
train
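The case-insensitive lookup above is plain SQL; a self-contained sqlite3 sketch of the same query:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE categories (id INTEGER PRIMARY KEY, name TEXT)')
conn.execute("INSERT INTO categories (name) VALUES ('Work')")

def get_category_id(conn, name):
    # lower() on both sides makes the comparison case-insensitive
    row = conn.execute(
        "SELECT id FROM categories WHERE lower(name) = lower(?) "
        "ORDER BY id DESC LIMIT 1", (name,)).fetchone()
    return row[0] if row else None

print(get_category_id(conn, 'work'))  # 1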
buzzfeed/caliendo
caliendo/facade.py
https://github.com/buzzfeed/caliendo/blob/1628a10f7782ad67c0422b5cbc9bf4979ac40abc/caliendo/facade.py#L23-L38
def should_exclude(type_or_instance, exclusion_list): """ Tests whether an object should be simply returned when being wrapped """ if type_or_instance in exclusion_list: # Check class definition return True if type(type_or_instance) in exclusion_list: # Check instance type return True try: if type_or_instance.__class__ in exclusion_list: # Check instance class return True except: pass return False
[ "def", "should_exclude", "(", "type_or_instance", ",", "exclusion_list", ")", ":", "if", "type_or_instance", "in", "exclusion_list", ":", "# Check class definition", "return", "True", "if", "type", "(", "type_or_instance", ")", "in", "exclusion_list", ":", "# Check instance type", "return", "True", "try", ":", "if", "type_or_instance", ".", "__class__", "in", "exclusion_list", ":", "# Check instance class", "return", "True", "except", ":", "pass", "return", "False" ]
Tests whether an object should be simply returned when being wrapped
[ "Tests", "whether", "an", "object", "should", "be", "simply", "returned", "when", "being", "wrapped" ]
python
train
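The three checks above (the class itself, the type of an instance, and the instance's __class__) can be exercised directly. A compact sketch of the same logic:

def should_exclude(obj, exclusion_list):
    """Return True if obj (a class or an instance) appears in exclusion_list."""
    if obj in exclusion_list:          # the class itself was passed in
        return True
    if type(obj) in exclusion_list:    # instance of an excluded type
        return True
    cls = getattr(obj, '__class__', None)  # covers old-style instances
    return cls in exclusion_list

class Widget:
    pass

print(should_exclude(Widget, [Widget]))    # True
print(should_exclude(Widget(), [Widget]))  # True
print(should_exclude(3.5, [Widget]))       # False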
UCSBarchlab/PyRTL
pyrtl/rtllib/aes.py
https://github.com/UCSBarchlab/PyRTL/blob/0988e5c9c10ededd5e1f58d5306603f9edf4b3e2/pyrtl/rtllib/aes.py#L53-L76
def encryption(self, plaintext, key):
    """
    Builds a single cycle AES Encryption circuit

    :param WireVector plaintext: text to encrypt
    :param WireVector key: AES key to use to encrypt
    :return: a WireVector containing the ciphertext
    """
    if len(plaintext) != self._key_len:
        raise pyrtl.PyrtlError("Plaintext length is invalid")
    if len(key) != self._key_len:
        raise pyrtl.PyrtlError("key length is invalid")
    key_list = self._key_gen(key)
    t = self._add_round_key(plaintext, key_list[0])
    for round in range(1, 11):
        t = self._sub_bytes(t)
        t = self._shift_rows(t)
        if round != 10:
            t = self._mix_columns(t)
        t = self._add_round_key(t, key_list[round])
    return t
[ "def", "encryption", "(", "self", ",", "plaintext", ",", "key", ")", ":", "if", "len", "(", "plaintext", ")", "!=", "self", ".", "_key_len", ":", "raise", "pyrtl", ".", "PyrtlError", "(", "\"Ciphertext length is invalid\"", ")", "if", "len", "(", "key", ")", "!=", "self", ".", "_key_len", ":", "raise", "pyrtl", ".", "PyrtlError", "(", "\"key length is invalid\"", ")", "key_list", "=", "self", ".", "_key_gen", "(", "key", ")", "t", "=", "self", ".", "_add_round_key", "(", "plaintext", ",", "key_list", "[", "0", "]", ")", "for", "round", "in", "range", "(", "1", ",", "11", ")", ":", "t", "=", "self", ".", "_sub_bytes", "(", "t", ")", "t", "=", "self", ".", "_shift_rows", "(", "t", ")", "if", "round", "!=", "10", ":", "t", "=", "self", ".", "_mix_columns", "(", "t", ")", "t", "=", "self", ".", "_add_round_key", "(", "t", ",", "key_list", "[", "round", "]", ")", "return", "t" ]
Builds a single cycle AES Encryption circuit :param WireVector plaintext: text to encrypt :param WireVector key: AES key to use to encrypt :return: a WireVector containing the ciphertext
[ "Builds", "a", "single", "cycle", "AES", "Encryption", "circuit" ]
python
train
dmaust/rounding
rounding/stochastic.py
https://github.com/dmaust/rounding/blob/06731dff803c30c0741e3199888e7e5266ad99cc/rounding/stochastic.py#L36-L55
def round(self, x):
    """Round the given value.

    @param x: value to round
    @type x: numeric
    """
    fraction, scaled_x, scale = self._get_fraction(x)

    if fraction < self.minimum_stochastic_distance or 1 - fraction < self.minimum_stochastic_distance:
        result = round(x, self.precision)
    else:
        rounddown = fraction < self.random_generator.random()
        if rounddown:
            result = math.floor(scaled_x) / scale
        else:
            result = math.ceil(scaled_x) / scale
    self._record_roundoff_error(x, result)
    return result
[ "def", "round", "(", "self", ",", "x", ")", ":", "fraction", ",", "scaled_x", ",", "scale", "=", "self", ".", "_get_fraction", "(", "x", ")", "if", "fraction", "<", "self", ".", "minimum_stochastic_distance", "or", "1", "-", "fraction", "<", "self", ".", "minimum_stochastic_distance", ":", "result", "=", "round", "(", "x", ",", "self", ".", "precision", ")", "else", ":", "rounddown", "=", "fraction", "<", "self", ".", "random_generator", ".", "random", "(", ")", "if", "rounddown", ":", "result", "=", "math", ".", "floor", "(", "scaled_x", ")", "/", "scale", "else", ":", "result", "=", "math", ".", "ceil", "(", "scaled_x", ")", "/", "scale", "self", ".", "_record_roundoff_error", "(", "x", ",", "result", ")", "return", "result" ]
Round the given value.

@param x: value to round
@type x: numeric
[ "Round", "the", "given", "value", "." ]
python
train
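A standalone sketch of the stochastic rounding rule above, without the minimum-distance shortcut or the roundoff-error bookkeeping; the expected value of the result equals the input:

import math
import random

def stochastic_round(x, precision=0, rng=random):
    """Round up or down at `precision`, with probability set by the fraction."""
    scale = 10 ** precision
    scaled = x * scale
    fraction = scaled - math.floor(scaled)
    if fraction < rng.random():
        return math.floor(scaled) / scale
    return math.ceil(scaled) / scale

random.seed(0)
# Over many trials the mean approaches the true value, unlike plain round()
samples = [stochastic_round(2.3) for _ in range(10000)]
print(sum(samples) / len(samples))  # close to 2.3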
QuantEcon/QuantEcon.py
quantecon/game_theory/repeated_game.py
https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/game_theory/repeated_game.py#L435-L458
def _update_u(u, W):
    """
    Update the threat points if they are not feasible in the new W, by the
    minimum of new feasible payoffs.

    Parameters
    ----------
    u : ndarray(float, ndim=1)
        The threat points.

    W : ndarray(float, ndim=2)
        The points that construct the feasible payoff convex hull.

    Returns
    -------
    u : ndarray(float, ndim=1)
        The updated threat points.

    """
    for i in range(2):
        W_min = W[:, i].min()
        if u[i] < W_min:
            u[i] = W_min

    return u
[ "def", "_update_u", "(", "u", ",", "W", ")", ":", "for", "i", "in", "range", "(", "2", ")", ":", "W_min", "=", "W", "[", ":", ",", "i", "]", ".", "min", "(", ")", "if", "u", "[", "i", "]", "<", "W_min", ":", "u", "[", "i", "]", "=", "W_min", "return", "u" ]
Update the threat points if they are not feasible in the new W, by the minimum of new feasible payoffs.

Parameters
----------
u : ndarray(float, ndim=1)
    The threat points.

W : ndarray(float, ndim=2)
    The points that construct the feasible payoff convex hull.

Returns
-------
u : ndarray(float, ndim=1)
    The updated threat points.
[ "Update", "the", "threat", "points", "if", "it", "not", "feasible", "in", "the", "new", "W", "by", "the", "minimum", "of", "new", "feasible", "payoffs", "." ]
python
train
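The threat-point update is small enough to run directly; only the sample payoffs below are invented:

import numpy as np

def update_u(u, W):
    """Raise each threat point to the minimum feasible payoff if needed."""
    for i in range(2):
        W_min = W[:, i].min()
        if u[i] < W_min:
            u[i] = W_min
    return u

u = np.array([0.0, 3.0])
W = np.array([[1.0, 2.0], [2.0, 5.0], [4.0, 2.5]])
print(update_u(u, W))  # [1. 3.] -- only the infeasible first point moved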
senaite/senaite.core
bika/lims/content/analysisspec.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/analysisspec.py#L148-L159
def Title(self): """ Return the title if possible, else return the Sample type. Fall back on the instance's ID if there's no sample type or title. """ title = '' if self.title: title = self.title else: sampletype = self.getSampleType() if sampletype: title = sampletype.Title() return safe_unicode(title).encode('utf-8')
[ "def", "Title", "(", "self", ")", ":", "title", "=", "''", "if", "self", ".", "title", ":", "title", "=", "self", ".", "title", "else", ":", "sampletype", "=", "self", ".", "getSampleType", "(", ")", "if", "sampletype", ":", "title", "=", "sampletype", ".", "Title", "(", ")", "return", "safe_unicode", "(", "title", ")", ".", "encode", "(", "'utf-8'", ")" ]
Return the title if possible, else return the Sample type. Fall back on the instance's ID if there's no sample type or title.
[ "Return", "the", "title", "if", "possible", "else", "return", "the", "Sample", "type", ".", "Fall", "back", "on", "the", "instance", "s", "ID", "if", "there", "s", "no", "sample", "type", "or", "title", "." ]
python
train
kxgames/vecrec
vecrec/shapes.py
https://github.com/kxgames/vecrec/blob/18b0841419de21a644b4511e2229af853ed09529/vecrec/shapes.py#L249-L254
def perp_product(self, other): """ Return the perp product of the given vectors. The perp product is just a cross product where the third dimension is taken to be zero and the result is returned as a scalar. """ return self.x * other.y - self.y * other.x
[ "def", "perp_product", "(", "self", ",", "other", ")", ":", "return", "self", ".", "x", "*", "other", ".", "y", "-", "self", ".", "y", "*", "other", ".", "x" ]
Return the perp product of the given vectors. The perp product is just a cross product where the third dimension is taken to be zero and the result is returned as a scalar.
[ "Return", "the", "perp", "product", "of", "the", "given", "vectors", ".", "The", "perp", "product", "is", "just", "a", "cross", "product", "where", "the", "third", "dimension", "is", "taken", "to", "be", "zero", "and", "the", "result", "is", "returned", "as", "a", "scalar", "." ]
python
train
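The perp product is a one-liner worth checking by hand; the sign follows the counter-clockwise convention:

def perp_product(ax, ay, bx, by):
    """2D cross product: positive if b is counter-clockwise from a."""
    return ax * by - ay * bx

print(perp_product(1, 0, 0, 1))  # 1  (y-axis is CCW from x-axis)
print(perp_product(1, 0, 2, 0))  # 0  (parallel vectors)
print(perp_product(0, 1, 1, 0))  # -1 (clockwise)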
MAVENSDC/PyTplot
pytplot/QtPlotter/CustomAxis/AxisItem.py
https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/QtPlotter/CustomAxis/AxisItem.py#L40-L285
def generateDrawSpecs(self, p): """ Calls tickValues() and tickStrings() to determine where and how ticks should be drawn, then generates from this a set of drawing commands to be interpreted by drawPicture(). """ profiler = debug.Profiler() # bounds = self.boundingRect() bounds = self.mapRectFromParent(self.geometry()) linkedView = self.linkedView() if linkedView is None or self.grid is False: tickBounds = bounds else: tickBounds = linkedView.mapRectToItem(self, linkedView.boundingRect()) if self.orientation == 'left': span = (bounds.topRight(), bounds.bottomRight()) tickStart = tickBounds.right() tickStop = bounds.right() tickDir = -1 axis = 0 elif self.orientation == 'right': span = (bounds.topLeft(), bounds.bottomLeft()) tickStart = tickBounds.left() tickStop = bounds.left() tickDir = 1 axis = 0 elif self.orientation == 'top': span = (bounds.bottomLeft(), bounds.bottomRight()) tickStart = tickBounds.bottom() tickStop = bounds.bottom() tickDir = -1 axis = 1 elif self.orientation == 'bottom': span = (bounds.topLeft(), bounds.topRight()) tickStart = tickBounds.top() tickStop = bounds.top() tickDir = 1 axis = 1 # print tickStart, tickStop, span ## determine size of this item in pixels points = list(map(self.mapToDevice, span)) if None in points: return lengthInPixels = Point(points[1] - points[0]).length() if lengthInPixels == 0: return # Determine major / minor / subminor axis ticks if self._tickLevels is None: tickLevels = self.tickValues(self.range[0], self.range[1], lengthInPixels) tickStrings = None else: ## parse self.tickLevels into the formats returned by tickLevels() and tickStrings() tickLevels = [] tickStrings = [] for level in self._tickLevels: values = [] strings = [] tickLevels.append((None, values)) tickStrings.append(strings) for val, strn in level: values.append(val) strings.append(strn) ## determine mapping between tick values and local coordinates dif = self.range[1] - self.range[0] if dif == 0: xScale = 1 offset = 0 else: if axis == 0: xScale = -bounds.height() / dif offset = self.range[0] * xScale - bounds.height() else: xScale = bounds.width() / dif offset = self.range[0] * xScale xRange = [x * xScale - offset for x in self.range] xMin = min(xRange) xMax = max(xRange) profiler('init') tickPositions = [] # remembers positions of previously drawn ticks ## compute coordinates to draw ticks ## draw three different intervals, long ticks first tickSpecs = [] for i in range(len(tickLevels)): tickPositions.append([]) ticks = tickLevels[i][1] ## length of tick tickLength = self.style['tickLength'] / ((i * 0.5) + 1.0) lineAlpha = 255 / (i + 1) if self.grid is not False: lineAlpha *= self.grid / 255. * np.clip((0.05 * lengthInPixels / (len(ticks) + 1)), 0., 1.) 
for v in ticks: ## determine actual position to draw this tick x = (v * xScale) - offset if x < xMin or x > xMax: ## last check to make sure no out-of-bounds ticks are drawn tickPositions[i].append(None) continue tickPositions[i].append(x) p1 = [x, x] p2 = [x, x] p1[axis] = tickStart p2[axis] = tickStop if self.grid is False: p2[axis] += tickLength * tickDir tickPen = self.pen() color = tickPen.color() color.setAlpha(lineAlpha) tickPen.setColor(color) tickSpecs.append((tickPen, Point(p1), Point(p2))) profiler('compute ticks') if self.style['stopAxisAtTick'][0] is True: stop = max(span[0].y(), min(map(min, tickPositions))) if axis == 0: span[0].setY(stop) else: span[0].setX(stop) if self.style['stopAxisAtTick'][1] is True: stop = min(span[1].y(), max(map(max, tickPositions))) if axis == 0: span[1].setY(stop) else: span[1].setX(stop) axisSpec = (self.pen(), span[0], span[1]) textOffset = self.style['tickTextOffset'][axis] ## spacing between axis and text # if self.style['autoExpandTextSpace'] is True: # textWidth = self.textWidth # textHeight = self.textHeight # else: # textWidth = self.style['tickTextWidth'] ## space allocated for horizontal text # textHeight = self.style['tickTextHeight'] ## space allocated for horizontal text textSize2 = 0 textRects = [] textSpecs = [] ## list of draw # If values are hidden, return early if not self.style['showValues']: return (axisSpec, tickSpecs, textSpecs) for i in range(min(len(tickLevels), self.style['maxTextLevel'] + 1)): ## Get the list of strings to display for this level if tickStrings is None: spacing, values = tickLevels[i] strings = self.tickStrings(values, self.autoSIPrefixScale * self.scale, spacing) else: strings = tickStrings[i] if len(strings) == 0: continue ## ignore strings belonging to ticks that were previously ignored for j in range(len(strings)): if tickPositions[i][j] is None: strings[j] = None ## Measure density of text; decide whether to draw this level rects = [] for s in strings: if s is None: rects.append(None) else: br = p.boundingRect(QtCore.QRectF(0, 0, 100, 100), QtCore.Qt.AlignCenter, asUnicode(s)) ## boundingRect is usually just a bit too large ## (but this probably depends on per-font metrics?) br.setHeight(br.height() * 1.4) rects.append(br) textRects.append(rects[-1]) if len(textRects) > 0: ## measure all text, make sure there's enough room if axis == 0: textSize = np.sum([r.height() for r in textRects]) textSize2 = np.max([r.width() for r in textRects]) else: textSize = np.sum([r.width() for r in textRects]) textSize2 = np.max([r.height() for r in textRects]) else: textSize = 0 textSize2 = 0 if i > 0: ## always draw top level ## If the strings are too crowded, stop drawing text now. ## We use three different crowding limits based on the number ## of texts drawn so far. 
textFillRatio = float(textSize) / lengthInPixels finished = False for nTexts, limit in self.style['textFillLimits']: if len(textSpecs) >= nTexts and textFillRatio >= limit: finished = True break if finished: break # spacing, values = tickLevels[best] # strings = self.tickStrings(values, self.scale, spacing) # Determine exactly where tick text should be drawn for j in range(len(strings)): vstr = strings[j] if vstr is None: ## this tick was ignored because it is out of bounds continue vstr = asUnicode(vstr) x = tickPositions[i][j] # textRect = p.boundingRect(QtCore.QRectF(0, 0, 100, 100), QtCore.Qt.AlignCenter, vstr) textRect = rects[j] height = textRect.height() width = textRect.width() # self.textHeight = height offset = max(0, self.style['tickLength']) + textOffset if self.orientation == 'left': textFlags = QtCore.Qt.TextDontClip | QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter rect = QtCore.QRectF(tickStop - offset - width, x - (height / 2), width, height) elif self.orientation == 'right': textFlags = QtCore.Qt.TextDontClip | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter rect = QtCore.QRectF(tickStop + offset, x - (height / 2), width, height) elif self.orientation == 'top': textFlags = QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter | QtCore.Qt.AlignBottom rect = QtCore.QRectF(x - width / 2., tickStop - offset - height, width, height) elif self.orientation == 'bottom': textFlags = QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter | QtCore.Qt.AlignTop rect = QtCore.QRectF(x - width / 2., tickStop + offset, width, height) # p.setPen(self.pen()) # p.drawText(rect, textFlags, vstr) textSpecs.append((rect, textFlags, vstr)) profiler('compute text') ## update max text size if needed. self._updateMaxTextSize(textSize2) return (axisSpec, tickSpecs, textSpecs)
[ "def", "generateDrawSpecs", "(", "self", ",", "p", ")", ":", "profiler", "=", "debug", ".", "Profiler", "(", ")", "# bounds = self.boundingRect()", "bounds", "=", "self", ".", "mapRectFromParent", "(", "self", ".", "geometry", "(", ")", ")", "linkedView", "=", "self", ".", "linkedView", "(", ")", "if", "linkedView", "is", "None", "or", "self", ".", "grid", "is", "False", ":", "tickBounds", "=", "bounds", "else", ":", "tickBounds", "=", "linkedView", ".", "mapRectToItem", "(", "self", ",", "linkedView", ".", "boundingRect", "(", ")", ")", "if", "self", ".", "orientation", "==", "'left'", ":", "span", "=", "(", "bounds", ".", "topRight", "(", ")", ",", "bounds", ".", "bottomRight", "(", ")", ")", "tickStart", "=", "tickBounds", ".", "right", "(", ")", "tickStop", "=", "bounds", ".", "right", "(", ")", "tickDir", "=", "-", "1", "axis", "=", "0", "elif", "self", ".", "orientation", "==", "'right'", ":", "span", "=", "(", "bounds", ".", "topLeft", "(", ")", ",", "bounds", ".", "bottomLeft", "(", ")", ")", "tickStart", "=", "tickBounds", ".", "left", "(", ")", "tickStop", "=", "bounds", ".", "left", "(", ")", "tickDir", "=", "1", "axis", "=", "0", "elif", "self", ".", "orientation", "==", "'top'", ":", "span", "=", "(", "bounds", ".", "bottomLeft", "(", ")", ",", "bounds", ".", "bottomRight", "(", ")", ")", "tickStart", "=", "tickBounds", ".", "bottom", "(", ")", "tickStop", "=", "bounds", ".", "bottom", "(", ")", "tickDir", "=", "-", "1", "axis", "=", "1", "elif", "self", ".", "orientation", "==", "'bottom'", ":", "span", "=", "(", "bounds", ".", "topLeft", "(", ")", ",", "bounds", ".", "topRight", "(", ")", ")", "tickStart", "=", "tickBounds", ".", "top", "(", ")", "tickStop", "=", "bounds", ".", "top", "(", ")", "tickDir", "=", "1", "axis", "=", "1", "# print tickStart, tickStop, span", "## determine size of this item in pixels", "points", "=", "list", "(", "map", "(", "self", ".", "mapToDevice", ",", "span", ")", ")", "if", "None", "in", "points", ":", "return", "lengthInPixels", "=", "Point", "(", "points", "[", "1", "]", "-", "points", "[", "0", "]", ")", ".", "length", "(", ")", "if", "lengthInPixels", "==", "0", ":", "return", "# Determine major / minor / subminor axis ticks", "if", "self", ".", "_tickLevels", "is", "None", ":", "tickLevels", "=", "self", ".", "tickValues", "(", "self", ".", "range", "[", "0", "]", ",", "self", ".", "range", "[", "1", "]", ",", "lengthInPixels", ")", "tickStrings", "=", "None", "else", ":", "## parse self.tickLevels into the formats returned by tickLevels() and tickStrings()", "tickLevels", "=", "[", "]", "tickStrings", "=", "[", "]", "for", "level", "in", "self", ".", "_tickLevels", ":", "values", "=", "[", "]", "strings", "=", "[", "]", "tickLevels", ".", "append", "(", "(", "None", ",", "values", ")", ")", "tickStrings", ".", "append", "(", "strings", ")", "for", "val", ",", "strn", "in", "level", ":", "values", ".", "append", "(", "val", ")", "strings", ".", "append", "(", "strn", ")", "## determine mapping between tick values and local coordinates", "dif", "=", "self", ".", "range", "[", "1", "]", "-", "self", ".", "range", "[", "0", "]", "if", "dif", "==", "0", ":", "xScale", "=", "1", "offset", "=", "0", "else", ":", "if", "axis", "==", "0", ":", "xScale", "=", "-", "bounds", ".", "height", "(", ")", "/", "dif", "offset", "=", "self", ".", "range", "[", "0", "]", "*", "xScale", "-", "bounds", ".", "height", "(", ")", "else", ":", "xScale", "=", "bounds", ".", "width", "(", ")", "/", "dif", "offset", "=", "self", ".", "range", "[", "0", "]", "*", 
"xScale", "xRange", "=", "[", "x", "*", "xScale", "-", "offset", "for", "x", "in", "self", ".", "range", "]", "xMin", "=", "min", "(", "xRange", ")", "xMax", "=", "max", "(", "xRange", ")", "profiler", "(", "'init'", ")", "tickPositions", "=", "[", "]", "# remembers positions of previously drawn ticks", "## compute coordinates to draw ticks", "## draw three different intervals, long ticks first", "tickSpecs", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "tickLevels", ")", ")", ":", "tickPositions", ".", "append", "(", "[", "]", ")", "ticks", "=", "tickLevels", "[", "i", "]", "[", "1", "]", "## length of tick", "tickLength", "=", "self", ".", "style", "[", "'tickLength'", "]", "/", "(", "(", "i", "*", "0.5", ")", "+", "1.0", ")", "lineAlpha", "=", "255", "/", "(", "i", "+", "1", ")", "if", "self", ".", "grid", "is", "not", "False", ":", "lineAlpha", "*=", "self", ".", "grid", "/", "255.", "*", "np", ".", "clip", "(", "(", "0.05", "*", "lengthInPixels", "/", "(", "len", "(", "ticks", ")", "+", "1", ")", ")", ",", "0.", ",", "1.", ")", "for", "v", "in", "ticks", ":", "## determine actual position to draw this tick", "x", "=", "(", "v", "*", "xScale", ")", "-", "offset", "if", "x", "<", "xMin", "or", "x", ">", "xMax", ":", "## last check to make sure no out-of-bounds ticks are drawn", "tickPositions", "[", "i", "]", ".", "append", "(", "None", ")", "continue", "tickPositions", "[", "i", "]", ".", "append", "(", "x", ")", "p1", "=", "[", "x", ",", "x", "]", "p2", "=", "[", "x", ",", "x", "]", "p1", "[", "axis", "]", "=", "tickStart", "p2", "[", "axis", "]", "=", "tickStop", "if", "self", ".", "grid", "is", "False", ":", "p2", "[", "axis", "]", "+=", "tickLength", "*", "tickDir", "tickPen", "=", "self", ".", "pen", "(", ")", "color", "=", "tickPen", ".", "color", "(", ")", "color", ".", "setAlpha", "(", "lineAlpha", ")", "tickPen", ".", "setColor", "(", "color", ")", "tickSpecs", ".", "append", "(", "(", "tickPen", ",", "Point", "(", "p1", ")", ",", "Point", "(", "p2", ")", ")", ")", "profiler", "(", "'compute ticks'", ")", "if", "self", ".", "style", "[", "'stopAxisAtTick'", "]", "[", "0", "]", "is", "True", ":", "stop", "=", "max", "(", "span", "[", "0", "]", ".", "y", "(", ")", ",", "min", "(", "map", "(", "min", ",", "tickPositions", ")", ")", ")", "if", "axis", "==", "0", ":", "span", "[", "0", "]", ".", "setY", "(", "stop", ")", "else", ":", "span", "[", "0", "]", ".", "setX", "(", "stop", ")", "if", "self", ".", "style", "[", "'stopAxisAtTick'", "]", "[", "1", "]", "is", "True", ":", "stop", "=", "min", "(", "span", "[", "1", "]", ".", "y", "(", ")", ",", "max", "(", "map", "(", "max", ",", "tickPositions", ")", ")", ")", "if", "axis", "==", "0", ":", "span", "[", "1", "]", ".", "setY", "(", "stop", ")", "else", ":", "span", "[", "1", "]", ".", "setX", "(", "stop", ")", "axisSpec", "=", "(", "self", ".", "pen", "(", ")", ",", "span", "[", "0", "]", ",", "span", "[", "1", "]", ")", "textOffset", "=", "self", ".", "style", "[", "'tickTextOffset'", "]", "[", "axis", "]", "## spacing between axis and text", "# if self.style['autoExpandTextSpace'] is True:", "# textWidth = self.textWidth", "# textHeight = self.textHeight", "# else:", "# textWidth = self.style['tickTextWidth'] ## space allocated for horizontal text", "# textHeight = self.style['tickTextHeight'] ## space allocated for horizontal text", "textSize2", "=", "0", "textRects", "=", "[", "]", "textSpecs", "=", "[", "]", "## list of draw", "# If values are hidden, return early", "if", "not", "self", ".", "style", "[", 
"'showValues'", "]", ":", "return", "(", "axisSpec", ",", "tickSpecs", ",", "textSpecs", ")", "for", "i", "in", "range", "(", "min", "(", "len", "(", "tickLevels", ")", ",", "self", ".", "style", "[", "'maxTextLevel'", "]", "+", "1", ")", ")", ":", "## Get the list of strings to display for this level", "if", "tickStrings", "is", "None", ":", "spacing", ",", "values", "=", "tickLevels", "[", "i", "]", "strings", "=", "self", ".", "tickStrings", "(", "values", ",", "self", ".", "autoSIPrefixScale", "*", "self", ".", "scale", ",", "spacing", ")", "else", ":", "strings", "=", "tickStrings", "[", "i", "]", "if", "len", "(", "strings", ")", "==", "0", ":", "continue", "## ignore strings belonging to ticks that were previously ignored", "for", "j", "in", "range", "(", "len", "(", "strings", ")", ")", ":", "if", "tickPositions", "[", "i", "]", "[", "j", "]", "is", "None", ":", "strings", "[", "j", "]", "=", "None", "## Measure density of text; decide whether to draw this level", "rects", "=", "[", "]", "for", "s", "in", "strings", ":", "if", "s", "is", "None", ":", "rects", ".", "append", "(", "None", ")", "else", ":", "br", "=", "p", ".", "boundingRect", "(", "QtCore", ".", "QRectF", "(", "0", ",", "0", ",", "100", ",", "100", ")", ",", "QtCore", ".", "Qt", ".", "AlignCenter", ",", "asUnicode", "(", "s", ")", ")", "## boundingRect is usually just a bit too large", "## (but this probably depends on per-font metrics?)", "br", ".", "setHeight", "(", "br", ".", "height", "(", ")", "*", "1.4", ")", "rects", ".", "append", "(", "br", ")", "textRects", ".", "append", "(", "rects", "[", "-", "1", "]", ")", "if", "len", "(", "textRects", ")", ">", "0", ":", "## measure all text, make sure there's enough room", "if", "axis", "==", "0", ":", "textSize", "=", "np", ".", "sum", "(", "[", "r", ".", "height", "(", ")", "for", "r", "in", "textRects", "]", ")", "textSize2", "=", "np", ".", "max", "(", "[", "r", ".", "width", "(", ")", "for", "r", "in", "textRects", "]", ")", "else", ":", "textSize", "=", "np", ".", "sum", "(", "[", "r", ".", "width", "(", ")", "for", "r", "in", "textRects", "]", ")", "textSize2", "=", "np", ".", "max", "(", "[", "r", ".", "height", "(", ")", "for", "r", "in", "textRects", "]", ")", "else", ":", "textSize", "=", "0", "textSize2", "=", "0", "if", "i", ">", "0", ":", "## always draw top level", "## If the strings are too crowded, stop drawing text now.", "## We use three different crowding limits based on the number", "## of texts drawn so far.", "textFillRatio", "=", "float", "(", "textSize", ")", "/", "lengthInPixels", "finished", "=", "False", "for", "nTexts", ",", "limit", "in", "self", ".", "style", "[", "'textFillLimits'", "]", ":", "if", "len", "(", "textSpecs", ")", ">=", "nTexts", "and", "textFillRatio", ">=", "limit", ":", "finished", "=", "True", "break", "if", "finished", ":", "break", "# spacing, values = tickLevels[best]", "# strings = self.tickStrings(values, self.scale, spacing)", "# Determine exactly where tick text should be drawn", "for", "j", "in", "range", "(", "len", "(", "strings", ")", ")", ":", "vstr", "=", "strings", "[", "j", "]", "if", "vstr", "is", "None", ":", "## this tick was ignored because it is out of bounds", "continue", "vstr", "=", "asUnicode", "(", "vstr", ")", "x", "=", "tickPositions", "[", "i", "]", "[", "j", "]", "# textRect = p.boundingRect(QtCore.QRectF(0, 0, 100, 100), QtCore.Qt.AlignCenter, vstr)", "textRect", "=", "rects", "[", "j", "]", "height", "=", "textRect", ".", "height", "(", ")", "width", "=", "textRect", ".", "width", "(", 
")", "# self.textHeight = height", "offset", "=", "max", "(", "0", ",", "self", ".", "style", "[", "'tickLength'", "]", ")", "+", "textOffset", "if", "self", ".", "orientation", "==", "'left'", ":", "textFlags", "=", "QtCore", ".", "Qt", ".", "TextDontClip", "|", "QtCore", ".", "Qt", ".", "AlignRight", "|", "QtCore", ".", "Qt", ".", "AlignVCenter", "rect", "=", "QtCore", ".", "QRectF", "(", "tickStop", "-", "offset", "-", "width", ",", "x", "-", "(", "height", "/", "2", ")", ",", "width", ",", "height", ")", "elif", "self", ".", "orientation", "==", "'right'", ":", "textFlags", "=", "QtCore", ".", "Qt", ".", "TextDontClip", "|", "QtCore", ".", "Qt", ".", "AlignLeft", "|", "QtCore", ".", "Qt", ".", "AlignVCenter", "rect", "=", "QtCore", ".", "QRectF", "(", "tickStop", "+", "offset", ",", "x", "-", "(", "height", "/", "2", ")", ",", "width", ",", "height", ")", "elif", "self", ".", "orientation", "==", "'top'", ":", "textFlags", "=", "QtCore", ".", "Qt", ".", "TextDontClip", "|", "QtCore", ".", "Qt", ".", "AlignCenter", "|", "QtCore", ".", "Qt", ".", "AlignBottom", "rect", "=", "QtCore", ".", "QRectF", "(", "x", "-", "width", "/", "2.", ",", "tickStop", "-", "offset", "-", "height", ",", "width", ",", "height", ")", "elif", "self", ".", "orientation", "==", "'bottom'", ":", "textFlags", "=", "QtCore", ".", "Qt", ".", "TextDontClip", "|", "QtCore", ".", "Qt", ".", "AlignCenter", "|", "QtCore", ".", "Qt", ".", "AlignTop", "rect", "=", "QtCore", ".", "QRectF", "(", "x", "-", "width", "/", "2.", ",", "tickStop", "+", "offset", ",", "width", ",", "height", ")", "# p.setPen(self.pen())", "# p.drawText(rect, textFlags, vstr)", "textSpecs", ".", "append", "(", "(", "rect", ",", "textFlags", ",", "vstr", ")", ")", "profiler", "(", "'compute text'", ")", "## update max text size if needed.", "self", ".", "_updateMaxTextSize", "(", "textSize2", ")", "return", "(", "axisSpec", ",", "tickSpecs", ",", "textSpecs", ")" ]
Calls tickValues() and tickStrings() to determine where and how ticks should be drawn, then generates from this a set of drawing commands to be interpreted by drawPicture().
[ "Calls", "tickValues", "()", "and", "tickStrings", "()", "to", "determine", "where", "and", "how", "ticks", "should", "be", "drawn", "then", "generates", "from", "this", "a", "set", "of", "drawing", "commands", "to", "be", "interpreted", "by", "drawPicture", "()", "." ]
python
train
networks-lab/metaknowledge
metaknowledge/genders/nameGender.py
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/genders/nameGender.py#L54-L66
def nameStringGender(s, noExcept = False):
    """Expects `last, first`"""
    global mappingDict
    try:
        first = s.split(', ')[1].split(' ')[0].title()
    except IndexError:
        if noExcept:
            return 'Unknown'
        else:
            raise GenderException("The given String: '{}' does not have a last name, first name pair with a ', ' separation.".format(s))
    if mappingDict is None:
        mappingDict = getMapping()
    return mappingDict.get(first, 'Unknown')
[ "def", "nameStringGender", "(", "s", ",", "noExcept", "=", "False", ")", ":", "global", "mappingDict", "try", ":", "first", "=", "s", ".", "split", "(", "', '", ")", "[", "1", "]", ".", "split", "(", "' '", ")", "[", "0", "]", ".", "title", "(", ")", "except", "IndexError", ":", "if", "noExcept", ":", "return", "'Unknown'", "else", ":", "return", "GenderException", "(", "\"The given String: '{}' does not have a last name, first name pair in with a ', ' seperation.\"", ".", "format", "(", "s", ")", ")", "if", "mappingDict", "is", "None", ":", "mappingDict", "=", "getMapping", "(", ")", "return", "mappingDict", ".", "get", "(", "first", ",", "'Unknown'", ")" ]
Expects `last, first`
[ "Expects", "first", "last" ]
python
train
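The `last, first` parsing above reduces to one split chain. A sketch with a neutral fallback in place of the exception path:

def first_name(s):
    """Extract the first name from a 'Last, First M.' author string."""
    try:
        return s.split(', ')[1].split(' ')[0].title()
    except IndexError:
        return None

print(first_name('Curie, Marie S.'))  # Marie
print(first_name('Plato'))            # None -- no comma-separated pair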
abe-winter/pg13-py
pg13/pgmock.py
https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/pgmock.py#L72-L90
def create(self, ex): "helper for apply_sql in CreateX case" if ex.name in self: if ex.nexists: return raise ValueError('table_exists',ex.name) if any(c.pkey for c in ex.cols): if ex.pkey: raise sqparse2.SQLSyntaxError("don't mix table-level and column-level pkeys",ex) # todo(spec): is multi pkey permitted when defined per column? ex.pkey = sqparse2.PKeyX([c.name for c in ex.cols if c.pkey]) if ex.inherits: # todo: what if child table specifies constraints etc? this needs work. if len(ex.inherits) > 1: raise NotImplementedError('todo: multi-table inherit') parent = self[ex.inherits[0]] = copy.deepcopy(self[ex.inherits[0]]) # copy so rollback works child = self[ex.name] = table.Table(ex.name, parent.fields, parent.pkey) parent.child_tables.append(child) child.parent_table = parent else: self[ex.name]=table.Table(ex.name,ex.cols,ex.pkey.fields if ex.pkey else [])
[ "def", "create", "(", "self", ",", "ex", ")", ":", "if", "ex", ".", "name", "in", "self", ":", "if", "ex", ".", "nexists", ":", "return", "raise", "ValueError", "(", "'table_exists'", ",", "ex", ".", "name", ")", "if", "any", "(", "c", ".", "pkey", "for", "c", "in", "ex", ".", "cols", ")", ":", "if", "ex", ".", "pkey", ":", "raise", "sqparse2", ".", "SQLSyntaxError", "(", "\"don't mix table-level and column-level pkeys\"", ",", "ex", ")", "# todo(spec): is multi pkey permitted when defined per column?\r", "ex", ".", "pkey", "=", "sqparse2", ".", "PKeyX", "(", "[", "c", ".", "name", "for", "c", "in", "ex", ".", "cols", "if", "c", ".", "pkey", "]", ")", "if", "ex", ".", "inherits", ":", "# todo: what if child table specifies constraints etc? this needs work.\r", "if", "len", "(", "ex", ".", "inherits", ")", ">", "1", ":", "raise", "NotImplementedError", "(", "'todo: multi-table inherit'", ")", "parent", "=", "self", "[", "ex", ".", "inherits", "[", "0", "]", "]", "=", "copy", ".", "deepcopy", "(", "self", "[", "ex", ".", "inherits", "[", "0", "]", "]", ")", "# copy so rollback works\r", "child", "=", "self", "[", "ex", ".", "name", "]", "=", "table", ".", "Table", "(", "ex", ".", "name", ",", "parent", ".", "fields", ",", "parent", ".", "pkey", ")", "parent", ".", "child_tables", ".", "append", "(", "child", ")", "child", ".", "parent_table", "=", "parent", "else", ":", "self", "[", "ex", ".", "name", "]", "=", "table", ".", "Table", "(", "ex", ".", "name", ",", "ex", ".", "cols", ",", "ex", ".", "pkey", ".", "fields", "if", "ex", ".", "pkey", "else", "[", "]", ")" ]
helper for apply_sql in CreateX case
[ "helper", "for", "apply_sql", "in", "CreateX", "case" ]
python
train
chrisvoncsefalvay/diffiehellman
diffiehellman/decorators.py
https://github.com/chrisvoncsefalvay/diffiehellman/blob/06e656ea918c6c069d931a4e9443cb4b57d0a0cb/diffiehellman/decorators.py#L32-L44
def requires_private_key(func):
    """
    Decorator for functions that require the private key to be defined.
    """
    def func_wrapper(self, *args, **kwargs):
        if hasattr(self, "_DiffieHellman__private_key"):
            return func(self, *args, **kwargs)
        else:
            self.generate_private_key()
            return func(self, *args, **kwargs)
    return func_wrapper
[ "def", "requires_private_key", "(", "func", ")", ":", "def", "func_wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "hasattr", "(", "self", ",", "\"_DiffieHellman__private_key\"", ")", ":", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "self", ".", "generate_private_key", "(", ")", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "func_wrapper" ]
Decorator for functions that require the private key to be defined.
[ "Decorator", "for", "functions", "that", "require", "the", "private", "key", "to", "be", "defined", "." ]
python
train
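The decorator above is an instance of a general lazy-initialization pattern. A generalized sketch (the factory below is hypothetical, not the diffiehellman API):

def requires_state(init_method, attr):
    """Decorator factory: call init_method first if attr is missing."""
    def decorator(func):
        def wrapper(self, *args, **kwargs):
            if not hasattr(self, attr):
                getattr(self, init_method)()
            return func(self, *args, **kwargs)
        return wrapper
    return decorator

class KeyHolder:
    def generate_key(self):
        self._key = 42

    @requires_state('generate_key', '_key')
    def sign(self, msg):
        return (msg, self._key)

print(KeyHolder().sign('hi'))  # ('hi', 42) -- key generated on demand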
pyroscope/pyrocore
src/pyrocore/torrent/engine.py
https://github.com/pyroscope/pyrocore/blob/89ad01346a570943d20311a0b488440975876612/src/pyrocore/torrent/engine.py#L304-L331
def add_manifold_attribute(cls, name): """ Register a manifold engine attribute. @return: field definition object, or None if "name" isn't a manifold attribute. """ if name.startswith("custom_"): try: return FieldDefinition.FIELDS[name] except KeyError: field = OnDemandField(fmt.to_unicode, name, "custom attribute %r" % name.split('_', 1)[1], matcher=matching.PatternFilter) setattr(cls, name, field) # add field to all proxy objects return field elif name.startswith("kind_") and name[5:].isdigit(): try: return FieldDefinition.FIELDS[name] except KeyError: limit = int(name[5:].lstrip('0') or '0', 10) if limit > 100: raise error.UserError("kind_N: N > 100 in %r" % name) field = OnDemandField(set, name, "kinds of files that make up more than %d%% of this item's size" % limit, matcher=matching.TaggedAsFilter, formatter=_fmt_tags, engine_name="kind_%d" % limit) setattr(cls, name, field) return field
[ "def", "add_manifold_attribute", "(", "cls", ",", "name", ")", ":", "if", "name", ".", "startswith", "(", "\"custom_\"", ")", ":", "try", ":", "return", "FieldDefinition", ".", "FIELDS", "[", "name", "]", "except", "KeyError", ":", "field", "=", "OnDemandField", "(", "fmt", ".", "to_unicode", ",", "name", ",", "\"custom attribute %r\"", "%", "name", ".", "split", "(", "'_'", ",", "1", ")", "[", "1", "]", ",", "matcher", "=", "matching", ".", "PatternFilter", ")", "setattr", "(", "cls", ",", "name", ",", "field", ")", "# add field to all proxy objects", "return", "field", "elif", "name", ".", "startswith", "(", "\"kind_\"", ")", "and", "name", "[", "5", ":", "]", ".", "isdigit", "(", ")", ":", "try", ":", "return", "FieldDefinition", ".", "FIELDS", "[", "name", "]", "except", "KeyError", ":", "limit", "=", "int", "(", "name", "[", "5", ":", "]", ".", "lstrip", "(", "'0'", ")", "or", "'0'", ",", "10", ")", "if", "limit", ">", "100", ":", "raise", "error", ".", "UserError", "(", "\"kind_N: N > 100 in %r\"", "%", "name", ")", "field", "=", "OnDemandField", "(", "set", ",", "name", ",", "\"kinds of files that make up more than %d%% of this item's size\"", "%", "limit", ",", "matcher", "=", "matching", ".", "TaggedAsFilter", ",", "formatter", "=", "_fmt_tags", ",", "engine_name", "=", "\"kind_%d\"", "%", "limit", ")", "setattr", "(", "cls", ",", "name", ",", "field", ")", "return", "field" ]
Register a manifold engine attribute. @return: field definition object, or None if "name" isn't a manifold attribute.
[ "Register", "a", "manifold", "engine", "attribute", "." ]
python
train
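The dynamic-field registration above boils down to setattr on the class so every proxy instance sees the new attribute. A tiny sketch with a hypothetical field type:

class Proxy:
    pass

def add_dynamic_field(cls, name, compute):
    """Register a lazily computed attribute on every instance of cls."""
    setattr(cls, name, property(lambda self: compute(self, name)))

add_dynamic_field(Proxy, 'custom_tag', lambda self, n: 'value of %s' % n)
print(Proxy().custom_tag)  # value of custom_tag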
tango-controls/pytango
tango/utils.py
https://github.com/tango-controls/pytango/blob/9cf78c517c9cdc1081ff6d080a9646a740cc1d36/tango/utils.py#L999-L1022
def str_2_obj(obj_str, tg_type=None): """Converts a string into an object according to the given tango type :param obj_str: the string to be converted :type obj_str: :py:obj:`str` :param tg_type: tango type :type tg_type: :class:`tango.CmdArgType` :return: an object calculated from the given string :rtype: :py:obj:`object` """ if tg_type is None: return obj_str f = str if is_scalar_type(tg_type): if is_numerical_type(tg_type): if obj_str in __NO_STR_VALUE: return None if is_int_type(tg_type): f = int elif is_float_type(tg_type): f = float elif is_bool_type(tg_type): f = bool_ return f(obj_str)
[ "def", "str_2_obj", "(", "obj_str", ",", "tg_type", "=", "None", ")", ":", "if", "tg_type", "is", "None", ":", "return", "obj_str", "f", "=", "str", "if", "is_scalar_type", "(", "tg_type", ")", ":", "if", "is_numerical_type", "(", "tg_type", ")", ":", "if", "obj_str", "in", "__NO_STR_VALUE", ":", "return", "None", "if", "is_int_type", "(", "tg_type", ")", ":", "f", "=", "int", "elif", "is_float_type", "(", "tg_type", ")", ":", "f", "=", "float", "elif", "is_bool_type", "(", "tg_type", ")", ":", "f", "=", "bool_", "return", "f", "(", "obj_str", ")" ]
Converts a string into an object according to the given tango type :param obj_str: the string to be converted :type obj_str: :py:obj:`str` :param tg_type: tango type :type tg_type: :class:`tango.CmdArgType` :return: an object calculated from the given string :rtype: :py:obj:`object`
[ "Converts", "a", "string", "into", "an", "object", "according", "to", "the", "given", "tango", "type" ]
python
train
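The scalar branch above picks a converter from the type tag. A self-contained sketch of the same dispatch with invented string tags in place of tango.CmdArgType:

def str_to_obj(s, kind=None):
    """Convert a string according to a simple type tag (hypothetical tags)."""
    if kind is None:
        return s
    converters = {'int': int, 'float': float,
                  'bool': lambda v: v.lower() in ('true', '1')}
    return converters.get(kind, str)(s)

print(str_to_obj('3.14', 'float'))  # 3.14
print(str_to_obj('True', 'bool'))   # True
print(str_to_obj('abc'))            # 'abc' -- no tag, returned as-is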
openego/eDisGo
edisgo/flex_opt/curtailment.py
https://github.com/openego/eDisGo/blob/e6245bdaf236f9c49dbda5a18c1c458290f41e2b/edisgo/flex_opt/curtailment.py#L343-L366
def _check_curtailment_target(curtailment, curtailment_target, curtailment_key): """ Raises an error if curtailment target was not met in any time step. Parameters ----------- curtailment : :pandas:`pandas:DataFrame<dataframe>` Dataframe containing the curtailment in kW per generator and time step. Index is a :pandas:`pandas.DatetimeIndex<datetimeindex>`, columns are the generator representatives. curtailment_target : :pandas:`pandas.Series<series>` The curtailment in kW that was to be distributed amongst the generators. Index of the series is a :pandas:`pandas.DatetimeIndex<datetimeindex>`. curtailment_key : :obj:`str` or :obj:`tuple` with :obj:`str` The technology and weather cell ID if :obj:`tuple` or only the technology if :obj:`str` the curtailment was specified for. """ if not (abs(curtailment.sum(axis=1) - curtailment_target) < 1e-1).all(): message = 'Curtailment target not met for {}.'.format(curtailment_key) logging.error(message) raise TypeError(message)
[ "def", "_check_curtailment_target", "(", "curtailment", ",", "curtailment_target", ",", "curtailment_key", ")", ":", "if", "not", "(", "abs", "(", "curtailment", ".", "sum", "(", "axis", "=", "1", ")", "-", "curtailment_target", ")", "<", "1e-1", ")", ".", "all", "(", ")", ":", "message", "=", "'Curtailment target not met for {}.'", ".", "format", "(", "curtailment_key", ")", "logging", ".", "error", "(", "message", ")", "raise", "TypeError", "(", "message", ")" ]
Raises an error if curtailment target was not met in any time step. Parameters ----------- curtailment : :pandas:`pandas:DataFrame<dataframe>` Dataframe containing the curtailment in kW per generator and time step. Index is a :pandas:`pandas.DatetimeIndex<datetimeindex>`, columns are the generator representatives. curtailment_target : :pandas:`pandas.Series<series>` The curtailment in kW that was to be distributed amongst the generators. Index of the series is a :pandas:`pandas.DatetimeIndex<datetimeindex>`. curtailment_key : :obj:`str` or :obj:`tuple` with :obj:`str` The technology and weather cell ID if :obj:`tuple` or only the technology if :obj:`str` the curtailment was specified for.
[ "Raises", "an", "error", "if", "curtailment", "target", "was", "not", "met", "in", "any", "time", "step", "." ]
python
train
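A quick sanity check of the tolerance test above with toy data (the column and index values are made up for illustration):

import pandas as pd

idx = pd.date_range("2020-01-01", periods=3, freq="h")
curtailment = pd.DataFrame({"gen1": [1.0, 2.0, 0.5],
                            "gen2": [0.5, 0.0, 0.5]}, index=idx)
target = pd.Series([1.5, 2.0, 1.0], index=idx)
# Per-time-step sums match the target within the 0.1 kW tolerance,
# so the check above would pass without raising.
assert (abs(curtailment.sum(axis=1) - target) < 1e-1).all()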
fastai/fastai
fastai/text/transform.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/text/transform.py#L116-L120
def process_all(self, texts:Collection[str]) -> List[List[str]]: "Process a list of `texts`." if self.n_cpus <= 1: return self._process_all_1(texts) with ProcessPoolExecutor(self.n_cpus) as e: return sum(e.map(self._process_all_1, partition_by_cores(texts, self.n_cpus)), [])
[ "def", "process_all", "(", "self", ",", "texts", ":", "Collection", "[", "str", "]", ")", "->", "List", "[", "List", "[", "str", "]", "]", ":", "if", "self", ".", "n_cpus", "<=", "1", ":", "return", "self", ".", "_process_all_1", "(", "texts", ")", "with", "ProcessPoolExecutor", "(", "self", ".", "n_cpus", ")", "as", "e", ":", "return", "sum", "(", "e", ".", "map", "(", "self", ".", "_process_all_1", ",", "partition_by_cores", "(", "texts", ",", "self", ".", "n_cpus", ")", ")", ",", "[", "]", ")" ]
Process a list of `texts`.
[ "Process", "a", "list", "of", "texts", "." ]
python
train
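The fan-out pattern above, sketched with a stdlib-only stand-in for fastai's partition_by_cores (the chunking helper below is an assumption, not the real implementation):

from concurrent.futures import ProcessPoolExecutor

def chunk(xs, n_parts):
    # Simplified stand-in for partition_by_cores.
    k = max(1, len(xs) // n_parts)
    return [xs[i:i + k] for i in range(0, len(xs), k)]

def process_chunk(texts):
    return [t.lower().split() for t in texts]   # toy per-chunk tokenizer

if __name__ == "__main__":
    texts = ["Hello World", "Foo Bar", "Baz Qux", "A B"]
    with ProcessPoolExecutor(2) as e:
        flat = sum(e.map(process_chunk, chunk(texts, 2)), [])
    # flat -> [['hello', 'world'], ['foo', 'bar'], ['baz', 'qux'], ['a', 'b']]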
bitesofcode/projexui
projexui/widgets/xnodewidget/xnodescene.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xnodewidget/xnodescene.py#L227-L247
def addConnection( self, cls = None ): """ Creates a new connection instance in the scene. If the optional cls \ parameter is not supplied, then the default connection class will \ be used when creating the connection. :param cls subclass of <XNodeConnection> :return <XNodeConnection> || None """ # make sure we have a valid class if ( not cls ): cls = self.defaultConnectionClass() if ( not cls ): return None # create the new connection connection = cls(self) connection.setLayer(self.currentLayer()) self.addItem(connection) return connection
[ "def", "addConnection", "(", "self", ",", "cls", "=", "None", ")", ":", "# make sure we have a valid class", "if", "(", "not", "cls", ")", ":", "cls", "=", "self", ".", "defaultConnectionClass", "(", ")", "if", "(", "not", "cls", ")", ":", "return", "None", "# create the new connection", "connection", "=", "cls", "(", "self", ")", "connection", ".", "setLayer", "(", "self", ".", "currentLayer", "(", ")", ")", "self", ".", "addItem", "(", "connection", ")", "return", "connection" ]
Creates a new connection instance in the scene. If the optional cls \ parameter is not supplied, then the default connection class will \ be used when creating the connection. :param cls subclass of <XNodeConnection> :return <XNodeConnection> || None
[ "Creates", "a", "new", "connection", "instance", "in", "the", "scene", ".", "If", "the", "optional", "cls", "\\", "parameter", "is", "not", "supplied", "then", "the", "default", "connection", "class", "will", "\\", "be", "used", "when", "creating", "the", "connection", ".", ":", "param", "cls", "subclass", "of", "<XNodeConnection", ">", ":", "return", "<XNodeConnection", ">", "||", "None" ]
python
train
androguard/androguard
androguard/core/analysis/analysis.py
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/analysis/analysis.py#L1658-L1701
def get_permissions(self, apilevel=None): """ Returns the permissions and the API method based on the API level specified. This can be used to find usage of API methods which require a permission. Should be used in combination with an :class:`~androguard.core.bytecodes.apk.APK`. The returned permissions are a list, as some API methods require multiple permissions at once. The following example shows the usage and how to get the calling methods using XREF: example:: from androguard.misc import AnalyzeAPK a, d, dx = AnalyzeAPK("somefile.apk") for meth, perm in dx.get_permissions(a.get_effective_target_sdk_version()): print("Using API method {} for permission {}".format(meth, perm)) print("used in:") for _, m, _ in meth.get_xref_from(): print(m.full_name) ..note:: This method might be unreliable and might not extract all used permissions. The permission mapping is based on [Axplorer](https://github.com/reddr/axplorer) and might be incomplete due to the nature of the extraction process. Unfortunately, there is no official API<->Permission mapping. The output of this method relies also on the set API level. If the wrong API level is used, the results might be wrong. :param apilevel: API level to load, or None for default :return: yields tuples of :class:`MethodClassAnalysis` (of the API method) and list of permission string """ # TODO maybe have the API level loading in the __init__ method and pass the APK as well? permmap = load_api_specific_resource_module('api_permission_mappings', apilevel) if not permmap: raise ValueError("No permission mapping found! Is one available? " "The requested API level was '{}'".format(apilevel)) for cls in self.get_external_classes(): for meth_analysis in cls.get_methods(): meth = meth_analysis.get_method() if meth.permission_api_name in permmap: yield meth_analysis, permmap[meth.permission_api_name]
[ "def", "get_permissions", "(", "self", ",", "apilevel", "=", "None", ")", ":", "# TODO maybe have the API level loading in the __init__ method and pass the APK as well?", "permmap", "=", "load_api_specific_resource_module", "(", "'api_permission_mappings'", ",", "apilevel", ")", "if", "not", "permmap", ":", "raise", "ValueError", "(", "\"No permission mapping found! Is one available? \"", "\"The requested API level was '{}'\"", ".", "format", "(", "apilevel", ")", ")", "for", "cls", "in", "self", ".", "get_external_classes", "(", ")", ":", "for", "meth_analysis", "in", "cls", ".", "get_methods", "(", ")", ":", "meth", "=", "meth_analysis", ".", "get_method", "(", ")", "if", "meth", ".", "permission_api_name", "in", "permmap", ":", "yield", "meth_analysis", ",", "permmap", "[", "meth", ".", "permission_api_name", "]" ]
Returns the permissions and the API method based on the API level specified. This can be used to find usage of API methods which require a permission. Should be used in combination with an :class:`~androguard.core.bytecodes.apk.APK`. The returned permissions are a list, as some API methods require multiple permissions at once. The following example shows the usage and how to get the calling methods using XREF: example:: from androguard.misc import AnalyzeAPK a, d, dx = AnalyzeAPK("somefile.apk") for meth, perm in dx.get_permissions(a.get_effective_target_sdk_version()): print("Using API method {} for permission {}".format(meth, perm)) print("used in:") for _, m, _ in meth.get_xref_from(): print(m.full_name) .. note:: This method might be unreliable and might not extract all used permissions. The permission mapping is based on [Axplorer](https://github.com/reddr/axplorer) and might be incomplete due to the nature of the extraction process. Unfortunately, there is no official API<->Permission mapping. The output of this method relies also on the set API level. If the wrong API level is used, the results might be wrong. :param apilevel: API level to load, or None for default :return: yields tuples of :class:`MethodClassAnalysis` (of the API method) and list of permission string
[ "Returns", "the", "permissions", "and", "the", "API", "method", "based", "on", "the", "API", "level", "specified", ".", "This", "can", "be", "used", "to", "find", "usage", "of", "API", "methods", "which", "require", "a", "permission", ".", "Should", "be", "used", "in", "combination", "with", "an", ":", "class", ":", "~androguard", ".", "core", ".", "bytecodes", ".", "apk", ".", "APK", "." ]
python
train
djm/python-scrapyd-api
scrapyd_api/wrapper.py
https://github.com/djm/python-scrapyd-api/blob/42f287cf83c3a5bd46795f4f85cce02a56829921/scrapyd_api/wrapper.py#L120-L130
def job_status(self, project, job_id): """ Retrieves the 'status' of a specific job specified by its id. Derived, utilises Scrapyd's list jobs endpoint to provide the answer. """ all_jobs = self.list_jobs(project) for state in constants.JOB_STATES: job_ids = [job['id'] for job in all_jobs[state]] if job_id in job_ids: return state return ''
[ "def", "job_status", "(", "self", ",", "project", ",", "job_id", ")", ":", "all_jobs", "=", "self", ".", "list_jobs", "(", "project", ")", "for", "state", "in", "constants", ".", "JOB_STATES", ":", "job_ids", "=", "[", "job", "[", "'id'", "]", "for", "job", "in", "all_jobs", "[", "state", "]", "]", "if", "job_id", "in", "job_ids", ":", "return", "state", "return", "''" ]
Retrieves the 'status' of a specific job specified by its id. Derived, utilises Scrapyd's list jobs endpoint to provide the answer.
[ "Retrieves", "the", "status", "of", "a", "specific", "job", "specified", "by", "its", "id", ".", "Derived", "utilises", "Scrapyd", "s", "list", "jobs", "endpoint", "to", "provide", "the", "answer", "." ]
python
train
Crunch-io/crunch-cube
src/cr/cube/crunch_cube.py
https://github.com/Crunch-io/crunch-cube/blob/a837840755690eb14b2ec8e8d93b4104e01c854f/src/cr/cube/crunch_cube.py#L1256-L1305
def prune_indices(self, transforms=None): """Return indices of pruned rows and columns as list. The return value has one of three possible forms: * a 1-element list of row indices (in case of 1D cube) * 2-element list of row and col indices (in case of 2D cube) * n-element list of tuples of 2 elements (if it's 3D cube). For each case, the 2 elements are the ROW and COL indices of the elements that need to be pruned. If it's a 3D cube, these indices are calculated "per slice", that is NOT on the 0th dimension (as the 0th dimension represents the slices). """ if self.ndim >= 3: # In case of a 3D cube, return list of tuples # (of row and col pruned indices). return self._prune_3d_indices(transforms) def prune_non_3d_indices(transforms): row_margin = self._pruning_base( hs_dims=transforms, axis=self.row_direction_axis ) row_indices = self._margin_pruned_indices( row_margin, self._inserted_dim_inds(transforms, 0), 0 ) if row_indices.ndim > 1: # In case of MR, we'd have 2D prune indices row_indices = row_indices.all(axis=1) if self.ndim == 1: return [row_indices] col_margin = self._pruning_base( hs_dims=transforms, axis=self._col_direction_axis ) col_indices = self._margin_pruned_indices( col_margin, self._inserted_dim_inds(transforms, 1), 1 ) if col_indices.ndim > 1: # In case of MR, we'd have 2D prune indices col_indices = col_indices.all(axis=0) return [row_indices, col_indices] # In case of 1 or 2 D cubes, return a list of # row indices (or row and col indices) return prune_non_3d_indices(transforms)
[ "def", "prune_indices", "(", "self", ",", "transforms", "=", "None", ")", ":", "if", "self", ".", "ndim", ">=", "3", ":", "# In case of a 3D cube, return list of tuples", "# (of row and col pruned indices).", "return", "self", ".", "_prune_3d_indices", "(", "transforms", ")", "def", "prune_non_3d_indices", "(", "transforms", ")", ":", "row_margin", "=", "self", ".", "_pruning_base", "(", "hs_dims", "=", "transforms", ",", "axis", "=", "self", ".", "row_direction_axis", ")", "row_indices", "=", "self", ".", "_margin_pruned_indices", "(", "row_margin", ",", "self", ".", "_inserted_dim_inds", "(", "transforms", ",", "0", ")", ",", "0", ")", "if", "row_indices", ".", "ndim", ">", "1", ":", "# In case of MR, we'd have 2D prune indices", "row_indices", "=", "row_indices", ".", "all", "(", "axis", "=", "1", ")", "if", "self", ".", "ndim", "==", "1", ":", "return", "[", "row_indices", "]", "col_margin", "=", "self", ".", "_pruning_base", "(", "hs_dims", "=", "transforms", ",", "axis", "=", "self", ".", "_col_direction_axis", ")", "col_indices", "=", "self", ".", "_margin_pruned_indices", "(", "col_margin", ",", "self", ".", "_inserted_dim_inds", "(", "transforms", ",", "1", ")", ",", "1", ")", "if", "col_indices", ".", "ndim", ">", "1", ":", "# In case of MR, we'd have 2D prune indices", "col_indices", "=", "col_indices", ".", "all", "(", "axis", "=", "0", ")", "return", "[", "row_indices", ",", "col_indices", "]", "# In case of 1 or 2 D cubes, return a list of", "# row indices (or row and col indices)", "return", "prune_non_3d_indices", "(", "transforms", ")" ]
Return indices of pruned rows and columns as list. The return value has one of three possible forms: * a 1-element list of row indices (in case of 1D cube) * 2-element list of row and col indices (in case of 2D cube) * n-element list of tuples of 2 elements (if it's a 3D cube). For each case, the 2 elements are the ROW and COL indices of the elements that need to be pruned. If it's a 3D cube, these indices are calculated "per slice", that is NOT on the 0th dimension (as the 0th dimension represents the slices).
[ "Return", "indices", "of", "pruned", "rows", "and", "columns", "as", "list", "." ]
python
train
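The core idea, that margins of zero mark rows and columns to prune, reduces to a small numpy check; the real method additionally handles inserted H&S dimensions and MR cubes:

import numpy as np

counts = np.array([[5, 0, 2],
                   [0, 0, 0],
                   [1, 3, 0]])
row_prune = counts.sum(axis=1) == 0   # -> [False,  True, False]
col_prune = counts.sum(axis=0) == 0   # -> [False, False, False]
# Only the all-zero middle row would be pruned from this 2D "cube".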
aio-libs/aiohttp
aiohttp/web_request.py
https://github.com/aio-libs/aiohttp/blob/9504fe2affaaff673fa4f3754c1c44221f8ba47d/aiohttp/web_request.py#L528-L533
def has_body(self) -> bool: """Return True if request's HTTP BODY can be read, False otherwise.""" warnings.warn( "Deprecated, use .can_read_body #2005", DeprecationWarning, stacklevel=2) return not self._payload.at_eof()
[ "def", "has_body", "(", "self", ")", "->", "bool", ":", "warnings", ".", "warn", "(", "\"Deprecated, use .can_read_body #2005\"", ",", "DeprecationWarning", ",", "stacklevel", "=", "2", ")", "return", "not", "self", ".", "_payload", ".", "at_eof", "(", ")" ]
Return True if request's HTTP BODY can be read, False otherwise.
[ "Return", "True", "if", "request", "s", "HTTP", "BODY", "can", "be", "read", "False", "otherwise", "." ]
python
train
EventRegistry/event-registry-python
eventregistry/QueryEvents.py
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/QueryEvents.py#L211-L220
def count(self, eventRegistry): """ return the number of events that match the criteria """ self.setRequestedResult(RequestEventsInfo()) res = eventRegistry.execQuery(self) if "error" in res: print(res["error"]) count = res.get("events", {}).get("totalResults", 0) return count
[ "def", "count", "(", "self", ",", "eventRegistry", ")", ":", "self", ".", "setRequestedResult", "(", "RequestEventsInfo", "(", ")", ")", "res", "=", "eventRegistry", ".", "execQuery", "(", "self", ")", "if", "\"error\"", "in", "res", ":", "print", "(", "res", "[", "\"error\"", "]", ")", "count", "=", "res", ".", "get", "(", "\"events\"", ",", "{", "}", ")", ".", "get", "(", "\"totalResults\"", ",", "0", ")", "return", "count" ]
return the number of events that match the criteria
[ "return", "the", "number", "of", "events", "that", "match", "the", "criteria" ]
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/tools/pdf_to_booklet.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/tools/pdf_to_booklet.py#L192-L198
def require(executable: str, explanation: str = "") -> None: """ Ensures that the external tool is available. Asserts upon failure. """ assert shutil.which(executable), "Need {!r} on the PATH.{}".format( executable, "\n" + explanation if explanation else "")
[ "def", "require", "(", "executable", ":", "str", ",", "explanation", ":", "str", "=", "\"\"", ")", "->", "None", ":", "assert", "shutil", ".", "which", "(", "executable", ")", ",", "\"Need {!r} on the PATH.{}\"", ".", "format", "(", "executable", ",", "\"\\n\"", "+", "explanation", "if", "explanation", "else", "\"\"", ")" ]
Ensures that the external tool is available. Asserts upon failure.
[ "Ensures", "that", "the", "external", "tool", "is", "available", ".", "Asserts", "upon", "failure", "." ]
python
train
sepandhaghighi/pycm
pycm/pycm_compare.py
https://github.com/sepandhaghighi/pycm/blob/cb03258afd6a821d10acba73c965aaac174bedcd/pycm/pycm_compare.py#L108-L132
def save_report( self, name, address=True): """ Save Compare report in .comp (flat file format). :param name: filename :type name : str :param address: flag for address return :type address : bool :return: saving Status as dict {"Status":bool , "Message":str} """ try: message = None file = open(name + ".comp", "w") report = compare_report_print( self.sorted, self.scores, self.best_name) file.write(report) file.close() if address: message = os.path.join(os.getcwd(), name + ".comp") return {"Status": True, "Message": message} except Exception as e: return {"Status": False, "Message": str(e)}
[ "def", "save_report", "(", "self", ",", "name", ",", "address", "=", "True", ")", ":", "try", ":", "message", "=", "None", "file", "=", "open", "(", "name", "+", "\".comp\"", ",", "\"w\"", ")", "report", "=", "compare_report_print", "(", "self", ".", "sorted", ",", "self", ".", "scores", ",", "self", ".", "best_name", ")", "file", ".", "write", "(", "report", ")", "file", ".", "close", "(", ")", "if", "address", ":", "message", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "name", "+", "\".comp\"", ")", "return", "{", "\"Status\"", ":", "True", ",", "\"Message\"", ":", "message", "}", "except", "Exception", "as", "e", ":", "return", "{", "\"Status\"", ":", "False", ",", "\"Message\"", ":", "str", "(", "e", ")", "}" ]
Save Compare report in .comp (flat file format). :param name: filename :type name : str :param address: flag for address return :type address : bool :return: saving Status as dict {"Status":bool , "Message":str}
[ "Save", "Compare", "report", "in", ".", "comp", "(", "flat", "file", "format", ")", "." ]
python
train
allianceauth/allianceauth
allianceauth/eveonline/autogroups/signals.py
https://github.com/allianceauth/allianceauth/blob/6585b07e96571a99a4d6dc03cc03f9b8c8f690ca/allianceauth/eveonline/autogroups/signals.py#L45-L49
def check_groups_on_profile_update(sender, instance, created, *args, **kwargs): """ Trigger check when main character or state changes. """ AutogroupsConfig.objects.update_groups_for_user(instance.user)
[ "def", "check_groups_on_profile_update", "(", "sender", ",", "instance", ",", "created", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "AutogroupsConfig", ".", "objects", ".", "update_groups_for_user", "(", "instance", ".", "user", ")" ]
Trigger check when main character or state changes.
[ "Trigger", "check", "when", "main", "character", "or", "state", "changes", "." ]
python
train
HumanCellAtlas/dcp-cli
hca/dss/__init__.py
https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/dss/__init__.py#L211-L237
def _download_file(self, dss_file, dest_path, num_retries=10, min_delay_seconds=0.25): """ Attempt to download the data. If a retryable exception occurs, we wait a bit and retry again. The delay increases each time we fail and decreases each time we successfully read a block. We set a quota for the number of failures that goes up with every successful block read and down with each failure. If we can, we will attempt HTTP resume. However, we verify that the server supports HTTP resume. If the ranged get doesn't yield the correct header, then we start over. """ directory, _ = os.path.split(dest_path) if directory: try: os.makedirs(directory) except OSError as e: if e.errno != errno.EEXIST: raise with atomic_write(dest_path, mode="wb", overwrite=True) as fh: if dss_file.size == 0: return download_hash = self._do_download_file(dss_file, fh, num_retries, min_delay_seconds) if download_hash.lower() != dss_file.sha256.lower(): # No need to delete what's been written. atomic_write ensures we're cleaned up logger.error("%s", "File {}: GET FAILED. Checksum mismatch.".format(dss_file.uuid)) raise ValueError("Expected sha256 {} Received sha256 {}".format( dss_file.sha256.lower(), download_hash.lower()))
[ "def", "_download_file", "(", "self", ",", "dss_file", ",", "dest_path", ",", "num_retries", "=", "10", ",", "min_delay_seconds", "=", "0.25", ")", ":", "directory", ",", "_", "=", "os", ".", "path", ".", "split", "(", "dest_path", ")", "if", "directory", ":", "try", ":", "os", ".", "makedirs", "(", "directory", ")", "except", "OSError", "as", "e", ":", "if", "e", ".", "errno", "!=", "errno", ".", "EEXIST", ":", "raise", "with", "atomic_write", "(", "dest_path", ",", "mode", "=", "\"wb\"", ",", "overwrite", "=", "True", ")", "as", "fh", ":", "if", "dss_file", ".", "size", "==", "0", ":", "return", "download_hash", "=", "self", ".", "_do_download_file", "(", "dss_file", ",", "fh", ",", "num_retries", ",", "min_delay_seconds", ")", "if", "download_hash", ".", "lower", "(", ")", "!=", "dss_file", ".", "sha256", ".", "lower", "(", ")", ":", "# No need to delete what's been written. atomic_write ensures we're cleaned up", "logger", ".", "error", "(", "\"%s\"", ",", "\"File {}: GET FAILED. Checksum mismatch.\"", ".", "format", "(", "dss_file", ".", "uuid", ")", ")", "raise", "ValueError", "(", "\"Expected sha256 {} Received sha256 {}\"", ".", "format", "(", "dss_file", ".", "sha256", ".", "lower", "(", ")", ",", "download_hash", ".", "lower", "(", ")", ")", ")" ]
Attempt to download the data. If a retryable exception occurs, we wait a bit and retry again. The delay increases each time we fail and decreases each time we successfully read a block. We set a quota for the number of failures that goes up with every successful block read and down with each failure. If we can, we will attempt HTTP resume. However, we verify that the server supports HTTP resume. If the ranged get doesn't yield the correct header, then we start over.
[ "Attempt", "to", "download", "the", "data", ".", "If", "a", "retryable", "exception", "occurs", "we", "wait", "a", "bit", "and", "retry", "again", ".", "The", "delay", "increases", "each", "time", "we", "fail", "and", "decreases", "each", "time", "we", "successfully", "read", "a", "block", ".", "We", "set", "a", "quota", "for", "the", "number", "of", "failures", "that", "goes", "up", "with", "every", "successful", "block", "read", "and", "down", "with", "each", "failure", "." ]
python
train
shoebot/shoebot
shoebot/grammar/livecode.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/shoebot/grammar/livecode.py#L125-L133
def call_bad_cb(self, tb): """ If bad_cb returns True then keep it :param tb: traceback that caused exception :return: """ with LiveExecution.lock: if self.bad_cb and not self.bad_cb(tb): self.bad_cb = None
[ "def", "call_bad_cb", "(", "self", ",", "tb", ")", ":", "with", "LiveExecution", ".", "lock", ":", "if", "self", ".", "bad_cb", "and", "not", "self", ".", "bad_cb", "(", "tb", ")", ":", "self", ".", "bad_cb", "=", "None" ]
If bad_cb returns True then keep it :param tb: traceback that caused exception :return:
[ "If", "bad_cb", "returns", "True", "then", "keep", "it", ":", "param", "tb", ":", "traceback", "that", "caused", "exception", ":", "return", ":" ]
python
valid
thespacedoctor/qubits
qubits/datagenerator.py
https://github.com/thespacedoctor/qubits/blob/3c02ace7226389841c6bb838d045c11bed61a3c2/qubits/datagenerator.py#L598-L661
def generate_kcorrection_listing_database( log, restFrameFilter, pathToOutputDirectory, pathToSpectralDatabase, temporalResolution=4.0, redshiftResolution=0.1, redshiftLower=0.0, redshiftUpper=1.0): """ *Generate the Kg* k-corrections for a range of redshifts given a list of spectra* **Key Arguments:** - ``log`` -- logger - ``restFrameFilter`` -- the filter to generate the K-corrections against - ``pathToOutputDirectory`` -- path to the output directory (provided by the user) - ``pathToSpectralDatabase`` -- path to the directory containing the spectral database - ``temporalResolution`` -- temporal resolution at which to calculate the k-correcions - ``redshiftResolution`` -- resolution of the k-correction database (at what redshift points do you want the k-corrections calculated) - ``redshiftLower`` -- lower redshift in range of k-corrections to be calculated - ``redshiftUpper`` -- upper redshift in range of k-corrections to be calculated **Return:** - None """ ################ > IMPORTS ################ ## STANDARD LIB ## import re import os import shutil ## THIRD PARTY ## import pysynphot as syn import yaml ## LOCAL APPLICATION ## mul = 10000 div = 10000. fileName = pathToOutputDirectory + "/transient_light_curves.yaml" stream = file(fileName, 'r') generatedLCs = yaml.load(stream) # REMOVE OLD DATABASE try: shutil.rmtree(pathToOutputDirectory + "/k_corrections") except: pass models = generatedLCs.keys() for model in models: for redshift in range(int(redshiftLower * mul), int(redshiftUpper * mul), int(redshiftResolution * mul)): redshift = redshift / div generate_single_kcorrection_listing( log, pathToOutputDirectory=pathToOutputDirectory, pathToSpectralDatabase=pathToSpectralDatabase, model=model, redshift=redshift, restFrameFilter=restFrameFilter, temporalResolution=temporalResolution) return
[ "def", "generate_kcorrection_listing_database", "(", "log", ",", "restFrameFilter", ",", "pathToOutputDirectory", ",", "pathToSpectralDatabase", ",", "temporalResolution", "=", "4.0", ",", "redshiftResolution", "=", "0.1", ",", "redshiftLower", "=", "0.0", ",", "redshiftUpper", "=", "1.0", ")", ":", "################ > IMPORTS ################", "## STANDARD LIB ##", "import", "re", "import", "os", "import", "shutil", "## THIRD PARTY ##", "import", "pysynphot", "as", "syn", "import", "yaml", "## LOCAL APPLICATION ##", "mul", "=", "10000", "div", "=", "10000.", "fileName", "=", "pathToOutputDirectory", "+", "\"/transient_light_curves.yaml\"", "stream", "=", "file", "(", "fileName", ",", "'r'", ")", "generatedLCs", "=", "yaml", ".", "load", "(", "stream", ")", "# REMOVE OLD DATABASE", "try", ":", "shutil", ".", "rmtree", "(", "pathToOutputDirectory", "+", "\"/k_corrections\"", ")", "except", ":", "pass", "models", "=", "generatedLCs", ".", "keys", "(", ")", "for", "model", "in", "models", ":", "for", "redshift", "in", "range", "(", "int", "(", "redshiftLower", "*", "mul", ")", ",", "int", "(", "redshiftUpper", "*", "mul", ")", ",", "int", "(", "redshiftResolution", "*", "mul", ")", ")", ":", "redshift", "=", "redshift", "/", "div", "generate_single_kcorrection_listing", "(", "log", ",", "pathToOutputDirectory", "=", "pathToOutputDirectory", ",", "pathToSpectralDatabase", "=", "pathToSpectralDatabase", ",", "model", "=", "model", ",", "redshift", "=", "redshift", ",", "restFrameFilter", "=", "restFrameFilter", ",", "temporalResolution", "=", "temporalResolution", ")", "return" ]
*Generate the Kg* k-corrections for a range of redshifts given a list of spectra* **Key Arguments:** - ``log`` -- logger - ``restFrameFilter`` -- the filter to generate the K-corrections against - ``pathToOutputDirectory`` -- path to the output directory (provided by the user) - ``pathToSpectralDatabase`` -- path to the directory containing the spectral database - ``temporalResolution`` -- temporal resolution at which to calculate the k-corrections - ``redshiftResolution`` -- resolution of the k-correction database (at what redshift points do you want the k-corrections calculated) - ``redshiftLower`` -- lower redshift in range of k-corrections to be calculated - ``redshiftUpper`` -- upper redshift in range of k-corrections to be calculated **Return:** - None
[ "*", "Generate", "the", "Kg", "*", "k", "-", "corrections", "for", "a", "range", "of", "redshifts", "given", "a", "list", "of", "spectra", "*" ]
python
train
apache/incubator-mxnet
example/rcnn/symdata/bbox.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/rcnn/symdata/bbox.py#L61-L76
def clip_boxes(boxes, im_shape): """ Clip boxes to image boundaries. :param boxes: [N, 4* num_classes] :param im_shape: tuple of 2 :return: [N, 4* num_classes] """ # x1 >= 0 boxes[:, 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - 1), 0) # y1 >= 0 boxes[:, 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - 1), 0) # x2 < im_shape[1] boxes[:, 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - 1), 0) # y2 < im_shape[0] boxes[:, 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - 1), 0) return boxes
[ "def", "clip_boxes", "(", "boxes", ",", "im_shape", ")", ":", "# x1 >= 0", "boxes", "[", ":", ",", "0", ":", ":", "4", "]", "=", "np", ".", "maximum", "(", "np", ".", "minimum", "(", "boxes", "[", ":", ",", "0", ":", ":", "4", "]", ",", "im_shape", "[", "1", "]", "-", "1", ")", ",", "0", ")", "# y1 >= 0", "boxes", "[", ":", ",", "1", ":", ":", "4", "]", "=", "np", ".", "maximum", "(", "np", ".", "minimum", "(", "boxes", "[", ":", ",", "1", ":", ":", "4", "]", ",", "im_shape", "[", "0", "]", "-", "1", ")", ",", "0", ")", "# x2 < im_shape[1]", "boxes", "[", ":", ",", "2", ":", ":", "4", "]", "=", "np", ".", "maximum", "(", "np", ".", "minimum", "(", "boxes", "[", ":", ",", "2", ":", ":", "4", "]", ",", "im_shape", "[", "1", "]", "-", "1", ")", ",", "0", ")", "# y2 < im_shape[0]", "boxes", "[", ":", ",", "3", ":", ":", "4", "]", "=", "np", ".", "maximum", "(", "np", ".", "minimum", "(", "boxes", "[", ":", ",", "3", ":", ":", "4", "]", ",", "im_shape", "[", "0", "]", "-", "1", ")", ",", "0", ")", "return", "boxes" ]
Clip boxes to image boundaries. :param boxes: [N, 4* num_classes] :param im_shape: tuple of 2 :return: [N, 4* num_classes]
[ "Clip", "boxes", "to", "image", "boundaries", ".", ":", "param", "boxes", ":", "[", "N", "4", "*", "num_classes", "]", ":", "param", "im_shape", ":", "tuple", "of", "2", ":", "return", ":", "[", "N", "4", "*", "num_classes", "]" ]
python
train
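A worked example of the clipping above for a single box on a 480x640 image (im_shape is (height, width)):

import numpy as np

boxes = np.array([[-5.0, 10.0, 700.0, 300.0]])   # [x1, y1, x2, y2]
clipped = clip_boxes(boxes.copy(), im_shape=(480, 640))
# x1 is clamped to 0 and x2 to 639; y1/y2 already fit:
# clipped -> [[  0.  10. 639. 300.]]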
spotify/gordon
gordon/main.py
https://github.com/spotify/gordon/blob/8dbf54a032cfaa8f003264682456236b6a69c039/gordon/main.py#L68-L74
def _deep_merge_dict(a, b): """Additively merge right side dict into left side dict.""" for k, v in b.items(): if k in a and isinstance(a[k], dict) and isinstance(v, dict): _deep_merge_dict(a[k], v) else: a[k] = v
[ "def", "_deep_merge_dict", "(", "a", ",", "b", ")", ":", "for", "k", ",", "v", "in", "b", ".", "items", "(", ")", ":", "if", "k", "in", "a", "and", "isinstance", "(", "a", "[", "k", "]", ",", "dict", ")", "and", "isinstance", "(", "v", ",", "dict", ")", ":", "_deep_merge_dict", "(", "a", "[", "k", "]", ",", "v", ")", "else", ":", "a", "[", "k", "]", "=", "v" ]
Additively merge right side dict into left side dict.
[ "Additively", "merge", "right", "side", "dict", "into", "left", "side", "dict", "." ]
python
train
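Example of the additive merge above: nested dicts are merged key by key and the right side wins on conflicts.

a = {"logging": {"level": "info", "json": False}, "port": 8080}
b = {"logging": {"level": "debug"}, "host": "0.0.0.0"}
_deep_merge_dict(a, b)
# a -> {'logging': {'level': 'debug', 'json': False},
#       'port': 8080, 'host': '0.0.0.0'}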
jleclanche/fireplace
fireplace/card.py
https://github.com/jleclanche/fireplace/blob/d0fc0e97e185c0210de86631be20638659c0609e/fireplace/card.py#L328-L339
def battlecry_requires_target(self): """ True if the play action of the card requires a target """ if self.has_combo and self.controller.combo: if PlayReq.REQ_TARGET_FOR_COMBO in self.requirements: return True for req in TARGETING_PREREQUISITES: if req in self.requirements: return True return False
[ "def", "battlecry_requires_target", "(", "self", ")", ":", "if", "self", ".", "has_combo", "and", "self", ".", "controller", ".", "combo", ":", "if", "PlayReq", ".", "REQ_TARGET_FOR_COMBO", "in", "self", ".", "requirements", ":", "return", "True", "for", "req", "in", "TARGETING_PREREQUISITES", ":", "if", "req", "in", "self", ".", "requirements", ":", "return", "True", "return", "False" ]
True if the play action of the card requires a target
[ "True", "if", "the", "play", "action", "of", "the", "card", "requires", "a", "target" ]
python
train
saltstack/salt
salt/states/cmd.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/cmd.py#L251-L310
def _reinterpreted_state(state): ''' Re-interpret the state returned by salt.state.run using our protocol. ''' ret = state['changes'] state['changes'] = {} state['comment'] = '' out = ret.get('stdout') if not out: if ret.get('stderr'): state['comment'] = ret['stderr'] return state is_json = False try: data = salt.utils.json.loads(out) if not isinstance(data, dict): return _failout( state, 'script JSON output must be a JSON object (e.g., {})!' ) is_json = True except ValueError: idx = out.rstrip().rfind('\n') if idx != -1: out = out[idx + 1:] data = {} try: for item in salt.utils.args.shlex_split(out): key, val = item.split('=') data[key] = val except ValueError: state = _failout( state, 'Failed parsing script output! ' 'Stdout must be JSON or a line of name=value pairs.' ) state['changes'].update(ret) return state changed = _is_true(data.get('changed', 'no')) if 'comment' in data: state['comment'] = data['comment'] del data['comment'] if changed: for key in ret: data.setdefault(key, ret[key]) # if stdout is the state output in JSON, don't show it. # otherwise it contains the one line name=value pairs, strip it. data['stdout'] = '' if is_json else data.get('stdout', '')[:idx] state['changes'] = data #FIXME: if it's not changed but there's stdout and/or stderr then those # won't be shown as the function output. (though, they will be shown # inside INFO logs). return state
[ "def", "_reinterpreted_state", "(", "state", ")", ":", "ret", "=", "state", "[", "'changes'", "]", "state", "[", "'changes'", "]", "=", "{", "}", "state", "[", "'comment'", "]", "=", "''", "out", "=", "ret", ".", "get", "(", "'stdout'", ")", "if", "not", "out", ":", "if", "ret", ".", "get", "(", "'stderr'", ")", ":", "state", "[", "'comment'", "]", "=", "ret", "[", "'stderr'", "]", "return", "state", "is_json", "=", "False", "try", ":", "data", "=", "salt", ".", "utils", ".", "json", ".", "loads", "(", "out", ")", "if", "not", "isinstance", "(", "data", ",", "dict", ")", ":", "return", "_failout", "(", "state", ",", "'script JSON output must be a JSON object (e.g., {})!'", ")", "is_json", "=", "True", "except", "ValueError", ":", "idx", "=", "out", ".", "rstrip", "(", ")", ".", "rfind", "(", "'\\n'", ")", "if", "idx", "!=", "-", "1", ":", "out", "=", "out", "[", "idx", "+", "1", ":", "]", "data", "=", "{", "}", "try", ":", "for", "item", "in", "salt", ".", "utils", ".", "args", ".", "shlex_split", "(", "out", ")", ":", "key", ",", "val", "=", "item", ".", "split", "(", "'='", ")", "data", "[", "key", "]", "=", "val", "except", "ValueError", ":", "state", "=", "_failout", "(", "state", ",", "'Failed parsing script output! '", "'Stdout must be JSON or a line of name=value pairs.'", ")", "state", "[", "'changes'", "]", ".", "update", "(", "ret", ")", "return", "state", "changed", "=", "_is_true", "(", "data", ".", "get", "(", "'changed'", ",", "'no'", ")", ")", "if", "'comment'", "in", "data", ":", "state", "[", "'comment'", "]", "=", "data", "[", "'comment'", "]", "del", "data", "[", "'comment'", "]", "if", "changed", ":", "for", "key", "in", "ret", ":", "data", ".", "setdefault", "(", "key", ",", "ret", "[", "key", "]", ")", "# if stdout is the state output in JSON, don't show it.", "# otherwise it contains the one line name=value pairs, strip it.", "data", "[", "'stdout'", "]", "=", "''", "if", "is_json", "else", "data", ".", "get", "(", "'stdout'", ",", "''", ")", "[", ":", "idx", "]", "state", "[", "'changes'", "]", "=", "data", "#FIXME: if it's not changed but there's stdout and/or stderr then those", "# won't be shown as the function output. (though, they will be shown", "# inside INFO logs).", "return", "state" ]
Re-interpret the state returned by salt.state.run using our protocol.
[ "Re", "-", "interpret", "the", "state", "returned", "by", "salt", ".", "state", ".", "run", "using", "our", "protocol", "." ]
python
train
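The name=value fallback protocol above, sketched with the stdlib shlex in place of salt.utils.args.shlex_split (the output string is a made-up example):

import shlex

out = "changed=yes comment=resized"
data = {}
for item in shlex.split(out):
    key, val = item.split("=")   # mirrors the parsing loop above
    data[key] = val
# data -> {'changed': 'yes', 'comment': 'resized'}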
celiao/rtsimple
rtsimple/movies.py
https://github.com/celiao/rtsimple/blob/91f82cbd61a745bbe3a2cca54dfbb6b0ac123b86/rtsimple/movies.py#L60-L70
def cast(self, **kwargs): """Get the cast for a movie specified by id from the API. Returns: A dict representation of the JSON returned from the API. """ path = self._get_id_path('cast') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
[ "def", "cast", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_id_path", "(", "'cast'", ")", "response", "=", "self", ".", "_GET", "(", "path", ",", "kwargs", ")", "self", ".", "_set_attrs_to_values", "(", "response", ")", "return", "response" ]
Get the cast for a movie specified by id from the API. Returns: A dict representation of the JSON returned from the API.
[ "Get", "the", "cast", "for", "a", "movie", "specified", "by", "id", "from", "the", "API", "." ]
python
train
GNS3/gns3-server
gns3server/compute/qemu/__init__.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/qemu/__init__.py#L233-L267
def create_disk(self, qemu_img, path, options): """ Create a qemu disk with qemu-img :param qemu_img: qemu-img binary path :param path: Image path :param options: Disk image creation options """ try: img_format = options.pop("format") img_size = options.pop("size") if not os.path.isabs(path): directory = self.get_images_directory() os.makedirs(directory, exist_ok=True) path = os.path.join(directory, os.path.basename(path)) try: if os.path.exists(path): raise QemuError("Could not create disk image {} already exist".format(path)) except UnicodeEncodeError: raise QemuError("Could not create disk image {}, " "path contains characters not supported by filesystem".format(path)) command = [qemu_img, "create", "-f", img_format] for option in sorted(options.keys()): command.extend(["-o", "{}={}".format(option, options[option])]) command.append(path) command.append("{}M".format(img_size)) process = yield from asyncio.create_subprocess_exec(*command) yield from process.wait() except (OSError, subprocess.SubprocessError) as e: raise QemuError("Could not create disk image {}:{}".format(path, e))
[ "def", "create_disk", "(", "self", ",", "qemu_img", ",", "path", ",", "options", ")", ":", "try", ":", "img_format", "=", "options", ".", "pop", "(", "\"format\"", ")", "img_size", "=", "options", ".", "pop", "(", "\"size\"", ")", "if", "not", "os", ".", "path", ".", "isabs", "(", "path", ")", ":", "directory", "=", "self", ".", "get_images_directory", "(", ")", "os", ".", "makedirs", "(", "directory", ",", "exist_ok", "=", "True", ")", "path", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "os", ".", "path", ".", "basename", "(", "path", ")", ")", "try", ":", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "raise", "QemuError", "(", "\"Could not create disk image {} already exist\"", ".", "format", "(", "path", ")", ")", "except", "UnicodeEncodeError", ":", "raise", "QemuError", "(", "\"Could not create disk image {}, \"", "\"path contains characters not supported by filesystem\"", ".", "format", "(", "path", ")", ")", "command", "=", "[", "qemu_img", ",", "\"create\"", ",", "\"-f\"", ",", "img_format", "]", "for", "option", "in", "sorted", "(", "options", ".", "keys", "(", ")", ")", ":", "command", ".", "extend", "(", "[", "\"-o\"", ",", "\"{}={}\"", ".", "format", "(", "option", ",", "options", "[", "option", "]", ")", "]", ")", "command", ".", "append", "(", "path", ")", "command", ".", "append", "(", "\"{}M\"", ".", "format", "(", "img_size", ")", ")", "process", "=", "yield", "from", "asyncio", ".", "create_subprocess_exec", "(", "*", "command", ")", "yield", "from", "process", ".", "wait", "(", ")", "except", "(", "OSError", ",", "subprocess", ".", "SubprocessError", ")", "as", "e", ":", "raise", "QemuError", "(", "\"Could not create disk image {}:{}\"", ".", "format", "(", "path", ",", "e", ")", ")" ]
Create a qemu disk with qemu-img :param qemu_img: qemu-img binary path :param path: Image path :param options: Disk image creation options
[ "Create", "a", "qemu", "disk", "with", "qemu", "-", "img" ]
python
train
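Reconstructing the qemu-img invocation that the loop above assembles, for a hypothetical qcow2 image (the binary path, image path, and option values are made up):

qemu_img = "/usr/bin/qemu-img"
options = {"format": "qcow2", "size": 30000, "cluster_size": 65536}
img_format = options.pop("format")
img_size = options.pop("size")
command = [qemu_img, "create", "-f", img_format]
for option in sorted(options.keys()):
    command.extend(["-o", "{}={}".format(option, options[option])])
command += ["/tmp/disk.qcow2", "{}M".format(img_size)]
# command -> ['/usr/bin/qemu-img', 'create', '-f', 'qcow2',
#             '-o', 'cluster_size=65536', '/tmp/disk.qcow2', '30000M']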
shmir/PyTrafficGenerator
trafficgenerator/tgn_object.py
https://github.com/shmir/PyTrafficGenerator/blob/382e5d549c83404af2a6571fe19c9e71df8bac14/trafficgenerator/tgn_object.py#L144-L156
def get_objects_by_type(self, *types): """ Returned objects stored in memory (without re-reading them from the TGN). Use this method for fast access to objects in case of static configurations. :param types: requested object types. :return: all children of the specified types. """ if not types: return self.objects.values() types_l = [o.lower() for o in types] return [o for o in self.objects.values() if o.obj_type().lower() in types_l]
[ "def", "get_objects_by_type", "(", "self", ",", "*", "types", ")", ":", "if", "not", "types", ":", "return", "self", ".", "objects", ".", "values", "(", ")", "types_l", "=", "[", "o", ".", "lower", "(", ")", "for", "o", "in", "types", "]", "return", "[", "o", "for", "o", "in", "self", ".", "objects", ".", "values", "(", ")", "if", "o", ".", "obj_type", "(", ")", ".", "lower", "(", ")", "in", "types_l", "]" ]
Return objects stored in memory (without re-reading them from the TGN). Use this method for fast access to objects in case of static configurations. :param types: requested object types. :return: all children of the specified types.
[ "Returned", "objects", "stored", "in", "memory", "(", "without", "re", "-", "reading", "them", "from", "the", "TGN", ")", "." ]
python
train
sengupta/twss
twss/twsslib.py
https://github.com/sengupta/twss/blob/69269b58bc1c388f53b181ecb7c5d6ee5ee8c03f/twss/twsslib.py#L73-L79
def is_twss(self, phrase): """ The magic function- this accepts a phrase and tells you if it classifies as an entendre """ featureset = self.extract_features(phrase) return self.classifier.classify(featureset)
[ "def", "is_twss", "(", "self", ",", "phrase", ")", ":", "featureset", "=", "self", ".", "extract_features", "(", "phrase", ")", "return", "self", ".", "classifier", ".", "classify", "(", "featureset", ")" ]
The magic function - this accepts a phrase and tells you if it classifies as an entendre.
[ "The", "magic", "function", "-", "this", "accepts", "a", "phrase", "and", "tells", "you", "if", "it", "classifies", "as", "an", "entendre" ]
python
train
mahmoudimus/nose-timer
nosetimer/plugin.py
https://github.com/mahmoudimus/nose-timer/blob/3d8ff21ce3a68efd6cd018ea67c32f1da27ea3f9/nosetimer/plugin.py#L236-L241
def _colored_time(self, time_taken, color=None): """Get formatted and colored string for a given time taken.""" if self.timer_no_color: return "{0:0.4f}s".format(time_taken) return _colorize("{0:0.4f}s".format(time_taken), color)
[ "def", "_colored_time", "(", "self", ",", "time_taken", ",", "color", "=", "None", ")", ":", "if", "self", ".", "timer_no_color", ":", "return", "\"{0:0.4f}s\"", ".", "format", "(", "time_taken", ")", "return", "_colorize", "(", "\"{0:0.4f}s\"", ".", "format", "(", "time_taken", ")", ",", "color", ")" ]
Get formatted and colored string for a given time taken.
[ "Get", "formatted", "and", "colored", "string", "for", "a", "given", "time", "taken", "." ]
python
train
davisd50/sparc.apps.cache
sparc/apps/cache/cache.py
https://github.com/davisd50/sparc.apps.cache/blob/793b0f18255230809c30dc27c2bb1bb04b3f194d/sparc/apps/cache/cache.py#L75-L103
def configure_zca(cls, cache_config): """Configure runtime Zope Component Architecture registry We need a 3 step process to make sure dependencies are met 1. load static package-based ZCML files....standard stuff here. 2. Register the config into the registry (i.e. make it available for lookup) 3. Manually register zcml entries in the config (these entries may be dependent on having the config available for lookup) Args: cache_config: application configuration file name or file object """ # step 1 packages = [sparc.apps.cache] Configure(packages) configure_vocabulary_registry() #step 2 config = ElementTree.parse(cache_config).getroot() for zcml in config.findall('zcml'): zcml_file, package = 'configure.zcml' \ if 'file' not in zcml.attrib else zcml.attrib['file'],\ import_module(zcml.attrib['package']) zope.configuration.xmlconfig.XMLConfig(zcml_file, package)() #step3 alsoProvides(config, IAppElementTreeConfig) sm = getSiteManager() sm.registerUtility(config, IAppElementTreeConfig) return config
[ "def", "configure_zca", "(", "cls", ",", "cache_config", ")", ":", "# step 1", "packages", "=", "[", "sparc", ".", "apps", ".", "cache", "]", "Configure", "(", "packages", ")", "configure_vocabulary_registry", "(", ")", "#step 2", "config", "=", "ElementTree", ".", "parse", "(", "cache_config", ")", ".", "getroot", "(", ")", "for", "zcml", "in", "config", ".", "findall", "(", "'zcml'", ")", ":", "zcml_file", ",", "package", "=", "'configure.zcml'", "if", "'file'", "not", "in", "zcml", ".", "attrib", "else", "zcml", ".", "attrib", "[", "'file'", "]", ",", "import_module", "(", "zcml", ".", "attrib", "[", "'package'", "]", ")", "zope", ".", "configuration", ".", "xmlconfig", ".", "XMLConfig", "(", "zcml_file", ",", "package", ")", "(", ")", "#step3", "alsoProvides", "(", "config", ",", "IAppElementTreeConfig", ")", "sm", "=", "getSiteManager", "(", ")", "sm", ".", "registerUtility", "(", "config", ",", "IAppElementTreeConfig", ")", "return", "config" ]
Configure runtime Zope Component Architecture registry We need a 3 step process to make sure dependencies are met 1. load static package-based ZCML files....standard stuff here. 2. Register the config into the registry (i.e. make it available for lookup) 3. Manually register zcml entries in the config (these entries may be dependent on having the config available for lookup) Args: cache_config: application configuration file name or file object
[ "Configure", "runtime", "Zope", "Component", "Architecture", "registry", "We", "need", "a", "3", "step", "process", "to", "make", "sure", "dependencies", "are", "met", "1", ".", "load", "static", "package", "-", "based", "ZCML", "files", "....", "standard", "stuff", "here", ".", "2", ".", "Register", "the", "config", "into", "the", "registry", "(", "i", ".", "e", ".", "make", "it", "available", "for", "lookup", ")", "3", ".", "Manually", "register", "zcml", "entries", "in", "the", "config", "(", "these", "entries", "may", "be", "dependent", "on", "having", "the", "config", "available", "for", "lookup", ")", "Args", ":", "cache_config", ":", "application", "configuration", "file", "name", "or", "file", "object" ]
python
train
mikemaccana/python-docx
docx.py
https://github.com/mikemaccana/python-docx/blob/4c9b46dbebe3d2a9b82dbcd35af36584a36fd9fe/docx.py#L1006-L1011
def websettings(): '''Generate websettings''' web = makeelement('webSettings') web.append(makeelement('allowPNG')) web.append(makeelement('doNotSaveAsSingleFile')) return web
[ "def", "websettings", "(", ")", ":", "web", "=", "makeelement", "(", "'webSettings'", ")", "web", ".", "append", "(", "makeelement", "(", "'allowPNG'", ")", ")", "web", ".", "append", "(", "makeelement", "(", "'doNotSaveAsSingleFile'", ")", ")", "return", "web" ]
Generate websettings
[ "Generate", "websettings" ]
python
train
ets-labs/python-domain-models
domain_models/models.py
https://github.com/ets-labs/python-domain-models/blob/7de1816ba0338f20fdb3e0f57fad0ffd5bea13f9/domain_models/models.py#L50-L61
def prepare_fields_attribute(attribute_name, attributes, class_name): """Prepare model fields attribute.""" attribute = attributes.get(attribute_name) if not attribute: attribute = tuple() elif isinstance(attribute, std_collections.Iterable): attribute = tuple(attribute) else: raise errors.Error('{0}.{1} is supposed to be a list of {2}, ' 'instead {3} given', class_name, attribute_name, fields.Field, attribute) return attribute
[ "def", "prepare_fields_attribute", "(", "attribute_name", ",", "attributes", ",", "class_name", ")", ":", "attribute", "=", "attributes", ".", "get", "(", "attribute_name", ")", "if", "not", "attribute", ":", "attribute", "=", "tuple", "(", ")", "elif", "isinstance", "(", "attribute", ",", "std_collections", ".", "Iterable", ")", ":", "attribute", "=", "tuple", "(", "attribute", ")", "else", ":", "raise", "errors", ".", "Error", "(", "'{0}.{1} is supposed to be a list of {2}, '", "'instead {3} given'", ",", "class_name", ",", "attribute_name", ",", "fields", ".", "Field", ",", "attribute", ")", "return", "attribute" ]
Prepare model fields attribute.
[ "Prepare", "model", "fields", "attribute", "." ]
python
train
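Behaviour sketch (the attribute name and field values below are hypothetical): an iterable is frozen into a tuple, a missing attribute yields an empty tuple, and anything else raises errors.Error.

attributes = {"__fields__": ["field_a", "field_b"]}   # any iterable works
prepare_fields_attribute("__fields__", attributes, "Profile")
# -> ('field_a', 'field_b')
prepare_fields_attribute("__views__", attributes, "Profile")
# -> ()   (missing / falsy attribute)
# A non-iterable value such as 42 would raise errors.Error instead.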
brainiak/brainiak
brainiak/utils/utils.py
https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/utils.py#L95-L128
def sumexp_stable(data): """Compute the sum of exponents for a list of samples Parameters ---------- data : array, shape=[features, samples] A data array containing samples. Returns ------- result_sum : array, shape=[samples,] The sum of exponents for each sample divided by the exponent of the maximum feature value in the sample. max_value : array, shape=[samples,] The maximum feature value for each sample. result_exp : array, shape=[features, samples] The exponent of each element in each sample divided by the exponent of the maximum feature value in the sample. Note ---- This function is more stable than computing the sum(exp(v)). It useful for computing the softmax_i(v)=exp(v_i)/sum(exp(v)) function. """ max_value = data.max(axis=0) result_exp = np.exp(data - max_value) result_sum = np.sum(result_exp, axis=0) return result_sum, max_value, result_exp
[ "def", "sumexp_stable", "(", "data", ")", ":", "max_value", "=", "data", ".", "max", "(", "axis", "=", "0", ")", "result_exp", "=", "np", ".", "exp", "(", "data", "-", "max_value", ")", "result_sum", "=", "np", ".", "sum", "(", "result_exp", ",", "axis", "=", "0", ")", "return", "result_sum", ",", "max_value", ",", "result_exp" ]
Compute the sum of exponents for a list of samples Parameters ---------- data : array, shape=[features, samples] A data array containing samples. Returns ------- result_sum : array, shape=[samples,] The sum of exponents for each sample divided by the exponent of the maximum feature value in the sample. max_value : array, shape=[samples,] The maximum feature value for each sample. result_exp : array, shape=[features, samples] The exponent of each element in each sample divided by the exponent of the maximum feature value in the sample. Note ---- This function is more stable than computing the sum(exp(v)). It is useful for computing the softmax_i(v)=exp(v_i)/sum(exp(v)) function.
[ "Compute", "the", "sum", "of", "exponents", "for", "a", "list", "of", "samples" ]
python
train
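A numeric check of the stability claim: the naive sum overflows while the shifted version stays finite, and the softmax follows directly (values shown are approximate):

import numpy as np

data = np.array([[1000.0, -1000.0],
                 [1001.0,  -999.0]])     # 2 features x 2 samples
s, m, e = sumexp_stable(data)
# np.exp(data).sum(axis=0) -> [inf, 0.] (overflow / underflow), whereas
# m -> [1001., -999.] and s -> [1.3679, 1.3679] (i.e. 1 + e**-1).
softmax = e / s    # softmax_i(v) = exp(v_i) / sum(exp(v)), per sample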
ejeschke/ginga
ginga/rv/plugins/SaveImage.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/SaveImage.py#L372-L393
def _write_header(self, image, hdu): """Write header from image object to given HDU.""" hduhdr = hdu.header # Ginga image header object for the given extension only. # Cannot use get_header() because that might also return PRI hdr. ghdr = image.metadata['header'] for key in ghdr: # Need this to avoid duplication because COMMENT is a weird field if key.upper() == 'COMMENT': continue bnch = ghdr.get_card(key) # Insert new keyword if key not in hduhdr: hduhdr[key] = (bnch.value, bnch.comment) # Update existing keyword elif hduhdr[key] != bnch.value: hduhdr[key] = bnch.value
[ "def", "_write_header", "(", "self", ",", "image", ",", "hdu", ")", ":", "hduhdr", "=", "hdu", ".", "header", "# Ginga image header object for the given extension only.", "# Cannot use get_header() because that might also return PRI hdr.", "ghdr", "=", "image", ".", "metadata", "[", "'header'", "]", "for", "key", "in", "ghdr", ":", "# Need this to avoid duplication because COMMENT is a weird field", "if", "key", ".", "upper", "(", ")", "==", "'COMMENT'", ":", "continue", "bnch", "=", "ghdr", ".", "get_card", "(", "key", ")", "# Insert new keyword", "if", "key", "not", "in", "hduhdr", ":", "hduhdr", "[", "key", "]", "=", "(", "bnch", ".", "value", ",", "bnch", ".", "comment", ")", "# Update existing keyword", "elif", "hduhdr", "[", "key", "]", "!=", "bnch", ".", "value", ":", "hduhdr", "[", "key", "]", "=", "bnch", ".", "value" ]
Write header from image object to given HDU.
[ "Write", "header", "from", "image", "object", "to", "given", "HDU", "." ]
python
train
ethereum/web3.py
web3/_utils/encoding.py
https://github.com/ethereum/web3.py/blob/71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab/web3/_utils/encoding.py#L118-L141
def to_int(value=None, hexstr=None, text=None): """ Converts value to it's integer representation. Values are converted this way: * value: * bytes: big-endian integer * bool: True => 1, False => 0 * hexstr: interpret hex as integer * text: interpret as string of digits, like '12' => 12 """ assert_one_val(value, hexstr=hexstr, text=text) if hexstr is not None: return int(hexstr, 16) elif text is not None: return int(text) elif isinstance(value, bytes): return big_endian_to_int(value) elif isinstance(value, str): raise TypeError("Pass in strings with keyword hexstr or text") else: return int(value)
[ "def", "to_int", "(", "value", "=", "None", ",", "hexstr", "=", "None", ",", "text", "=", "None", ")", ":", "assert_one_val", "(", "value", ",", "hexstr", "=", "hexstr", ",", "text", "=", "text", ")", "if", "hexstr", "is", "not", "None", ":", "return", "int", "(", "hexstr", ",", "16", ")", "elif", "text", "is", "not", "None", ":", "return", "int", "(", "text", ")", "elif", "isinstance", "(", "value", ",", "bytes", ")", ":", "return", "big_endian_to_int", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "str", ")", ":", "raise", "TypeError", "(", "\"Pass in strings with keyword hexstr or text\"", ")", "else", ":", "return", "int", "(", "value", ")" ]
Converts value to its integer representation. Values are converted this way: * value: * bytes: big-endian integer * bool: True => 1, False => 0 * hexstr: interpret hex as integer * text: interpret as string of digits, like '12' => 12
[ "Converts", "value", "to", "it", "s", "integer", "representation", "." ]
python
train
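A few conversions the dispatch above performs, assuming the function is in scope (per the path above it lives in web3._utils.encoding):

to_int(b'\x01\x00')      # -> 256   (big-endian bytes)
to_int(True)             # -> 1     (bool is an int subclass)
to_int(hexstr='0x19')    # -> 25
to_int(text='12')        # -> 12
to_int('0x19')           # raises TypeError: use hexstr= or text= instead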
sibirrer/lenstronomy
lenstronomy/SimulationAPI/observation_api.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/SimulationAPI/observation_api.py#L179-L195
def magnitude2cps(self, magnitude): """ converts an apparent magnitude to counts per second (in units of the data) The zero point of an instrument, by definition, is the magnitude of an object that produces one count (or data number, DN) per second. The magnitude of an arbitrary object producing DN counts in an observation of length EXPTIME is therefore: m = -2.5 x log10(DN / EXPTIME) + ZEROPOINT :param magnitude: magnitude of object :return: counts per second of object """ # compute counts in units of ADS (as magnitude zero point is defined) cps = data_util.magnitude2cps(magnitude, magnitude_zero_point=self._magnitude_zero_point) if self._data_count_unit == 'e-': cps *= self.ccd_gain return cps
[ "def", "magnitude2cps", "(", "self", ",", "magnitude", ")", ":", "# compute counts in units of ADS (as magnitude zero point is defined)", "cps", "=", "data_util", ".", "magnitude2cps", "(", "magnitude", ",", "magnitude_zero_point", "=", "self", ".", "_magnitude_zero_point", ")", "if", "self", ".", "_data_count_unit", "==", "'e-'", ":", "cps", "*=", "self", ".", "ccd_gain", "return", "cps" ]
converts an apparent magnitude to counts per second (in units of the data) The zero point of an instrument, by definition, is the magnitude of an object that produces one count (or data number, DN) per second. The magnitude of an arbitrary object producing DN counts in an observation of length EXPTIME is therefore: m = -2.5 x log10(DN / EXPTIME) + ZEROPOINT :param magnitude: magnitude of object :return: counts per second of object
[ "converts", "an", "apparent", "magnitude", "to", "counts", "per", "second", "(", "in", "units", "of", "the", "data", ")" ]
python
train
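Inverting the zero-point relation in the docstring gives cps = 10**(-0.4 * (m - ZEROPOINT)); a quick standalone check (the real conversion is delegated to data_util.magnitude2cps, so this helper is only an illustration):

def mag_to_cps(mag, zeropoint):
    # Inverse of m = -2.5 * log10(DN / EXPTIME) + ZEROPOINT
    return 10 ** (-0.4 * (mag - zeropoint))

mag_to_cps(25.0, 25.0)   # -> 1.0  (zero point: one count per second)
mag_to_cps(22.5, 25.0)   # -> 10.0 (2.5 mag brighter = 10x the flux)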
Akhail/Tebless
tebless/widgets/window.py
https://github.com/Akhail/Tebless/blob/369ff76f06e7a0b6d04fabc287fa6c4095e158d4/tebless/widgets/window.py#L100-L112
def add(self, widget, *args, **kwargs): """Insert new element. Usage: window.add(widget, **{ 'prop1': val, 'prop2': val2 }) """ ins_widget = widget(*args, **kwargs) self.__iadd__(ins_widget) return ins_widget
[ "def", "add", "(", "self", ",", "widget", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "ins_widget", "=", "widget", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "__iadd__", "(", "ins_widget", ")", "return", "ins_widget" ]
Insert new element. Usage: window.add(widget, **{ 'prop1': val, 'prop2': val2 })
[ "Insert", "new", "element", "." ]
python
train
mikedh/trimesh
trimesh/path/curve.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/curve.py#L7-L55
def discretize_bezier(points, count=None, scale=1.0): """ Parameters ---------- points : (order, dimension) float Control points of the bezier curve For a 2D cubic bezier, order=3, dimension=2 count : int, or None Number of segments scale : float Scale of curve Returns ---------- discrete: (n,d) list of points, a polyline representation of the bezier curve which respects constants.RES_LENGTH """ # make sure we have a numpy array points = np.asanyarray(points, dtype=np.float64) if count is None: # how much distance does a small percentage of the curve take # this is so we can figure out how finely we have to sample t norm = np.linalg.norm(np.diff(points, axis=0), axis=1).sum() count = np.ceil(norm / (res.seg_frac * scale)) count = int(np.clip(count, res.min_sections * len(points), res.max_sections * len(points))) count = int(count) # parameterize incrementing 0.0 - 1.0 t = np.linspace(0.0, 1.0, count) # decrementing 1.0-0.0 t_d = 1.0 - t n = len(points) - 1 # binomial coefficients, i, and each point iterable = zip(binomial(n), np.arange(len(points)), points) # run the actual interpolation stacked = [((t**i) * (t_d**(n - i))).reshape((-1, 1)) * p * c for c, i, p in iterable] result = np.sum(stacked, axis=0) # test to make sure end points are correct test = np.sum((result[[0, -1]] - points[[0, -1]])**2, axis=1) assert (test < tol.merge).all() assert len(result) >= 2 return result
[ "def", "discretize_bezier", "(", "points", ",", "count", "=", "None", ",", "scale", "=", "1.0", ")", ":", "# make sure we have a numpy array", "points", "=", "np", ".", "asanyarray", "(", "points", ",", "dtype", "=", "np", ".", "float64", ")", "if", "count", "is", "None", ":", "# how much distance does a small percentage of the curve take", "# this is so we can figure out how finely we have to sample t", "norm", "=", "np", ".", "linalg", ".", "norm", "(", "np", ".", "diff", "(", "points", ",", "axis", "=", "0", ")", ",", "axis", "=", "1", ")", ".", "sum", "(", ")", "count", "=", "np", ".", "ceil", "(", "norm", "/", "(", "res", ".", "seg_frac", "*", "scale", ")", ")", "count", "=", "int", "(", "np", ".", "clip", "(", "count", ",", "res", ".", "min_sections", "*", "len", "(", "points", ")", ",", "res", ".", "max_sections", "*", "len", "(", "points", ")", ")", ")", "count", "=", "int", "(", "count", ")", "# parameterize incrementing 0.0 - 1.0", "t", "=", "np", ".", "linspace", "(", "0.0", ",", "1.0", ",", "count", ")", "# decrementing 1.0-0.0", "t_d", "=", "1.0", "-", "t", "n", "=", "len", "(", "points", ")", "-", "1", "# binomial coefficients, i, and each point", "iterable", "=", "zip", "(", "binomial", "(", "n", ")", ",", "np", ".", "arange", "(", "len", "(", "points", ")", ")", ",", "points", ")", "# run the actual interpolation", "stacked", "=", "[", "(", "(", "t", "**", "i", ")", "*", "(", "t_d", "**", "(", "n", "-", "i", ")", ")", ")", ".", "reshape", "(", "(", "-", "1", ",", "1", ")", ")", "*", "p", "*", "c", "for", "c", ",", "i", ",", "p", "in", "iterable", "]", "result", "=", "np", ".", "sum", "(", "stacked", ",", "axis", "=", "0", ")", "# test to make sure end points are correct", "test", "=", "np", ".", "sum", "(", "(", "result", "[", "[", "0", ",", "-", "1", "]", "]", "-", "points", "[", "[", "0", ",", "-", "1", "]", "]", ")", "**", "2", ",", "axis", "=", "1", ")", "assert", "(", "test", "<", "tol", ".", "merge", ")", ".", "all", "(", ")", "assert", "len", "(", "result", ")", ">=", "2", "return", "result" ]
Parameters ---------- points : (order, dimension) float Control points of the bezier curve For a 2D cubic bezier, order=3, dimension=2 count : int, or None Number of segments scale : float Scale of curve Returns ---------- discrete: (n,d) list of points, a polyline representation of the bezier curve which respects constants.RES_LENGTH
[ "Parameters", "----------", "points", ":", "(", "order", "dimension", ")", "float", "Control", "points", "of", "the", "bezier", "curve", "For", "a", "2D", "cubic", "bezier", "order", "=", "3", "dimension", "=", "2", "count", ":", "int", "or", "None", "Number", "of", "segments", "scale", ":", "float", "Scale", "of", "curve", "Returns", "----------", "discrete", ":", "(", "n", "d", ")", "list", "of", "points", "a", "polyline", "representation", "of", "the", "bezier", "curve", "which", "respects", "constants", ".", "RES_LENGTH" ]
python
train
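A usage sketch for the record above; the import path follows the file location shown (trimesh/path/curve.py):

import numpy as np
from trimesh.path.curve import discretize_bezier

# control points of a 2D cubic bezier: order=3, dimension=2
control = np.array([[0.0, 0.0],
                    [1.0, 2.0],
                    [3.0, 2.0],
                    [4.0, 0.0]])
polyline = discretize_bezier(control, count=50)

assert polyline.shape == (50, 2)
# the function asserts this internally too: the polyline hits both end control points
assert np.allclose(polyline[[0, -1]], control[[0, -1]])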
briney/abutils
abutils/utils/pipeline.py
https://github.com/briney/abutils/blob/944755fc7d28bfc7d4f1ffad94ca0bf9d74ec54b/abutils/utils/pipeline.py#L40-L71
def initialize(log_file, project_dir=None, debug=False):
    '''
    Initializes an AbTools pipeline.

    Initialization includes printing the AbTools splash, setting up logging,
    creating the project directory, and logging both the project directory
    and the log location.

    Args:

        log_file (str): Path to the log file. Required.

        project_dir (str): Path to the project directory. If not provided,
            the project directory won't be created and the location won't be
            logged.

        debug (bool): If ``True``, the logging level will be set to
            ``logging.DEBUG``. Default is ``False``, which logs at
            ``logging.INFO``.

    Returns:

        logger
    '''
    print_splash()
    log.setup_logging(log_file, print_log_location=False, debug=debug)
    logger = log.get_logger('pipeline')
    if project_dir is not None:
        make_dir(os.path.normpath(project_dir))
        logger.info('PROJECT DIRECTORY: {}'.format(project_dir))
        logger.info('')
    logger.info('LOG LOCATION: {}'.format(log_file))
    print('')
    return logger
[ "def", "initialize", "(", "log_file", ",", "project_dir", "=", "None", ",", "debug", "=", "False", ")", ":", "print_splash", "(", ")", "log", ".", "setup_logging", "(", "log_file", ",", "print_log_location", "=", "False", ",", "debug", "=", "debug", ")", "logger", "=", "log", ".", "get_logger", "(", "'pipeline'", ")", "if", "project_dir", "is", "not", "None", ":", "make_dir", "(", "os", ".", "path", ".", "normpath", "(", "project_dir", ")", ")", "logger", ".", "info", "(", "'PROJECT DIRECTORY: {}'", ".", "format", "(", "project_dir", ")", ")", "logger", ".", "info", "(", "''", ")", "logger", ".", "info", "(", "'LOG LOCATION: {}'", ".", "format", "(", "log_file", ")", ")", "print", "(", "''", ")", "return", "logger" ]
Initializes an AbTools pipeline.

Initialization includes printing the AbTools splash, setting up logging,
creating the project directory, and logging both the project directory
and the log location.

Args:

    log_file (str): Path to the log file. Required.

    project_dir (str): Path to the project directory. If not provided,
        the project directory won't be created and the location won't be
        logged.

    debug (bool): If ``True``, the logging level will be set to
        ``logging.DEBUG``. Default is ``False``, which logs at
        ``logging.INFO``.

Returns:

    logger
[ "Initializes", "an", "AbTools", "pipeline", "." ]
python
train
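A hedged usage sketch for the record above; the paths are placeholders and the import path is inferred from the file location shown (abutils/utils/pipeline.py):

from abutils.utils.pipeline import initialize

logger = initialize('/tmp/run/abtools.log',
                    project_dir='/tmp/run/project',
                    debug=True)
logger.info('pipeline ready')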
RudolfCardinal/pythonlib
cardinal_pythonlib/stringfunc.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/stringfunc.py#L73-L83
def multiple_replace(text: str, rep: Dict[str, str]) -> str: """ Returns a version of ``text`` in which the keys of ``rep`` (a dict) have been replaced by their values. As per http://stackoverflow.com/questions/6116978/python-replace-multiple-strings. """ rep = dict((re.escape(k), v) for k, v in rep.items()) pattern = re.compile("|".join(rep.keys())) return pattern.sub(lambda m: rep[re.escape(m.group(0))], text)
[ "def", "multiple_replace", "(", "text", ":", "str", ",", "rep", ":", "Dict", "[", "str", ",", "str", "]", ")", "->", "str", ":", "rep", "=", "dict", "(", "(", "re", ".", "escape", "(", "k", ")", ",", "v", ")", "for", "k", ",", "v", "in", "rep", ".", "items", "(", ")", ")", "pattern", "=", "re", ".", "compile", "(", "\"|\"", ".", "join", "(", "rep", ".", "keys", "(", ")", ")", ")", "return", "pattern", ".", "sub", "(", "lambda", "m", ":", "rep", "[", "re", ".", "escape", "(", "m", ".", "group", "(", "0", ")", ")", "]", ",", "text", ")" ]
Returns a version of ``text`` in which the keys of ``rep`` (a dict) have been replaced by their values. As per http://stackoverflow.com/questions/6116978/python-replace-multiple-strings.
[ "Returns", "a", "version", "of", "text", "in", "which", "the", "keys", "of", "rep", "(", "a", "dict", ")", "have", "been", "replaced", "by", "their", "values", "." ]
python
train
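Apart from re and the type hint, the function above is self-contained, so the recipe can be exercised directly. Note that all replacements happen in one pass over the text, so replaced output is never re-scanned:

import re
from typing import Dict

def multiple_replace(text: str, rep: Dict[str, str]) -> str:
    rep = dict((re.escape(k), v) for k, v in rep.items())
    pattern = re.compile("|".join(rep.keys()))
    return pattern.sub(lambda m: rep[re.escape(m.group(0))], text)

print(multiple_replace("the cat sat on the mat", {"cat": "dog", "mat": "rug"}))
# -> the dog sat on the rug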
openstack/quark
quark/tags.py
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/tags.py#L118-L135
def validate(self, value):
    """Validates a VLAN ID.

    :param value: The VLAN ID to validate.
    :raises TagValidationError: Raised if the VLAN ID is invalid.
    """
    try:
        vlan_id_int = int(value)
        assert vlan_id_int >= self.MIN_VLAN_ID
        assert vlan_id_int <= self.MAX_VLAN_ID
    except Exception:
        msg = ("Invalid vlan_id. Got '%(vlan_id)s'. "
               "vlan_id should be an integer between %(min)d and %(max)d "
               "inclusive." % {'vlan_id': value,
                               'min': self.MIN_VLAN_ID,
                               'max': self.MAX_VLAN_ID})
        raise TagValidationError(value, msg)
    return True
[ "def", "validate", "(", "self", ",", "value", ")", ":", "try", ":", "vlan_id_int", "=", "int", "(", "value", ")", "assert", "vlan_id_int", ">=", "self", ".", "MIN_VLAN_ID", "assert", "vlan_id_int", "<=", "self", ".", "MAX_VLAN_ID", "except", "Exception", ":", "msg", "=", "(", "\"Invalid vlan_id. Got '%(vlan_id)s'. \"", "\"vlan_id should be an integer between %(min)d and %(max)d \"", "\"inclusive.\"", "%", "{", "'vlan_id'", ":", "value", ",", "'min'", ":", "self", ".", "MIN_VLAN_ID", ",", "'max'", ":", "self", ".", "MAX_VLAN_ID", "}", ")", "raise", "TagValidationError", "(", "value", ",", "msg", ")", "return", "True" ]
Validates a VLAN ID.

:param value: The VLAN ID to validate.
:raises TagValidationError: Raised if the VLAN ID is invalid.
[ "Validates", "a", "VLAN", "ID", "." ]
python
valid
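The record does not show the class constants, so this standalone sketch assumes the usual IEEE 802.1Q usable range; the 1-4094 bounds are an assumption, not taken from the source:

MIN_VLAN_ID = 1     # assumed lower bound
MAX_VLAN_ID = 4094  # assumed upper bound; 0 and 4095 are reserved in 802.1Q

def is_valid_vlan_id(value):
    try:
        vlan = int(value)
    except (TypeError, ValueError):
        return False
    return MIN_VLAN_ID <= vlan <= MAX_VLAN_ID

assert is_valid_vlan_id("100")
assert not is_valid_vlan_id(5000)
assert not is_valid_vlan_id("not-a-vlan")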
pyfca/pyfca
pyfca/implications.py
https://github.com/pyfca/pyfca/blob/cf8cea9e76076dbf4bb3f38996dcb5491b0eb0b0/pyfca/implications.py#L454-L463
def UV_H(self):
    """
    UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
    K = all closed sets

    This is the UV_H function, but the returned implications are respected
    by all attribute sets of this context.
    This corresponds to a multiplication or & operation of the Hg sets.

    """
    h = reduce(lambda x,y:x&y,(H(g,self.width-1) for g in self))
    return UV_H(h, self.width)
[ "def", "UV_H", "(", "self", ")", ":", "h", "=", "reduce", "(", "lambda", "x", ",", "y", ":", "x", "&", "y", ",", "(", "H", "(", "g", ",", "self", ".", "width", "-", "1", ")", "for", "g", "in", "self", ")", ")", "return", "UV_H", "(", "h", ",", "self", ".", "width", ")" ]
UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
K = all closed sets

This is the UV_H function, but the returned implications are respected
by all attribute sets of this context.
This corresponds to a multiplication or & operation of the Hg sets.
[ "UV", "=", "all", "non", "-", "trivial", "(", "!V⊂U", ")", "implications", "U", "-", ">", "V", "with", "UuV", "closed", ";", "in", "ternary", "coding", "(", "1", "=", "V", "2", "=", "U", ")", "K", "=", "all", "closed", "sets" ]
python
train
bionikspoon/pureyaml
pureyaml/_compat/total_ordering.py
https://github.com/bionikspoon/pureyaml/blob/784830b907ca14525c4cecdb6ae35306f6f8a877/pureyaml/_compat/total_ordering.py#L92-L97
def _gt_from_ge(self, other): """Return a > b. Computed by @total_ordering from (a >= b) and (a != b).""" op_result = self.__ge__(other) if op_result is NotImplemented: return NotImplemented return op_result and self != other
[ "def", "_gt_from_ge", "(", "self", ",", "other", ")", ":", "op_result", "=", "self", ".", "__ge__", "(", "other", ")", "if", "op_result", "is", "NotImplemented", ":", "return", "NotImplemented", "return", "op_result", "and", "self", "!=", "other" ]
Return a > b. Computed by @total_ordering from (a >= b) and (a != b).
[ "Return", "a", ">", "b", ".", "Computed", "by" ]
python
train
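The helper above is the > filler that total_ordering installs when a class supplies only __ge__ (plus __eq__); the stdlib decorator shows the same derivation:

from functools import total_ordering

@total_ordering
class Version:
    def __init__(self, n):
        self.n = n

    def __eq__(self, other):
        return self.n == other.n

    def __ge__(self, other):
        return self.n >= other.n

# __gt__ was derived as (a >= b) and (a != b)
assert Version(2) > Version(1)
assert not (Version(1) > Version(1))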
urschrei/pyzotero
pyzotero/zotero.py
https://github.com/urschrei/pyzotero/blob/b378966b30146a952f7953c23202fb5a1ddf81d9/pyzotero/zotero.py#L1224-L1250
def update_collection(self, payload, last_modified=None): """ Update a Zotero collection property such as 'name' Accepts one argument, a dict containing collection data retrieved using e.g. 'collections()' """ modified = payload["version"] if last_modified is not None: modified = last_modified key = payload["key"] headers = {"If-Unmodified-Since-Version": str(modified)} headers.update(self.default_headers()) headers.update({"Content-Type": "application/json"}) req = requests.put( url=self.endpoint + "/{t}/{u}/collections/{c}".format( t=self.library_type, u=self.library_id, c=key ), headers=headers, data=json.dumps(payload), ) self.request = req try: req.raise_for_status() except requests.exceptions.HTTPError: error_handler(req) return True
[ "def", "update_collection", "(", "self", ",", "payload", ",", "last_modified", "=", "None", ")", ":", "modified", "=", "payload", "[", "\"version\"", "]", "if", "last_modified", "is", "not", "None", ":", "modified", "=", "last_modified", "key", "=", "payload", "[", "\"key\"", "]", "headers", "=", "{", "\"If-Unmodified-Since-Version\"", ":", "str", "(", "modified", ")", "}", "headers", ".", "update", "(", "self", ".", "default_headers", "(", ")", ")", "headers", ".", "update", "(", "{", "\"Content-Type\"", ":", "\"application/json\"", "}", ")", "req", "=", "requests", ".", "put", "(", "url", "=", "self", ".", "endpoint", "+", "\"/{t}/{u}/collections/{c}\"", ".", "format", "(", "t", "=", "self", ".", "library_type", ",", "u", "=", "self", ".", "library_id", ",", "c", "=", "key", ")", ",", "headers", "=", "headers", ",", "data", "=", "json", ".", "dumps", "(", "payload", ")", ",", ")", "self", ".", "request", "=", "req", "try", ":", "req", ".", "raise_for_status", "(", ")", "except", "requests", ".", "exceptions", ".", "HTTPError", ":", "error_handler", "(", "req", ")", "return", "True" ]
Update a Zotero collection property such as 'name' Accepts one argument, a dict containing collection data retrieved using e.g. 'collections()'
[ "Update", "a", "Zotero", "collection", "property", "such", "as", "name", "Accepts", "one", "argument", "a", "dict", "containing", "collection", "data", "retrieved", "using", "e", ".", "g", ".", "collections", "()" ]
python
valid
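A hedged usage sketch; the credentials are placeholders, and the payload shape (a dict carrying 'key' and 'version', as returned under 'data' by collections()) follows pyzotero's usual read-modify-write flow rather than anything shown in the record:

from pyzotero import zotero

zot = zotero.Zotero(library_id='123456', library_type='user', api_key='XXXXXXXX')
coll = zot.collections()[0]['data']   # contains 'key' and 'version'
coll['name'] = 'Renamed collection'
zot.update_collection(coll)           # sent with If-Unmodified-Since-Version: <version>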
RedHatQE/python-stitches
stitches/expect.py
https://github.com/RedHatQE/python-stitches/blob/957e9895e64ffd3b8157b38b9cce414969509288/stitches/expect.py#L179-L211
def expect_retval(connection, command, expected_status=0, timeout=10):
    '''
    Run command and expect specified return value

    @param connection: connection to the host
    @type connection: L{Connection}

    @param command: command to execute
    @type command: str

    @param expected_status: expected return value
    @type expected_status: int

    @param timeout: timeout for performing expect operation
    @type timeout: int

    @return: return value
    @rtype: int

    @raises ExpectFailed
    '''
    retval = connection.recv_exit_status(command, timeout)
    if retval is None:
        raise ExpectFailed("Got timeout (%i seconds) while executing '%s'"
                           % (timeout, command))
    elif retval != expected_status:
        raise ExpectFailed("Got %s exit status (%s expected)\ncmd: %s\nstdout: %s\nstderr: %s"
                           % (retval, expected_status, connection.last_command,
                              connection.last_stdout, connection.last_stderr))
    if connection.output_shell:
        sys.stdout.write("Run '%s', got %i return value\n" % (command, retval))
    return retval
[ "def", "expect_retval", "(", "connection", ",", "command", ",", "expected_status", "=", "0", ",", "timeout", "=", "10", ")", ":", "retval", "=", "connection", ".", "recv_exit_status", "(", "command", ",", "timeout", ")", "if", "retval", "is", "None", ":", "raise", "ExpectFailed", "(", "\"Got timeout (%i seconds) while executing '%s'\"", "%", "(", "timeout", ",", "command", ")", ")", "elif", "retval", "!=", "expected_status", ":", "raise", "ExpectFailed", "(", "\"Got %s exit status (%s expected)\\ncmd: %s\\nstdout: %s\\nstderr: %s\"", "%", "(", "retval", ",", "expected_status", ",", "connection", ".", "last_command", ",", "connection", ".", "last_stdout", ",", "connection", ".", "last_stderr", ")", ")", "if", "connection", ".", "output_shell", ":", "sys", ".", "stdout", ".", "write", "(", "\"Run '%s', got %i return value\\n\"", "%", "(", "command", ",", "retval", ")", ")", "return", "retval" ]
Run command and expect specified return value

@param connection: connection to the host
@type connection: L{Connection}

@param command: command to execute
@type command: str

@param expected_status: expected return value
@type expected_status: int

@param timeout: timeout for performing expect operation
@type timeout: int

@return: return value
@rtype: int

@raises ExpectFailed
[ "Run", "command", "and", "expect", "specified", "return", "valud" ]
python
train
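A hedged usage sketch; expect_retval comes from the module shown (stitches/expect.py), while the Connection import path and constructor arguments are assumptions:

from stitches.connection import Connection  # assumed location
from stitches.expect import ExpectFailed, expect_retval

con = Connection('host.example.com')  # assumed constructor signature
try:
    expect_retval(con, 'systemctl is-active sshd', expected_status=0, timeout=30)
except ExpectFailed as err:
    print('command failed:', err)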
zxylvlp/PingPHP
pingphp/grammar.py
https://github.com/zxylvlp/PingPHP/blob/2e9a5f1ef4b5b13310e3f8ff350fa91032357bc5/pingphp/grammar.py#L610-L620
def p_Varible(p): ''' Varible : NsContentName | NsContentName SCOPEOP INDENTIFIER | NsContentName SCOPEOP CLASS | STATIC SCOPEOP INDENTIFIER ''' if len(p) < 3: p[0] = Varible(None, p[1]) else: p[0] = Varible(p[1], p[3])
[ "def", "p_Varible", "(", "p", ")", ":", "if", "len", "(", "p", ")", "<", "3", ":", "p", "[", "0", "]", "=", "Varible", "(", "None", ",", "p", "[", "1", "]", ")", "else", ":", "p", "[", "0", "]", "=", "Varible", "(", "p", "[", "1", "]", ",", "p", "[", "3", "]", ")" ]
Varible : NsContentName | NsContentName SCOPEOP INDENTIFIER | NsContentName SCOPEOP CLASS | STATIC SCOPEOP INDENTIFIER
[ "Varible", ":", "NsContentName", "|", "NsContentName", "SCOPEOP", "INDENTIFIER", "|", "NsContentName", "SCOPEOP", "CLASS", "|", "STATIC", "SCOPEOP", "INDENTIFIER" ]
python
train
obriencj/python-javatools
javatools/__init__.py
https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/__init__.py#L1673-L1702
def iter_code_by_lines(self): """ ((abs_line, rel_line, [(offset, code, args), ...]), ...) """ lnt = self.get_linenumbertable() if not lnt: yield (None, None, self.disassemble()) return lnt_offset = lnt[0][1] cur_line = None current = None for codelet in self.disassemble(): abs_line = self.get_line_for_offset(codelet[0]) if cur_line == abs_line: current.append(codelet) else: if cur_line is not None: yield (cur_line, cur_line - lnt_offset, current) cur_line = abs_line current = [codelet] if current: yield (cur_line, cur_line - lnt_offset, current)
[ "def", "iter_code_by_lines", "(", "self", ")", ":", "lnt", "=", "self", ".", "get_linenumbertable", "(", ")", "if", "not", "lnt", ":", "yield", "(", "None", ",", "None", ",", "self", ".", "disassemble", "(", ")", ")", "return", "lnt_offset", "=", "lnt", "[", "0", "]", "[", "1", "]", "cur_line", "=", "None", "current", "=", "None", "for", "codelet", "in", "self", ".", "disassemble", "(", ")", ":", "abs_line", "=", "self", ".", "get_line_for_offset", "(", "codelet", "[", "0", "]", ")", "if", "cur_line", "==", "abs_line", ":", "current", ".", "append", "(", "codelet", ")", "else", ":", "if", "cur_line", "is", "not", "None", ":", "yield", "(", "cur_line", ",", "cur_line", "-", "lnt_offset", ",", "current", ")", "cur_line", "=", "abs_line", "current", "=", "[", "codelet", "]", "if", "current", ":", "yield", "(", "cur_line", ",", "cur_line", "-", "lnt_offset", ",", "current", ")" ]
((abs_line, rel_line, [(offset, code, args), ...]), ...)
[ "((", "abs_line", "rel_line", "[", "(", "offset", "code", "args", ")", "...", "]", ")", "...", ")" ]
python
train
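The loop above hand-rolls "group consecutive items by a key", which is what itertools.groupby does; a generic sketch with a made-up offset-to-line mapping:

from itertools import groupby

codelets = [(0, 'iconst_0', ()), (1, 'istore_1', ()), (2, 'iload_1', ())]
line_for_offset = {0: 10, 1: 10, 2: 11}  # hypothetical line number table

# groupby only merges adjacent items, which is fine here: disassembly is offset-ordered
for line, group in groupby(codelets, key=lambda c: line_for_offset[c[0]]):
    print(line, list(group))
# 10 [(0, 'iconst_0', ()), (1, 'istore_1', ())]
# 11 [(2, 'iload_1', ())]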
zestyping/star-destroyer
star_destroyer.py
https://github.com/zestyping/star-destroyer/blob/e23584c85d1e8b8f098e5c75977c6a98a41f3f68/star_destroyer.py#L101-L114
def get_star_names(self, modpath): """Returns all the names imported by 'import *' from a given module.""" if modpath not in self.star_names: print('Importing %s to resolve import *' % modpath, file=sys.stderr) try: module = self.import_module(modpath) except ImportError: print('ERROR: Failed to import %s!' % modpath, file=sys.stderr) self.star_names[modpath] = [] else: self.star_names[modpath] = sorted(getattr( module, '__all__', [name for name in dir(module) if not name.startswith('_')])) return self.star_names[modpath]
[ "def", "get_star_names", "(", "self", ",", "modpath", ")", ":", "if", "modpath", "not", "in", "self", ".", "star_names", ":", "print", "(", "'Importing %s to resolve import *'", "%", "modpath", ",", "file", "=", "sys", ".", "stderr", ")", "try", ":", "module", "=", "self", ".", "import_module", "(", "modpath", ")", "except", "ImportError", ":", "print", "(", "'ERROR: Failed to import %s!'", "%", "modpath", ",", "file", "=", "sys", ".", "stderr", ")", "self", ".", "star_names", "[", "modpath", "]", "=", "[", "]", "else", ":", "self", ".", "star_names", "[", "modpath", "]", "=", "sorted", "(", "getattr", "(", "module", ",", "'__all__'", ",", "[", "name", "for", "name", "in", "dir", "(", "module", ")", "if", "not", "name", ".", "startswith", "(", "'_'", ")", "]", ")", ")", "return", "self", ".", "star_names", "[", "modpath", "]" ]
Returns all the names imported by 'import *' from a given module.
[ "Returns", "all", "the", "names", "imported", "by", "import", "*", "from", "a", "given", "module", "." ]
python
train
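The resolution rule itself, honour __all__ when present and otherwise take every public name, is small enough to sketch standalone:

import importlib

def star_names(modpath):
    # names that `from modpath import *` would bind
    module = importlib.import_module(modpath)
    return sorted(getattr(module, '__all__',
                          [name for name in dir(module) if not name.startswith('_')]))

print(star_names('json'))  # json defines __all__, so that list comes back sorted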
saschpe/rapport
rapport/util.py
https://github.com/saschpe/rapport/blob/ccceb8f84bd7e8add88ab5e137cdab6424aa4683/rapport/util.py#L32-L41
def camelcase_to_underscores(word): """Converts a CamelCase word into an under_score word. >>> camelcase_to_underscores("CamelCaseCase") 'camel_case_case' >>> camelcase_to_underscores("getHTTPResponseCode") 'get_http_response_code' """ s1 = _FIRST_CAP_RE.sub(r'\1_\2', word) return _ALL_CAP_RE.sub(r'\1_\2', s1).lower()
[ "def", "camelcase_to_underscores", "(", "word", ")", ":", "s1", "=", "_FIRST_CAP_RE", ".", "sub", "(", "r'\\1_\\2'", ",", "word", ")", "return", "_ALL_CAP_RE", ".", "sub", "(", "r'\\1_\\2'", ",", "s1", ")", ".", "lower", "(", ")" ]
Converts a CamelCase word into an under_score word. >>> camelcase_to_underscores("CamelCaseCase") 'camel_case_case' >>> camelcase_to_underscores("getHTTPResponseCode") 'get_http_response_code'
[ "Converts", "a", "CamelCase", "word", "into", "an", "under_score", "word", "." ]
python
train
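The record omits the two module-level patterns; the standard recipe from the linked Stack Overflow answer (plausibly what _FIRST_CAP_RE and _ALL_CAP_RE contain) makes the function runnable standalone:

import re

_FIRST_CAP_RE = re.compile(r'(.)([A-Z][a-z]+)')
_ALL_CAP_RE = re.compile(r'([a-z0-9])([A-Z])')

def camelcase_to_underscores(word):
    s1 = _FIRST_CAP_RE.sub(r'\1_\2', word)
    return _ALL_CAP_RE.sub(r'\1_\2', s1).lower()

assert camelcase_to_underscores("CamelCaseCase") == "camel_case_case"
assert camelcase_to_underscores("getHTTPResponseCode") == "get_http_response_code"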
JukeboxPipeline/jukebox-core
src/jukeboxcore/addons/guerilla/guerillamgmt.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/addons/guerilla/guerillamgmt.py#L1134-L1146
def prj_create_seq(self, *args, **kwargs): """Create a new Sequence for the current project :returns: None :rtype: None :raises: None """ if not self.cur_prj: return seq = self.create_seq(project=self.cur_prj) if seq: seqdata = djitemdata.SequenceItemData(seq) treemodel.TreeItem(seqdata, self.prj_seq_model.root)
[ "def", "prj_create_seq", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "cur_prj", ":", "return", "seq", "=", "self", ".", "create_seq", "(", "project", "=", "self", ".", "cur_prj", ")", "if", "seq", ":", "seqdata", "=", "djitemdata", ".", "SequenceItemData", "(", "seq", ")", "treemodel", ".", "TreeItem", "(", "seqdata", ",", "self", ".", "prj_seq_model", ".", "root", ")" ]
Create a new Sequence for the current project :returns: None :rtype: None :raises: None
[ "Create", "a", "new", "Sequence", "for", "the", "current", "project" ]
python
train
ultrabug/py3status
py3status/modules/spotify.py
https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/modules/spotify.py#L179-L189
def spotify(self): """ Get the current "artist - title" and return it. """ (text, color) = self._get_text() response = { "cached_until": self.py3.time_in(self.cache_timeout), "color": color, "full_text": text, } return response
[ "def", "spotify", "(", "self", ")", ":", "(", "text", ",", "color", ")", "=", "self", ".", "_get_text", "(", ")", "response", "=", "{", "\"cached_until\"", ":", "self", ".", "py3", ".", "time_in", "(", "self", ".", "cache_timeout", ")", ",", "\"color\"", ":", "color", ",", "\"full_text\"", ":", "text", ",", "}", "return", "response" ]
Get the current "artist - title" and return it.
[ "Get", "the", "current", "artist", "-", "title", "and", "return", "it", "." ]
python
train
dwkim78/upsilon
upsilon/datasets/base.py
https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/datasets/base.py#L12-L39
def load_EROS_lc(filename='lm0010n22323.time'):
    """
    Read an EROS light curve and return its data.

    Parameters
    ----------
    filename : str, optional
        A light-curve filename.

    Returns
    -------
    dates : numpy.ndarray
        An array of dates.
    magnitudes : numpy.ndarray
        An array of magnitudes.
    errors : numpy.ndarray
        An array of magnitude errors.
    """

    module_path = dirname(__file__)
    file_path = join(module_path, 'lightcurves', filename)

    data = np.loadtxt(file_path)
    date = data[:, 0]
    mag = data[:, 1]
    err = data[:, 2]

    return date, mag, err
[ "def", "load_EROS_lc", "(", "filename", "=", "'lm0010n22323.time'", ")", ":", "module_path", "=", "dirname", "(", "__file__", ")", "file_path", "=", "join", "(", "module_path", ",", "'lightcurves'", ",", "filename", ")", "data", "=", "np", ".", "loadtxt", "(", "file_path", ")", "date", "=", "data", "[", ":", ",", "0", "]", "mag", "=", "data", "[", ":", ",", "1", "]", "err", "=", "data", "[", ":", ",", "2", "]", "return", "date", ",", "mag", ",", "err" ]
Read an EROS light curve and return its data.

Parameters
----------
filename : str, optional
    A light-curve filename.

Returns
-------
dates : numpy.ndarray
    An array of dates.
magnitudes : numpy.ndarray
    An array of magnitudes.
errors : numpy.ndarray
    An array of magnitude errors.
[ "Read", "an", "EROS", "light", "curve", "and", "return", "its", "data", "." ]
python
train
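The parsing in the record is plain np.loadtxt on a three-column whitespace file; a self-contained sketch with inline data instead of a file on disk:

import io
import numpy as np

# three whitespace-separated columns: date, magnitude, error
raw = io.StringIO("2449000.5 19.21 0.05\n"
                  "2449001.5 19.35 0.06\n")
data = np.loadtxt(raw)
date, mag, err = data[:, 0], data[:, 1], data[:, 2]
print(date, mag, err)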
jaraco/path.py
path/__init__.py
https://github.com/jaraco/path.py/blob/bbe7d99e7a64a004f866ace9ec12bd9b296908f5/path/__init__.py#L400-L411
def joinpath(cls, first, *others): """ Join first to zero or more :class:`Path` components, adding a separator character (:samp:`{first}.module.sep`) if needed. Returns a new instance of :samp:`{first}._next_class`. .. seealso:: :func:`os.path.join` """ if not isinstance(first, cls): first = cls(first) return first._next_class(first.module.join(first, *others))
[ "def", "joinpath", "(", "cls", ",", "first", ",", "*", "others", ")", ":", "if", "not", "isinstance", "(", "first", ",", "cls", ")", ":", "first", "=", "cls", "(", "first", ")", "return", "first", ".", "_next_class", "(", "first", ".", "module", ".", "join", "(", "first", ",", "*", "others", ")", ")" ]
Join first to zero or more :class:`Path` components, adding a separator character (:samp:`{first}.module.sep`) if needed. Returns a new instance of :samp:`{first}._next_class`. .. seealso:: :func:`os.path.join`
[ "Join", "first", "to", "zero", "or", "more", ":", "class", ":", "Path", "components", "adding", "a", "separator", "character", "(", ":", "samp", ":", "{", "first", "}", ".", "module", ".", "sep", ")", "if", "needed", ".", "Returns", "a", "new", "instance", "of", ":", "samp", ":", "{", "first", "}", ".", "_next_class", "." ]
python
train
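A usage sketch; recent path.py releases expose the class as path.Path (older releases used a lowercase path class, so the import is version-dependent):

from path import Path

p = Path('/usr').joinpath('local', 'bin')
print(p)                    # /usr/local/bin on POSIX
print(isinstance(p, Path))  # True: the result is re-wrapped via _next_class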
mwhooker/jones
jones/client.py
https://github.com/mwhooker/jones/blob/121e89572ca063f456b8e94cbb8cbee26c307a8f/jones/client.py#L72-L78
def _config_changed(self, data, stat): """Called when config changes.""" self.config = json.loads(data) if self.cb: self.cb(self.config)
[ "def", "_config_changed", "(", "self", ",", "data", ",", "stat", ")", ":", "self", ".", "config", "=", "json", ".", "loads", "(", "data", ")", "if", "self", ".", "cb", ":", "self", ".", "cb", "(", "self", ".", "config", ")" ]
Called when config changes.
[ "Called", "when", "config", "changes", "." ]
python
train
toumorokoshi/sprinter
sprinter/external/pippuppet.py
https://github.com/toumorokoshi/sprinter/blob/846697a7a087e69c61d075232e754d6975a64152/sprinter/external/pippuppet.py#L56-L67
def install_egg(self, egg_name): """ Install an egg into the egg directory """ if not os.path.exists(self.egg_directory): os.makedirs(self.egg_directory) self.requirement_set.add_requirement( InstallRequirement.from_line(egg_name, None)) try: self.requirement_set.prepare_files(self.finder) self.requirement_set.install(['--prefix=' + self.egg_directory], []) except DistributionNotFound: self.requirement_set.requirements._keys.remove(egg_name) raise PipException()
[ "def", "install_egg", "(", "self", ",", "egg_name", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "egg_directory", ")", ":", "os", ".", "makedirs", "(", "self", ".", "egg_directory", ")", "self", ".", "requirement_set", ".", "add_requirement", "(", "InstallRequirement", ".", "from_line", "(", "egg_name", ",", "None", ")", ")", "try", ":", "self", ".", "requirement_set", ".", "prepare_files", "(", "self", ".", "finder", ")", "self", ".", "requirement_set", ".", "install", "(", "[", "'--prefix='", "+", "self", ".", "egg_directory", "]", ",", "[", "]", ")", "except", "DistributionNotFound", ":", "self", ".", "requirement_set", ".", "requirements", ".", "_keys", ".", "remove", "(", "egg_name", ")", "raise", "PipException", "(", ")" ]
Install an egg into the egg directory
[ "Install", "an", "egg", "into", "the", "egg", "directory" ]
python
train
vaexio/vaex
packages/vaex-core/vaex/dataframe.py
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L1883-L1900
def byte_size(self, selection=False, virtual=False): """Return the size in bytes the whole DataFrame requires (or the selection), respecting the active_fraction.""" bytes_per_row = 0 N = self.count(selection=selection) extra = 0 for column in list(self.get_column_names(virtual=virtual)): dtype = self.dtype(column) dtype_internal = self.dtype(column, internal=True) #if dtype in [str_type, str] and dtype_internal.kind == 'O': if isinstance(self.columns[column], ColumnString): # TODO: document or fix this # is it too expensive to calculate this exactly? extra += self.columns[column].nbytes else: bytes_per_row += dtype_internal.itemsize if np.ma.isMaskedArray(self.columns[column]): bytes_per_row += 1 return bytes_per_row * self.count(selection=selection) + extra
[ "def", "byte_size", "(", "self", ",", "selection", "=", "False", ",", "virtual", "=", "False", ")", ":", "bytes_per_row", "=", "0", "N", "=", "self", ".", "count", "(", "selection", "=", "selection", ")", "extra", "=", "0", "for", "column", "in", "list", "(", "self", ".", "get_column_names", "(", "virtual", "=", "virtual", ")", ")", ":", "dtype", "=", "self", ".", "dtype", "(", "column", ")", "dtype_internal", "=", "self", ".", "dtype", "(", "column", ",", "internal", "=", "True", ")", "#if dtype in [str_type, str] and dtype_internal.kind == 'O':", "if", "isinstance", "(", "self", ".", "columns", "[", "column", "]", ",", "ColumnString", ")", ":", "# TODO: document or fix this", "# is it too expensive to calculate this exactly?", "extra", "+=", "self", ".", "columns", "[", "column", "]", ".", "nbytes", "else", ":", "bytes_per_row", "+=", "dtype_internal", ".", "itemsize", "if", "np", ".", "ma", ".", "isMaskedArray", "(", "self", ".", "columns", "[", "column", "]", ")", ":", "bytes_per_row", "+=", "1", "return", "bytes_per_row", "*", "self", ".", "count", "(", "selection", "=", "selection", ")", "+", "extra" ]
Return the size in bytes the whole DataFrame requires (or the selection), respecting the active_fraction.
[ "Return", "the", "size", "in", "bytes", "the", "whole", "DataFrame", "requires", "(", "or", "the", "selection", ")", "respecting", "the", "active_fraction", "." ]
python
test
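The per-row accounting above leans on NumPy's dtype.itemsize; the core of the estimate in two lines:

import numpy as np

col = np.zeros(1000, dtype=np.float64)
print(col.dtype.itemsize * len(col))  # 8000 bytes: itemsize times row count, per column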
lowandrew/OLCTools
spadespipeline/CHAS.py
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/CHAS.py#L24-L81
def primers(self): """Setup and create threads for ePCR""" # Create the threads for the ePCR analysis for sample in self.metadata: if sample.general.bestassemblyfile != 'NA': threads = Thread(target=self.epcr, args=()) threads.setDaemon(True) threads.start() for sample in self.metadata: if sample.general.bestassemblyfile != 'NA': setattr(sample, self.analysistype, GenObject()) # Get the primers ready try: sample[self.analysistype].primers = glob(os.path.join(self.reffilepath, self.analysistype, sample.general.referencegenus, 'primers', '*.txt'))[0] # Find the name of the probe file sample[self.analysistype].probes = glob(os.path.join(self.reffilepath, self.analysistype, sample.general.referencegenus, 'probes', '*.fa'))[0] # Create the BLAST database of the probes (if necessary) self.makeblastdb(sample[self.analysistype].probes) # Initialise a list to store the names of the targets sample[self.analysistype].targets = list() # Open the primer file, and read the names of the targets into a list with open(sample[self.analysistype].primers, 'r') as primerfile: for line in primerfile: sample[self.analysistype].targets.append(line.split('\t')[0]) # Organisms without primer/probe files will fail. Populate metadata with 'NA' values except IndexError: sample[self.analysistype].primers = 'NA' sample[self.analysistype].probes = 'NA' # Only try to process organisms with primer files if sample[self.analysistype].primers != 'NA': # Make the output path sample[self.analysistype].reportdir = os.path.join(sample.general.outputdirectory, self.analysistype) make_path(sample[self.analysistype].reportdir) # Set the base name of the output file outfile = sample[self.analysistype].reportdir + sample.name # Set the hashing and mapping commands sample.commands.famap = 'famap -b {}.famap {}.fasta'.format(outfile, sample.general.filenoext) sample.commands.fahash = 'fahash -b {}.hash {}.famap'.format(outfile, outfile) # re-PCR uses the subtyping primers list to search the contigs file using the following parameters # -S {hash file} (Perform STS lookup using hash-file), -r + (Enable/disable reverse STS lookup) # -m 10000 (Set variability for STS size for lookup), # -n 1 (Set max allowed mismatches per primer for lookup) # -g 0 (Set max allowed indels per primer for lookup), # -G (Print alignments in comments), -o {output file} sample.commands.epcr = 're-PCR -S {}.hash -r + -m 10000 -n 2 -g 0 -G -q -o {}.txt {}' \ .format(outfile, outfile, sample[self.analysistype].primers) # Add the variables to the queue self.epcrqueue.put((sample, outfile)) self.epcrqueue.join()
[ "def", "primers", "(", "self", ")", ":", "# Create the threads for the ePCR analysis", "for", "sample", "in", "self", ".", "metadata", ":", "if", "sample", ".", "general", ".", "bestassemblyfile", "!=", "'NA'", ":", "threads", "=", "Thread", "(", "target", "=", "self", ".", "epcr", ",", "args", "=", "(", ")", ")", "threads", ".", "setDaemon", "(", "True", ")", "threads", ".", "start", "(", ")", "for", "sample", "in", "self", ".", "metadata", ":", "if", "sample", ".", "general", ".", "bestassemblyfile", "!=", "'NA'", ":", "setattr", "(", "sample", ",", "self", ".", "analysistype", ",", "GenObject", "(", ")", ")", "# Get the primers ready", "try", ":", "sample", "[", "self", ".", "analysistype", "]", ".", "primers", "=", "glob", "(", "os", ".", "path", ".", "join", "(", "self", ".", "reffilepath", ",", "self", ".", "analysistype", ",", "sample", ".", "general", ".", "referencegenus", ",", "'primers'", ",", "'*.txt'", ")", ")", "[", "0", "]", "# Find the name of the probe file", "sample", "[", "self", ".", "analysistype", "]", ".", "probes", "=", "glob", "(", "os", ".", "path", ".", "join", "(", "self", ".", "reffilepath", ",", "self", ".", "analysistype", ",", "sample", ".", "general", ".", "referencegenus", ",", "'probes'", ",", "'*.fa'", ")", ")", "[", "0", "]", "# Create the BLAST database of the probes (if necessary)", "self", ".", "makeblastdb", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "probes", ")", "# Initialise a list to store the names of the targets", "sample", "[", "self", ".", "analysistype", "]", ".", "targets", "=", "list", "(", ")", "# Open the primer file, and read the names of the targets into a list", "with", "open", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "primers", ",", "'r'", ")", "as", "primerfile", ":", "for", "line", "in", "primerfile", ":", "sample", "[", "self", ".", "analysistype", "]", ".", "targets", ".", "append", "(", "line", ".", "split", "(", "'\\t'", ")", "[", "0", "]", ")", "# Organisms without primer/probe files will fail. 
Populate metadata with 'NA' values", "except", "IndexError", ":", "sample", "[", "self", ".", "analysistype", "]", ".", "primers", "=", "'NA'", "sample", "[", "self", ".", "analysistype", "]", ".", "probes", "=", "'NA'", "# Only try to process organisms with primer files", "if", "sample", "[", "self", ".", "analysistype", "]", ".", "primers", "!=", "'NA'", ":", "# Make the output path", "sample", "[", "self", ".", "analysistype", "]", ".", "reportdir", "=", "os", ".", "path", ".", "join", "(", "sample", ".", "general", ".", "outputdirectory", ",", "self", ".", "analysistype", ")", "make_path", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "reportdir", ")", "# Set the base name of the output file", "outfile", "=", "sample", "[", "self", ".", "analysistype", "]", ".", "reportdir", "+", "sample", ".", "name", "# Set the hashing and mapping commands", "sample", ".", "commands", ".", "famap", "=", "'famap -b {}.famap {}.fasta'", ".", "format", "(", "outfile", ",", "sample", ".", "general", ".", "filenoext", ")", "sample", ".", "commands", ".", "fahash", "=", "'fahash -b {}.hash {}.famap'", ".", "format", "(", "outfile", ",", "outfile", ")", "# re-PCR uses the subtyping primers list to search the contigs file using the following parameters", "# -S {hash file} (Perform STS lookup using hash-file), -r + (Enable/disable reverse STS lookup)", "# -m 10000 (Set variability for STS size for lookup),", "# -n 1 (Set max allowed mismatches per primer for lookup)", "# -g 0 (Set max allowed indels per primer for lookup),", "# -G (Print alignments in comments), -o {output file}", "sample", ".", "commands", ".", "epcr", "=", "'re-PCR -S {}.hash -r + -m 10000 -n 2 -g 0 -G -q -o {}.txt {}'", ".", "format", "(", "outfile", ",", "outfile", ",", "sample", "[", "self", ".", "analysistype", "]", ".", "primers", ")", "# Add the variables to the queue", "self", ".", "epcrqueue", ".", "put", "(", "(", "sample", ",", "outfile", ")", ")", "self", ".", "epcrqueue", ".", "join", "(", ")" ]
Setup and create threads for ePCR
[ "Setup", "and", "create", "threads", "for", "ePCR" ]
python
train