repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
taskcluster/taskcluster-client.py
taskcluster/client.py
https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/client.py#L255-L281
def _makeApiCall(self, entry, *args, **kwargs):
    """Dispatch a call described by an API reference entry.

    Processes the positional/keyword arguments, builds the route (with a
    query string when one is needed) and issues the HTTP request.  When a
    pagination handler is supplied every page is fed to it and nothing is
    returned; otherwise the single response is returned.
    """
    processed = self._processArgs(entry, *args, **kwargs)
    route_params, payload, query, pagination_handler, pagination_limit = processed
    base_route = self._subArgsInRoute(entry, route_params)

    # TODO: Check for limit being in the Query of the api ref
    if pagination_limit and 'limit' in entry.get('query', []):
        query['limit'] = pagination_limit

    def routed():
        # Append the encoded query string only when there is one.
        return base_route + '?' + urllib.parse.urlencode(query) if query else base_route

    response = self._makeHttpRequest(entry['method'], routed(), payload)
    if not pagination_handler:
        return response

    pagination_handler(response)
    while response.get('continuationToken'):
        query['continuationToken'] = response['continuationToken']
        response = self._makeHttpRequest(entry['method'], routed(), payload)
        pagination_handler(response)
[ "def", "_makeApiCall", "(", "self", ",", "entry", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "x", "=", "self", ".", "_processArgs", "(", "entry", ",", "*", "args", ",", "*", "*", "kwargs", ")", "routeParams", ",", "payload", ",", "query",...
This function is used to dispatch calls to other functions for a given API Reference entry
[ "This", "function", "is", "used", "to", "dispatch", "calls", "to", "other", "functions", "for", "a", "given", "API", "Reference", "entry" ]
python
train
chemlab/chemlab
chemlab/libs/chemspipy.py
https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/libs/chemspipy.py#L183-L190
def image(self):
    """Return string containing PNG binary image data of the 2D structure image."""
    if self._image is not None:
        return self._image
    # Fetch the thumbnail lazily and cache it on first access.
    url = 'http://www.chemspider.com/Search.asmx/GetCompoundThumbnail?id=%s&token=%s' % (self.csid, TOKEN)
    document = ET.parse(urlopen(url))
    self._image = document.getroot().text
    return self._image
[ "def", "image", "(", "self", ")", ":", "if", "self", ".", "_image", "is", "None", ":", "apiurl", "=", "'http://www.chemspider.com/Search.asmx/GetCompoundThumbnail?id=%s&token=%s'", "%", "(", "self", ".", "csid", ",", "TOKEN", ")", "response", "=", "urlopen", "("...
Return string containing PNG binary image data of 2D structure image
[ "Return", "string", "containing", "PNG", "binary", "image", "data", "of", "2D", "structure", "image" ]
python
train
MAVENSDC/cdflib
cdflib/cdfwrite.py
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L1295-L1310
def _get_recrange(self, f, VXRoffset): ''' Finds the first and last record numbers pointed by the VXR Assumes the VXRs are in order ''' f.seek(VXRoffset+20) # Num entries numEntries = int.from_bytes(f.read(4), 'big', signed=True) # used entries usedEntries = int.from_bytes(f.read(4), 'big', signed=True) # VXR's First record firstRec = int.from_bytes(f.read(4), 'big', signed=True) # VXR's Last record f.seek(VXRoffset+28+(4*numEntries+4*(usedEntries-1))) lastRec = int.from_bytes(f.read(4), 'big', signed=True) return firstRec, lastRec
[ "def", "_get_recrange", "(", "self", ",", "f", ",", "VXRoffset", ")", ":", "f", ".", "seek", "(", "VXRoffset", "+", "20", ")", "# Num entries", "numEntries", "=", "int", ".", "from_bytes", "(", "f", ".", "read", "(", "4", ")", ",", "'big'", ",", "s...
Finds the first and last record numbers pointed by the VXR Assumes the VXRs are in order
[ "Finds", "the", "first", "and", "last", "record", "numbers", "pointed", "by", "the", "VXR", "Assumes", "the", "VXRs", "are", "in", "order" ]
python
train
sampsyo/confuse
confuse.py
https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/confuse.py#L915-L922
def _add_user_source(self):
    """Add the configuration options from the YAML file in the user's
    configuration directory (given by `config_dir`) if it exists.
    """
    path = self.user_config_path()
    if not os.path.isfile(path):
        return
    # An empty YAML file yields None; contribute an empty mapping instead.
    self.add(ConfigSource(load_yaml(path) or {}, path))
[ "def", "_add_user_source", "(", "self", ")", ":", "filename", "=", "self", ".", "user_config_path", "(", ")", "if", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "self", ".", "add", "(", "ConfigSource", "(", "load_yaml", "(", "filename", ...
Add the configuration options from the YAML file in the user's configuration directory (given by `config_dir`) if it exists.
[ "Add", "the", "configuration", "options", "from", "the", "YAML", "file", "in", "the", "user", "s", "configuration", "directory", "(", "given", "by", "config_dir", ")", "if", "it", "exists", "." ]
python
train
tgbugs/pyontutils
ilxutils/ilxutils/simple_scicrunch_client.py
https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/simple_scicrunch_client.py#L152-L160
def get_data_from_ilx(self, ilx_id):
    """Get full metadata (except annotations and relationships) for an ILX ID.

    Returns a ``(response, success)`` tuple.
    """
    identifier = self.fix_ilx(ilx_id)
    url = (self.base_path + "ilx/search/identifier/{identifier}?key={APIKEY}").format(
        identifier=identifier, APIKEY=self.APIKEY)
    output = self.get(url)
    # A request can succeed at the HTTP level yet carry a failed response body.
    return output, self.check_success(output)
[ "def", "get_data_from_ilx", "(", "self", ",", "ilx_id", ")", ":", "ilx_id", "=", "self", ".", "fix_ilx", "(", "ilx_id", ")", "url_base", "=", "self", ".", "base_path", "+", "\"ilx/search/identifier/{identifier}?key={APIKEY}\"", "url", "=", "url_base", ".", "form...
Gets full meta data (expect their annotations and relationships) from is ILX ID
[ "Gets", "full", "meta", "data", "(", "expect", "their", "annotations", "and", "relationships", ")", "from", "is", "ILX", "ID" ]
python
train
polyaxon/polyaxon
polyaxon/signals/deletion.py
https://github.com/polyaxon/polyaxon/blob/e1724f0756b1a42f9e7aa08a976584a84ef7f016/polyaxon/signals/deletion.py#L56-L77
def experiment_group_pre_delete(sender, **kwargs):
    """Delete all group outputs."""
    instance = kwargs['instance']
    if instance.is_selection:
        return
    # Schedule deletion of outputs and logs through the scheduler tasks.
    deletions = (
        (SchedulerCeleryTasks.STORES_SCHEDULE_OUTPUTS_DELETION, instance.persistence_outputs),
        (SchedulerCeleryTasks.STORES_SCHEDULE_LOGS_DELETION, instance.persistence_logs),
    )
    for task_name, persistence in deletions:
        celery_app.send_task(
            task_name,
            kwargs={
                'persistence': persistence,
                'subpath': instance.subpath,
            },
            countdown=conf.get('GLOBAL_COUNTDOWN'))
[ "def", "experiment_group_pre_delete", "(", "sender", ",", "*", "*", "kwargs", ")", ":", "instance", "=", "kwargs", "[", "'instance'", "]", "if", "instance", ".", "is_selection", ":", "return", "# Delete outputs and logs", "celery_app", ".", "send_task", "(", "Sc...
Delete all group outputs.
[ "Delete", "all", "group", "outputs", "." ]
python
train
SmokinCaterpillar/pypet
pypet/storageservice.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/storageservice.py#L696-L700
def _overview_group(self): """Direct link to the overview group""" if self._overview_group_ is None: self._overview_group_ = self._all_create_or_get_groups('overview')[0] return self._overview_group_
[ "def", "_overview_group", "(", "self", ")", ":", "if", "self", ".", "_overview_group_", "is", "None", ":", "self", ".", "_overview_group_", "=", "self", ".", "_all_create_or_get_groups", "(", "'overview'", ")", "[", "0", "]", "return", "self", ".", "_overvie...
Direct link to the overview group
[ "Direct", "link", "to", "the", "overview", "group" ]
python
test
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_nameserver.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_nameserver.py#L87-L101
def get_nameserver_detail_output_show_nameserver_nameserver_fc4s(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_nameserver_detail = ET.Element("get_nameserver_detail") config = get_nameserver_detail output = ET.SubElement(get_nameserver_detail, "output") show_nameserver = ET.SubElement(output, "show-nameserver") nameserver_portid_key = ET.SubElement(show_nameserver, "nameserver-portid") nameserver_portid_key.text = kwargs.pop('nameserver_portid') nameserver_fc4s = ET.SubElement(show_nameserver, "nameserver-fc4s") nameserver_fc4s.text = kwargs.pop('nameserver_fc4s') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_nameserver_detail_output_show_nameserver_nameserver_fc4s", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_nameserver_detail", "=", "ET", ".", "Element", "(", "\"get_nameserver_detail\"", ...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
log2timeline/dfdatetime
dfdatetime/interface.py
https://github.com/log2timeline/dfdatetime/blob/141ca4ef1eff3d354b5deaac3d81cb08506f98d6/dfdatetime/interface.py#L389-L503
def _CopyTimeFromString(self, time_string): """Copies a time from a string. Args: time_string (str): time value formatted as: hh:mm:ss.######[+-]##:## Where # are numeric digits ranging from 0 to 9 and the seconds fraction can be either 3 or 6 digits. The seconds fraction and time zone offset are optional. Returns: tuple[int, int, int, int, int]: hours, minutes, seconds, microseconds, time zone offset in minutes. Raises: ValueError: if the time string is invalid or not supported. """ time_string_length = len(time_string) # The time string should at least contain 'hh:mm:ss'. if time_string_length < 8: raise ValueError('Time string too short.') if time_string[2] != ':' or time_string[5] != ':': raise ValueError('Invalid time string.') try: hours = int(time_string[0:2], 10) except ValueError: raise ValueError('Unable to parse hours.') if hours not in range(0, 24): raise ValueError('Hours value: {0:d} out of bounds.'.format(hours)) try: minutes = int(time_string[3:5], 10) except ValueError: raise ValueError('Unable to parse minutes.') if minutes not in range(0, 60): raise ValueError('Minutes value: {0:d} out of bounds.'.format(minutes)) try: seconds = int(time_string[6:8], 10) except ValueError: raise ValueError('Unable to parse day of seconds.') # TODO: support a leap second? if seconds not in range(0, 60): raise ValueError('Seconds value: {0:d} out of bounds.'.format(seconds)) microseconds = None time_zone_offset = None time_zone_string_index = 8 while time_zone_string_index < time_string_length: if time_string[time_zone_string_index] in ('+', '-'): break time_zone_string_index += 1 # The calculations that follow rely on the time zone string index # to point beyond the string in case no time zone offset was defined. 
if time_zone_string_index == time_string_length - 1: time_zone_string_index += 1 if time_string_length > 8 and time_string[8] == '.': time_fraction_length = time_zone_string_index - 9 if time_fraction_length not in (3, 6): raise ValueError('Invalid time string.') try: time_fraction = time_string[9:time_zone_string_index] time_fraction = int(time_fraction, 10) except ValueError: raise ValueError('Unable to parse time fraction.') if time_fraction_length == 3: time_fraction *= 1000 microseconds = time_fraction if time_zone_string_index < time_string_length: if (time_string_length - time_zone_string_index != 6 or time_string[time_zone_string_index + 3] != ':'): raise ValueError('Invalid time string.') try: hours_from_utc = int(time_string[ time_zone_string_index + 1:time_zone_string_index + 3]) except ValueError: raise ValueError('Unable to parse time zone hours offset.') if hours_from_utc not in range(0, 15): raise ValueError('Time zone hours offset value out of bounds.') try: minutes_from_utc = int(time_string[ time_zone_string_index + 4:time_zone_string_index + 6]) except ValueError: raise ValueError('Unable to parse time zone minutes offset.') if minutes_from_utc not in range(0, 60): raise ValueError('Time zone minutes offset value out of bounds.') # pylint: disable=invalid-unary-operand-type time_zone_offset = (hours_from_utc * 60) + minutes_from_utc # Note that when the sign of the time zone offset is negative # the difference needs to be added. We do so by flipping the sign. if time_string[time_zone_string_index] != '-': time_zone_offset = -time_zone_offset return hours, minutes, seconds, microseconds, time_zone_offset
[ "def", "_CopyTimeFromString", "(", "self", ",", "time_string", ")", ":", "time_string_length", "=", "len", "(", "time_string", ")", "# The time string should at least contain 'hh:mm:ss'.", "if", "time_string_length", "<", "8", ":", "raise", "ValueError", "(", "'Time str...
Copies a time from a string. Args: time_string (str): time value formatted as: hh:mm:ss.######[+-]##:## Where # are numeric digits ranging from 0 to 9 and the seconds fraction can be either 3 or 6 digits. The seconds fraction and time zone offset are optional. Returns: tuple[int, int, int, int, int]: hours, minutes, seconds, microseconds, time zone offset in minutes. Raises: ValueError: if the time string is invalid or not supported.
[ "Copies", "a", "time", "from", "a", "string", "." ]
python
train
ev3dev/ev3dev-lang-python
ev3dev2/led.py
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/led.py#L140-L145
def brightness(self):
    """Return the brightness level.

    Possible values are from 0 to `max_brightness`.

    Note: the original docstring said "Sets the brightness level", but this
    method only reads the attribute and returns its value.
    """
    # get_attr_int returns (attribute_handle, value); keep the handle so
    # subsequent reads can reuse it.
    self._brightness, value = self.get_attr_int(self._brightness, 'brightness')
    return value
[ "def", "brightness", "(", "self", ")", ":", "self", ".", "_brightness", ",", "value", "=", "self", ".", "get_attr_int", "(", "self", ".", "_brightness", ",", "'brightness'", ")", "return", "value" ]
Sets the brightness level. Possible values are from 0 to `max_brightness`.
[ "Sets", "the", "brightness", "level", ".", "Possible", "values", "are", "from", "0", "to", "max_brightness", "." ]
python
train
zero-os/0-core
client/py-client/zeroos/core0/client/client.py
https://github.com/zero-os/0-core/blob/69f6ce845ab8b8ad805a79a415227e7ac566c218/client/py-client/zeroos/core0/client/client.py#L516-L529
def move(self, path, destination):
    """Move a path to destination.

    :param path: source
    :param destination: destination
    :return:
    """
    return self._client.json(
        'filesystem.move',
        {'path': path, 'destination': destination},
    )
[ "def", "move", "(", "self", ",", "path", ",", "destination", ")", ":", "args", "=", "{", "'path'", ":", "path", ",", "'destination'", ":", "destination", ",", "}", "return", "self", ".", "_client", ".", "json", "(", "'filesystem.move'", ",", "args", ")...
Move a path to destination :param path: source :param destination: destination :return:
[ "Move", "a", "path", "to", "destination" ]
python
train
TUT-ARG/sed_eval
evaluators/sound_event_eval.py
https://github.com/TUT-ARG/sed_eval/blob/0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb/evaluators/sound_event_eval.py#L94-L144
def main(argv):
    """Main.

    Runs sound event evaluation over a list of reference/estimated file
    pairs: loads each pair, accumulates segment- and event-based metrics,
    then either saves the results to ``output_file`` or prints them.
    """
    parameters = process_arguments(argv)
    file_list = sed_eval.io.load_file_pair_list(parameters['file_list'])
    path = os.path.dirname(parameters['file_list'])

    data = []
    all_data = dcase_util.containers.MetaDataContainer()
    for file_pair in file_list:
        reference_event_list = sed_eval.io.load_event_list(
            os.path.abspath(os.path.join(path, file_pair['reference_file']))
        )
        estimated_event_list = sed_eval.io.load_event_list(
            os.path.abspath(os.path.join(path, file_pair['estimated_file']))
        )
        data.append({
            'reference_event_list': reference_event_list,
            'estimated_event_list': estimated_event_list
        })
        # The reference events define the label vocabulary for the metrics.
        all_data += reference_event_list

    event_labels = all_data.unique_event_labels
    segment_based_metrics = sed_eval.sound_event.SegmentBasedMetrics(event_labels)
    event_based_metrics = sed_eval.sound_event.EventBasedMetrics(event_labels)
    for file_pair in data:
        segment_based_metrics.evaluate(
            file_pair['reference_event_list'],
            file_pair['estimated_event_list']
        )
        event_based_metrics.evaluate(
            file_pair['reference_event_list'],
            file_pair['estimated_event_list']
        )

    if parameters['output_file']:
        # save() is called for its side effect; the unused ``results``
        # binding present in the original was removed.
        dcase_util.containers.DictContainer({
            'segment_based_metrics': segment_based_metrics.results(),
            'event_based_metrics': event_based_metrics.results()
        }).save(parameters['output_file'])
    else:
        print(segment_based_metrics)
        print(event_based_metrics)
[ "def", "main", "(", "argv", ")", ":", "parameters", "=", "process_arguments", "(", "argv", ")", "file_list", "=", "sed_eval", ".", "io", ".", "load_file_pair_list", "(", "parameters", "[", "'file_list'", "]", ")", "path", "=", "os", ".", "path", ".", "di...
Main
[ "Main" ]
python
train
nerdvegas/rez
src/rez/package_search.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/package_search.py#L359-L374
def format_search_results(self, search_results):
    """Format search results.

    Args:
        search_results (list of `ResourceSearchResult`): Search to format.

    Returns:
        List of 2-tuple: Text and color to print in.
    """
    # Flatten the per-result line lists into one list of printable lines.
    return [line
            for search_result in search_results
            for line in self._format_search_result(search_result)]
[ "def", "format_search_results", "(", "self", ",", "search_results", ")", ":", "formatted_lines", "=", "[", "]", "for", "search_result", "in", "search_results", ":", "lines", "=", "self", ".", "_format_search_result", "(", "search_result", ")", "formatted_lines", "...
Format search results. Args: search_results (list of `ResourceSearchResult`): Search to format. Returns: List of 2-tuple: Text and color to print in.
[ "Format", "search", "results", "." ]
python
train
andreafioraldi/angrdbg
angrdbg/page_7.py
https://github.com/andreafioraldi/angrdbg/blob/939b20fb9b341aee695d2db12142b1eddc5b555a/angrdbg/page_7.py#L92-L115
def load_slice(self, state, start, end):
    """
    Return the memory objects overlapping with the provided slice.

    :param start: the start address
    :param end: the end address (non-inclusive)
    :returns: tuples of (starting_addr, memory_object)
    """
    page_start = self._page_addr
    page_end = self._page_addr + self._page_size
    if start > page_end or end < page_start:
        l.warning("Calling load_slice on the wrong page.")
        return []

    result = []
    for addr in range(max(start, page_start), min(end, page_end)):
        offset = addr - page_start
        mo = self._storage[offset]
        if mo is None and hasattr(self, "from_dbg"):
            # Lazily fault the byte in from the attached debugger.
            mo = SimMemoryObject(claripy.BVV(get_debugger().get_byte(addr), 8), addr)
            self._storage[offset] = mo
        # Record each distinct memory object once, at its first address.
        if mo is not None and (not result or result[-1][1] is not mo):
            result.append((addr, mo))
    return result
[ "def", "load_slice", "(", "self", ",", "state", ",", "start", ",", "end", ")", ":", "items", "=", "[", "]", "if", "start", ">", "self", ".", "_page_addr", "+", "self", ".", "_page_size", "or", "end", "<", "self", ".", "_page_addr", ":", "l", ".", ...
Return the memory objects overlapping with the provided slice. :param start: the start address :param end: the end address (non-inclusive) :returns: tuples of (starting_addr, memory_object)
[ "Return", "the", "memory", "objects", "overlapping", "with", "the", "provided", "slice", ".", ":", "param", "start", ":", "the", "start", "address", ":", "param", "end", ":", "the", "end", "address", "(", "non", "-", "inclusive", ")", ":", "returns", ":"...
python
train
saltstack/salt
salt/utils/jinja.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/jinja.py#L92-L98
def check_cache(self, template):
    '''
    Cache a file only once
    '''
    if template in self.cached:
        return
    self.cache_file(template)
    self.cached.append(template)
[ "def", "check_cache", "(", "self", ",", "template", ")", ":", "if", "template", "not", "in", "self", ".", "cached", ":", "self", ".", "cache_file", "(", "template", ")", "self", ".", "cached", ".", "append", "(", "template", ")" ]
Cache a file only once
[ "Cache", "a", "file", "only", "once" ]
python
train
pantsbuild/pants
src/python/pants/reporting/reporting_server.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/reporting/reporting_server.py#L82-L88
def _handle_runs(self, relpath, params):
    """Show a listing of all pants runs since the last clean-all."""
    args = self._default_template_args('run_list.html')
    args['runs_by_day'] = self._partition_runs_by_day()
    html = self._renderer.render_name('base.html', args).encode("utf-8")
    self._send_content(html, 'text/html')
[ "def", "_handle_runs", "(", "self", ",", "relpath", ",", "params", ")", ":", "runs_by_day", "=", "self", ".", "_partition_runs_by_day", "(", ")", "args", "=", "self", ".", "_default_template_args", "(", "'run_list.html'", ")", "args", "[", "'runs_by_day'", "]"...
Show a listing of all pants runs since the last clean-all.
[ "Show", "a", "listing", "of", "all", "pants", "runs", "since", "the", "last", "clean", "-", "all", "." ]
python
train
edeposit/edeposit.amqp.harvester
src/edeposit/amqp/harvester/filters/aleph_filter.py
https://github.com/edeposit/edeposit.amqp.harvester/blob/38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e/src/edeposit/amqp/harvester/filters/aleph_filter.py#L20-L41
def name_to_vector(name):
    """
    Convert `name` to the ASCII vector.

    Example:
        >>> name_to_vector("ing. Franta Putšálek")
        ['putsalek', 'franta', 'ing']

    Args:
        name (str): Name which will be vectorized.

    Returns:
        list: Vector created from name.
    """
    # Fix: the original guarded the decode with the Python-2-only
    # ``unicode`` builtin, which raises NameError on Python 3.
    if isinstance(name, bytes):
        name = name.decode("utf-8")

    name = name.lower()
    # Strip accents: decompose, drop non-ASCII combining marks.
    name = unicodedata.normalize('NFKD', name).encode('ascii', 'ignore').decode('ascii')
    name = "".join(ch for ch in name if ch.isalpha() or ch == " ")

    # Longest tokens first; Python's sort is stable for equal lengths.
    return sorted(name.split(), key=len, reverse=True)
[ "def", "name_to_vector", "(", "name", ")", ":", "if", "not", "isinstance", "(", "name", ",", "unicode", ")", ":", "name", "=", "name", ".", "decode", "(", "\"utf-8\"", ")", "name", "=", "name", ".", "lower", "(", ")", "name", "=", "unicodedata", ".",...
Convert `name` to the ASCII vector. Example: >>> name_to_vector("ing. Franta Putšálek") ['putsalek', 'franta', 'ing'] Args: name (str): Name which will be vectorized. Returns: list: Vector created from name.
[ "Convert", "name", "to", "the", "ASCII", "vector", "." ]
python
train
apache/incubator-mxnet
example/ssd/dataset/pycocotools/coco.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/dataset/pycocotools/coco.py#L152-L172
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
    """
    filtering parameters. default skips that filter.
    :param catNms (str array)  : get cats for given cat names
    :param supNms (str array)  : get cats for given supercategory names
    :param catIds (int array)  : get cats for given cat ids
    :return: ids (int array)   : integer array of cat ids
    """
    # Accept scalar arguments as well as lists.
    wrap = lambda value: value if type(value) == list else [value]
    catNms, supNms, catIds = wrap(catNms), wrap(supNms), wrap(catIds)

    cats = self.dataset['categories']
    # Each non-empty filter narrows the candidate categories further.
    if catNms:
        cats = [cat for cat in cats if cat['name'] in catNms]
    if supNms:
        cats = [cat for cat in cats if cat['supercategory'] in supNms]
    if catIds:
        cats = [cat for cat in cats if cat['id'] in catIds]
    return [cat['id'] for cat in cats]
[ "def", "getCatIds", "(", "self", ",", "catNms", "=", "[", "]", ",", "supNms", "=", "[", "]", ",", "catIds", "=", "[", "]", ")", ":", "catNms", "=", "catNms", "if", "type", "(", "catNms", ")", "==", "list", "else", "[", "catNms", "]", "supNms", ...
filtering parameters. default skips that filter. :param catNms (str array) : get cats for given cat names :param supNms (str array) : get cats for given supercategory names :param catIds (int array) : get cats for given cat ids :return: ids (int array) : integer array of cat ids
[ "filtering", "parameters", ".", "default", "skips", "that", "filter", ".", ":", "param", "catNms", "(", "str", "array", ")", ":", "get", "cats", "for", "given", "cat", "names", ":", "param", "supNms", "(", "str", "array", ")", ":", "get", "cats", "for"...
python
train
angr/angr
angr/state_plugins/javavm_memory.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/javavm_memory.py#L198-L252
def load_array_elements(self, array, start_idx, no_of_elements):
    """
    Loads either a single element or a range of elements from the array.

    :param array:          Reference to the array.
    :param start_idx:      Starting index for the load.
    :param no_of_elements: Number of elements to load.
    """
    def heap_values(first_idx):
        # Load no_of_elements consecutive elements starting at first_idx.
        return [self._load_array_element_from_heap(array, idx)
                for idx in range(first_idx, first_idx + no_of_elements)]

    # concretize start index
    concrete_idxes = self.concretize_load_idx(start_idx)

    if len(concrete_idxes) == 1:
        # Only one start index => concrete load.
        concrete_idx = concrete_idxes[0]
        values = heap_values(concrete_idx)
        # If the index was symbolic before concretization, this
        # constrains it to the concrete start idx.
        self.state.solver.add(start_idx == concrete_idx)
        return values

    # Multiple start indexes => symbolic load.
    # Start with the load values for the first concrete index.
    first_idx = concrete_idxes[0]
    values = heap_values(first_idx)
    idx_options = [first_idx == start_idx]

    # Merge in the loads for every remaining start index.
    for concrete_idx in concrete_idxes[1:]:
        candidate = heap_values(concrete_idx)
        for pos, value in enumerate(candidate):
            # Guard each value with its start index:
            # if start_idx concretizes to concrete_idx use the new value,
            # otherwise keep the current one.
            values[pos] = self.state.solver.If(
                concrete_idx == start_idx, value, values[pos])
        idx_options.append(start_idx == concrete_idx)

    # Constrain start_idx s.t. it evaluates to one of the concretized indexes.
    self.state.add_constraints(self.state.solver.Or(*idx_options))
    return values
[ "def", "load_array_elements", "(", "self", ",", "array", ",", "start_idx", ",", "no_of_elements", ")", ":", "# concretize start index", "concrete_start_idxes", "=", "self", ".", "concretize_load_idx", "(", "start_idx", ")", "if", "len", "(", "concrete_start_idxes", ...
Loads either a single element or a range of elements from the array. :param array: Reference to the array. :param start_idx: Starting index for the load. :param no_of_elements: Number of elements to load.
[ "Loads", "either", "a", "single", "element", "or", "a", "range", "of", "elements", "from", "the", "array", "." ]
python
train
cenkalti/kuyruk
kuyruk/worker.py
https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/worker.py#L81-L115
def run(self) -> None:
    """Runs the worker and consumes messages from RabbitMQ.

    Returns only after `shutdown()` is called.
    """
    if self._logging_level:
        logging.basicConfig(
            level=getattr(logging, self._logging_level.upper()),
            format="%(levelname).1s %(name)s.%(funcName)s:%(lineno)d - %(message)s")

    handlers = [(signal.SIGINT, self._handle_sigint),
                (signal.SIGTERM, self._handle_sigterm)]
    if platform.system() != 'Windows':
        # These features will not be available on Windows, but that is OK.
        # Read this issue for more details:
        # https://github.com/cenkalti/kuyruk/issues/54
        handlers += [(signal.SIGHUP, self._handle_sighup),
                     (signal.SIGUSR1, self._handle_sigusr1),
                     (signal.SIGUSR2, self._handle_sigusr2)]
    for signum, handler in handlers:
        signal.signal(signum, handler)

    self._started_at = os.times().elapsed

    for worker_thread in self._threads:
        worker_thread.start()
    try:
        signals.worker_start.send(self.kuyruk, worker=self)
        self._consume_messages()
        signals.worker_shutdown.send(self.kuyruk, worker=self)
    finally:
        # Always signal shutdown and wait for the helper threads.
        self.shutdown_pending.set()
        for worker_thread in self._threads:
            worker_thread.join()

    logger.debug("End run worker")
[ "def", "run", "(", "self", ")", "->", "None", ":", "if", "self", ".", "_logging_level", ":", "logging", ".", "basicConfig", "(", "level", "=", "getattr", "(", "logging", ",", "self", ".", "_logging_level", ".", "upper", "(", ")", ")", ",", "format", ...
Runs the worker and consumes messages from RabbitMQ. Returns only after `shutdown()` is called.
[ "Runs", "the", "worker", "and", "consumes", "messages", "from", "RabbitMQ", ".", "Returns", "only", "after", "shutdown", "()", "is", "called", "." ]
python
train
brechtm/rinohtype
src/rinoh/backend/pdf/xobject/purepng.py
https://github.com/brechtm/rinohtype/blob/40a63c4e5ad7550f62b6860f1812cb67cafb9dc7/src/rinoh/backend/pdf/xobject/purepng.py#L582-L604
def __undo_filter_paeth(self, scanline): """Undo Paeth filter.""" ai = -self.fu previous = self.prev for i in range(len(scanline)): x = scanline[i] if ai < 0: pr = previous[i] # a = c = 0 else: a = scanline[ai] # result c = previous[ai] b = previous[i] pa = abs(b - c) # b pb = abs(a - c) # 0 pc = abs(a + b - c - c) # b if pa <= pb and pa <= pc: # False pr = a elif pb <= pc: # True pr = b else: pr = c scanline[i] = (x + pr) & 0xff # result ai += 1
[ "def", "__undo_filter_paeth", "(", "self", ",", "scanline", ")", ":", "ai", "=", "-", "self", ".", "fu", "previous", "=", "self", ".", "prev", "for", "i", "in", "range", "(", "len", "(", "scanline", ")", ")", ":", "x", "=", "scanline", "[", "i", ...
Undo Paeth filter.
[ "Undo", "Paeth", "filter", "." ]
python
train
square/pylink
pylink/__main__.py
https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/pylink/__main__.py#L570-L603
def main(args=None):
    """Main command-line interface entrypoint.

    Runs the given subcommand or argument that were specified.  If not given
    a ``args`` parameter, assumes the arguments are passed on the
    command-line.

    Args:
      args (list): list of command-line arguments

    Returns:
      Zero on success, non-zero otherwise.
    """
    parsed = create_parser().parse_args(sys.argv[1:] if args is None else args)

    # Map -v / -vv onto increasing log verbosity.
    if parsed.verbose >= 2:
        level = logging.DEBUG
    elif parsed.verbose >= 1:
        level = logging.INFO
    else:
        level = logging.WARNING
    logging.basicConfig(level=level)

    try:
        parsed.command(parsed)
    except pylink.JLinkException as e:
        sys.stderr.write('Error: %s%s' % (str(e), os.linesep))
        return 1
    return 0
[ "def", "main", "(", "args", "=", "None", ")", ":", "if", "args", "is", "None", ":", "args", "=", "sys", ".", "argv", "[", "1", ":", "]", "parser", "=", "create_parser", "(", ")", "args", "=", "parser", ".", "parse_args", "(", "args", ")", "if", ...
Main command-line interface entrypoint. Runs the given subcommand or argument that were specified. If not given a ``args`` parameter, assumes the arguments are passed on the command-line. Args: args (list): list of command-line arguments Returns: Zero on success, non-zero otherwise.
[ "Main", "command", "-", "line", "interface", "entrypoint", "." ]
python
train
aegirhall/console-menu
consolemenu/menu_component.py
https://github.com/aegirhall/console-menu/blob/1a28959d6f1dd6ac79c87b11efd8529d05532422/consolemenu/menu_component.py#L308-L317
def show_item_top_border(self, item_text, flag): """ Sets a flag that will show a top border for an item with the specified text. :param item_text: the text property of the item :param flag: boolean specifying if the border should be shown. """ if flag: self.__top_border_dict[item_text] = True else: self.__top_border_dict.pop(item_text, None)
[ "def", "show_item_top_border", "(", "self", ",", "item_text", ",", "flag", ")", ":", "if", "flag", ":", "self", ".", "__top_border_dict", "[", "item_text", "]", "=", "True", "else", ":", "self", ".", "__top_border_dict", ".", "pop", "(", "item_text", ",", ...
Sets a flag that will show a top border for an item with the specified text. :param item_text: the text property of the item :param flag: boolean specifying if the border should be shown.
[ "Sets", "a", "flag", "that", "will", "show", "a", "top", "border", "for", "an", "item", "with", "the", "specified", "text", ".", ":", "param", "item_text", ":", "the", "text", "property", "of", "the", "item", ":", "param", "flag", ":", "boolean", "spec...
python
train
buckket/twtxt
twtxt/config.py
https://github.com/buckket/twtxt/blob/6c8ad8ef3cbcf0dd335a12285d8b6bbdf93ce851/twtxt/config.py#L63-L93
def create_config(cls, cfgfile, nick, twtfile, twturl, disclose_identity, add_news): """Create a new config file at the default location. :param str cfgfile: path to the config file :param str nick: nickname to use for own tweets :param str twtfile: path to the local twtxt file :param str twturl: URL to the remote twtxt file :param bool disclose_identity: if true the users id will be disclosed :param bool add_news: if true follow twtxt news feed """ cfgfile_dir = os.path.dirname(cfgfile) if not os.path.exists(cfgfile_dir): os.makedirs(cfgfile_dir) cfg = configparser.ConfigParser() cfg.add_section("twtxt") cfg.set("twtxt", "nick", nick) cfg.set("twtxt", "twtfile", twtfile) cfg.set("twtxt", "twturl", twturl) cfg.set("twtxt", "disclose_identity", str(disclose_identity)) cfg.set("twtxt", "character_limit", "140") cfg.set("twtxt", "character_warning", "140") cfg.add_section("following") if add_news: cfg.set("following", "twtxt", "https://buckket.org/twtxt_news.txt") conf = cls(cfgfile, cfg) conf.write_config() return conf
[ "def", "create_config", "(", "cls", ",", "cfgfile", ",", "nick", ",", "twtfile", ",", "twturl", ",", "disclose_identity", ",", "add_news", ")", ":", "cfgfile_dir", "=", "os", ".", "path", ".", "dirname", "(", "cfgfile", ")", "if", "not", "os", ".", "pa...
Create a new config file at the default location. :param str cfgfile: path to the config file :param str nick: nickname to use for own tweets :param str twtfile: path to the local twtxt file :param str twturl: URL to the remote twtxt file :param bool disclose_identity: if true the users id will be disclosed :param bool add_news: if true follow twtxt news feed
[ "Create", "a", "new", "config", "file", "at", "the", "default", "location", "." ]
python
valid
tjvr/kurt
kurt/__init__.py
https://github.com/tjvr/kurt/blob/fcccd80cae11dc233f6dd02b40ec9a388c62f259/kurt/__init__.py#L2445-L2454
def load(cls, path): """Load Waveform from file.""" assert os.path.exists(path), "No such file: %r" % path (folder, filename) = os.path.split(path) (name, extension) = os.path.splitext(filename) wave = Waveform(None) wave._path = path return wave
[ "def", "load", "(", "cls", ",", "path", ")", ":", "assert", "os", ".", "path", ".", "exists", "(", "path", ")", ",", "\"No such file: %r\"", "%", "path", "(", "folder", ",", "filename", ")", "=", "os", ".", "path", ".", "split", "(", "path", ")", ...
Load Waveform from file.
[ "Load", "Waveform", "from", "file", "." ]
python
train
biocommons/eutils
eutils/client.py
https://github.com/biocommons/eutils/blob/0ec7444fd520d2af56114122442ff8f60952db0b/eutils/client.py#L52-L68
def einfo(self, db=None): """query the einfo endpoint :param db: string (optional) :rtype: EInfo or EInfoDB object If db is None, the reply is a list of databases, which is returned in an EInfo object (which has a databases() method). If db is not None, the reply is information about the specified database, which is returned in an EInfoDB object. (Version 2.0 data is automatically requested.) """ if db is None: return EInfoResult(self._qs.einfo()).dblist return EInfoResult(self._qs.einfo({'db': db, 'version': '2.0'})).dbinfo
[ "def", "einfo", "(", "self", ",", "db", "=", "None", ")", ":", "if", "db", "is", "None", ":", "return", "EInfoResult", "(", "self", ".", "_qs", ".", "einfo", "(", ")", ")", ".", "dblist", "return", "EInfoResult", "(", "self", ".", "_qs", ".", "ei...
query the einfo endpoint :param db: string (optional) :rtype: EInfo or EInfoDB object If db is None, the reply is a list of databases, which is returned in an EInfo object (which has a databases() method). If db is not None, the reply is information about the specified database, which is returned in an EInfoDB object. (Version 2.0 data is automatically requested.)
[ "query", "the", "einfo", "endpoint" ]
python
train
9b/google-alerts
google_alerts/__init__.py
https://github.com/9b/google-alerts/blob/69a502cbcccf1bcafe963ed40d286441b0117e04/google_alerts/__init__.py#L382-L403
def delete(self, monitor_id): """Delete a monitor by ID.""" if not self._state: raise InvalidState("State was not properly obtained from the app") monitors = self.list() # Get the latest set of monitors bit = None for monitor in monitors: if monitor_id != monitor['monitor_id']: continue bit = monitor['monitor_id'] if not bit: raise MonitorNotFound("No monitor was found with that term.") url = self.ALERTS_DELETE_URL.format(requestX=self._state[3]) self._log.debug("Deleting alert using: %s" % url) payload = [None, monitor_id] params = json.dumps(payload, separators=(',', ':')) data = {'params': params} response = self._session.post(url, data=data, headers=self.HEADERS) if response.status_code != 200: raise ActionError("Failed to delete by ID: %s" % response.content) return True
[ "def", "delete", "(", "self", ",", "monitor_id", ")", ":", "if", "not", "self", ".", "_state", ":", "raise", "InvalidState", "(", "\"State was not properly obtained from the app\"", ")", "monitors", "=", "self", ".", "list", "(", ")", "# Get the latest set of moni...
Delete a monitor by ID.
[ "Delete", "a", "monitor", "by", "ID", "." ]
python
train
mitsei/dlkit
dlkit/json_/authorization/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/authorization/objects.py#L370-L383
def _init_map(self, record_types=None, **kwargs): """Initialize form map""" osid_objects.OsidRelationshipForm._init_map(self, record_types=record_types) self._my_map['assignedVaultIds'] = [str(kwargs['vault_id'])] self._my_map['functionId'] = str(kwargs['function_id']) self._my_map['qualifierId'] = str(kwargs['qualifier_id']) self._my_map['agentId'] = None self._my_map['resourceId'] = None self._my_map['trustId'] = None self._my_map['implicit'] = None if 'agent_id' in kwargs: self._my_map['agentId'] = str(kwargs['agent_id']) if 'resource_id' in kwargs: self._my_map['resourceId'] = str(kwargs['resource_id'])
[ "def", "_init_map", "(", "self", ",", "record_types", "=", "None", ",", "*", "*", "kwargs", ")", ":", "osid_objects", ".", "OsidRelationshipForm", ".", "_init_map", "(", "self", ",", "record_types", "=", "record_types", ")", "self", ".", "_my_map", "[", "'...
Initialize form map
[ "Initialize", "form", "map" ]
python
train
gijzelaerr/python-snap7
snap7/util.py
https://github.com/gijzelaerr/python-snap7/blob/a6db134c7a3a2ef187b9eca04669221d6fc634c3/snap7/util.py#L355-L361
def get_bytearray(self): """ return bytearray from self or DB parent """ if isinstance(self._bytearray, DB): return self._bytearray._bytearray return self._bytearray
[ "def", "get_bytearray", "(", "self", ")", ":", "if", "isinstance", "(", "self", ".", "_bytearray", ",", "DB", ")", ":", "return", "self", ".", "_bytearray", ".", "_bytearray", "return", "self", ".", "_bytearray" ]
return bytearray from self or DB parent
[ "return", "bytearray", "from", "self", "or", "DB", "parent" ]
python
train
kieferk/dfply
dfply/reshape.py
https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/reshape.py#L143-L201
def spread(df, key, values, convert=False): """ Transforms a "long" DataFrame into a "wide" format using a key and value column. If you have a mixed datatype column in your long-format DataFrame then the default behavior is for the spread columns to be of type `object`, or string. If you want to try to convert dtypes when spreading, you can set the convert keyword argument in spread to True. Args: key (str, int, or symbolic): Label for the key column. values (str, int, or symbolic): Label for the values column. Kwargs: convert (bool): Boolean indicating whether or not to try and convert the spread columns to more appropriate data types. Example: widened = elongated >> spread(X.variable, X.value) widened >> head(5) _ID carat clarity color cut depth price table x y z 0 0 0.23 SI2 E Ideal 61.5 326 55 3.95 3.98 2.43 1 1 0.21 SI1 E Premium 59.8 326 61 3.89 3.84 2.31 2 10 0.3 SI1 J Good 64 339 55 4.25 4.28 2.73 3 100 0.75 SI1 D Very Good 63.2 2760 56 5.8 5.75 3.65 4 1000 0.75 SI1 D Ideal 62.3 2898 55 5.83 5.8 3.62 """ # Taken mostly from dplython package columns = df.columns.tolist() id_cols = [col for col in columns if not col in [key, values]] temp_index = ['' for i in range(len(df))] for id_col in id_cols: temp_index += df[id_col].map(str) out_df = df.assign(temp_index=temp_index) out_df = out_df.set_index('temp_index') spread_data = out_df[[key, values]] if not all(spread_data.groupby([spread_data.index, key]).agg( 'count').reset_index()[values] < 2): raise ValueError('Duplicate identifiers') spread_data = spread_data.pivot(columns=key, values=values) if convert and (out_df[values].dtype.kind in 'OSaU'): columns_to_convert = [col for col in spread_data if col not in columns] spread_data = convert_type(spread_data, columns_to_convert) out_df = out_df[id_cols].drop_duplicates() out_df = out_df.merge(spread_data, left_index=True, right_index=True).reset_index(drop=True) out_df = (out_df >> arrange(id_cols)).reset_index(drop=True) return out_df
[ "def", "spread", "(", "df", ",", "key", ",", "values", ",", "convert", "=", "False", ")", ":", "# Taken mostly from dplython package", "columns", "=", "df", ".", "columns", ".", "tolist", "(", ")", "id_cols", "=", "[", "col", "for", "col", "in", "columns...
Transforms a "long" DataFrame into a "wide" format using a key and value column. If you have a mixed datatype column in your long-format DataFrame then the default behavior is for the spread columns to be of type `object`, or string. If you want to try to convert dtypes when spreading, you can set the convert keyword argument in spread to True. Args: key (str, int, or symbolic): Label for the key column. values (str, int, or symbolic): Label for the values column. Kwargs: convert (bool): Boolean indicating whether or not to try and convert the spread columns to more appropriate data types. Example: widened = elongated >> spread(X.variable, X.value) widened >> head(5) _ID carat clarity color cut depth price table x y z 0 0 0.23 SI2 E Ideal 61.5 326 55 3.95 3.98 2.43 1 1 0.21 SI1 E Premium 59.8 326 61 3.89 3.84 2.31 2 10 0.3 SI1 J Good 64 339 55 4.25 4.28 2.73 3 100 0.75 SI1 D Very Good 63.2 2760 56 5.8 5.75 3.65 4 1000 0.75 SI1 D Ideal 62.3 2898 55 5.83 5.8 3.62
[ "Transforms", "a", "long", "DataFrame", "into", "a", "wide", "format", "using", "a", "key", "and", "value", "column", "." ]
python
train
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/util/bottle3.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/util/bottle3.py#L577-L600
def path_shift(self, count=1): ''' Shift some levels of PATH_INFO into SCRIPT_NAME and return the moved part. count defaults to 1''' #/a/b/ /c/d --> 'a','b' 'c','d' if count == 0: return '' pathlist = self.path.strip('/').split('/') scriptlist = self.environ.get('SCRIPT_NAME','/').strip('/').split('/') if pathlist and pathlist[0] == '': pathlist = [] if scriptlist and scriptlist[0] == '': scriptlist = [] if count > 0 and count <= len(pathlist): moved = pathlist[:count] scriptlist = scriptlist + moved pathlist = pathlist[count:] elif count < 0 and count >= -len(scriptlist): moved = scriptlist[count:] pathlist = moved + pathlist scriptlist = scriptlist[:count] else: empty = 'SCRIPT_NAME' if count < 0 else 'PATH_INFO' raise AssertionError("Cannot shift. Nothing left from %s" % empty) self['PATH_INFO'] = self.path = '/' + '/'.join(pathlist) \ + ('/' if self.path.endswith('/') and pathlist else '') self['SCRIPT_NAME'] = '/' + '/'.join(scriptlist) return '/'.join(moved)
[ "def", "path_shift", "(", "self", ",", "count", "=", "1", ")", ":", "#/a/b/ /c/d --> 'a','b' 'c','d'", "if", "count", "==", "0", ":", "return", "''", "pathlist", "=", "self", ".", "path", ".", "strip", "(", "'/'", ")", ".", "split", "(", "'/'", ")",...
Shift some levels of PATH_INFO into SCRIPT_NAME and return the moved part. count defaults to 1
[ "Shift", "some", "levels", "of", "PATH_INFO", "into", "SCRIPT_NAME", "and", "return", "the", "moved", "part", ".", "count", "defaults", "to", "1" ]
python
train
mallamanis/experimenter
experimenter/data.py
https://github.com/mallamanis/experimenter/blob/2ed5ce85084cc47251ccba3aae0cb3431fbe4259/experimenter/data.py#L13-L32
def experiment_data(self, commit=None, must_contain_results=False): """ :param commit: the commit that all the experiments should have happened or None to include all :type commit: str :param must_contain_results: include only tags that contain results :type must_contain_results: bool :return: all the experiment data :rtype: dict """ results = {} for tag in self.__repository.tags: if not tag.name.startswith(self.__tag_prefix): continue data = json.loads(tag.tag.message) if "results" not in data and must_contain_results: continue if commit is not None and tag.tag.object.hexsha != name_to_object(self.__repository, commit).hexsha: continue results[tag.name] = data return results
[ "def", "experiment_data", "(", "self", ",", "commit", "=", "None", ",", "must_contain_results", "=", "False", ")", ":", "results", "=", "{", "}", "for", "tag", "in", "self", ".", "__repository", ".", "tags", ":", "if", "not", "tag", ".", "name", ".", ...
:param commit: the commit that all the experiments should have happened or None to include all :type commit: str :param must_contain_results: include only tags that contain results :type must_contain_results: bool :return: all the experiment data :rtype: dict
[ ":", "param", "commit", ":", "the", "commit", "that", "all", "the", "experiments", "should", "have", "happened", "or", "None", "to", "include", "all", ":", "type", "commit", ":", "str", ":", "param", "must_contain_results", ":", "include", "only", "tags", ...
python
valid
thebigmunch/gmusicapi-wrapper
gmusicapi_wrapper/mobileclient.py
https://github.com/thebigmunch/gmusicapi-wrapper/blob/8708683cd33955def1378fc28319ef37805b851d/gmusicapi_wrapper/mobileclient.py#L88-L123
def get_google_songs(self, include_filters=None, exclude_filters=None, all_includes=False, all_excludes=False): """Create song list from user's Google Music library. Parameters: include_filters (list): A list of ``(field, pattern)`` tuples. Fields are any valid Google Music metadata field available to the Mobileclient client. Patterns are Python regex patterns. Google Music songs are filtered out if the given metadata field values don't match any of the given patterns. exclude_filters (list): A list of ``(field, pattern)`` tuples. Fields are any valid Google Music metadata field available to the Mobileclient client. Patterns are Python regex patterns. Google Music songs are filtered out if the given metadata field values match any of the given patterns. all_includes (bool): If ``True``, all include_filters criteria must match to include a song. all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song. Returns: A list of Google Music song dicts matching criteria and a list of Google Music song dicts filtered out using filter criteria. """ logger.info("Loading Google Music songs...") google_songs = self.api.get_all_songs() matched_songs, filtered_songs = filter_google_songs( google_songs, include_filters=include_filters, exclude_filters=exclude_filters, all_includes=all_includes, all_excludes=all_excludes ) logger.info("Filtered {0} Google Music songs".format(len(filtered_songs))) logger.info("Loaded {0} Google Music songs".format(len(matched_songs))) return matched_songs, filtered_songs
[ "def", "get_google_songs", "(", "self", ",", "include_filters", "=", "None", ",", "exclude_filters", "=", "None", ",", "all_includes", "=", "False", ",", "all_excludes", "=", "False", ")", ":", "logger", ".", "info", "(", "\"Loading Google Music songs...\"", ")"...
Create song list from user's Google Music library. Parameters: include_filters (list): A list of ``(field, pattern)`` tuples. Fields are any valid Google Music metadata field available to the Mobileclient client. Patterns are Python regex patterns. Google Music songs are filtered out if the given metadata field values don't match any of the given patterns. exclude_filters (list): A list of ``(field, pattern)`` tuples. Fields are any valid Google Music metadata field available to the Mobileclient client. Patterns are Python regex patterns. Google Music songs are filtered out if the given metadata field values match any of the given patterns. all_includes (bool): If ``True``, all include_filters criteria must match to include a song. all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song. Returns: A list of Google Music song dicts matching criteria and a list of Google Music song dicts filtered out using filter criteria.
[ "Create", "song", "list", "from", "user", "s", "Google", "Music", "library", "." ]
python
valid
ray-project/ray
python/ray/experimental/async_plasma.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/async_plasma.py#L152-L160
def set_result(self, result): """Complete all tasks. """ for future in self.traverse(): # All cancelled futures should have callbacks to removed itself # from this linked list. However, these callbacks are scheduled in # an event loop, so we could still find them in our list. future.set_result(result) if not self.done(): super().set_result(result)
[ "def", "set_result", "(", "self", ",", "result", ")", ":", "for", "future", "in", "self", ".", "traverse", "(", ")", ":", "# All cancelled futures should have callbacks to removed itself", "# from this linked list. However, these callbacks are scheduled in", "# an event loop, s...
Complete all tasks.
[ "Complete", "all", "tasks", "." ]
python
train
globality-corp/microcosm-flask
microcosm_flask/conventions/relation.py
https://github.com/globality-corp/microcosm-flask/blob/c2eaf57f03e7d041eea343751a4a90fcc80df418/microcosm_flask/conventions/relation.py#L98-L135
def configure_replacefor(self, ns, definition): """ Register a replace-for relation endpoint. For typical usage, this relation is not strictly required; once an object exists and has its own ID, it is better to operate on it directly via dedicated CRUD routes. However, in some cases, the composite key of (subject_id, object_id) is required to look up the object. This happens, for example, when using DynamoDB where an object which maintains both a hash key and a range key requires specifying them both for access. The definition's func should be a replace function, which must: - accept kwargs for the new instance replacement parameters - return the instance :param ns: the namespace :param definition: the endpoint definition """ @self.add_route(ns.relation_path, Operation.ReplaceFor, ns) @request(definition.request_schema) @response(definition.response_schema) @wraps(definition.func) def replace(**path_data): headers = dict() request_data = load_request_data(definition.request_schema) response_data = require_response_data(definition.func(**merge_data(path_data, request_data))) definition.header_func(headers, response_data) response_format = self.negotiate_response_content(definition.response_formats) return dump_response_data( definition.response_schema, response_data, status_code=Operation.ReplaceFor.value.default_code, headers=headers, response_format=response_format, ) replace.__doc__ = "Replace a {} relative to a {}".format(pluralize(ns.object_name), ns.subject_name)
[ "def", "configure_replacefor", "(", "self", ",", "ns", ",", "definition", ")", ":", "@", "self", ".", "add_route", "(", "ns", ".", "relation_path", ",", "Operation", ".", "ReplaceFor", ",", "ns", ")", "@", "request", "(", "definition", ".", "request_schema...
Register a replace-for relation endpoint. For typical usage, this relation is not strictly required; once an object exists and has its own ID, it is better to operate on it directly via dedicated CRUD routes. However, in some cases, the composite key of (subject_id, object_id) is required to look up the object. This happens, for example, when using DynamoDB where an object which maintains both a hash key and a range key requires specifying them both for access. The definition's func should be a replace function, which must: - accept kwargs for the new instance replacement parameters - return the instance :param ns: the namespace :param definition: the endpoint definition
[ "Register", "a", "replace", "-", "for", "relation", "endpoint", "." ]
python
train
AnnAnnFryingPan/data_hub_call
build/lib/data_hub_call/dataHubCallTriangulum.py
https://github.com/AnnAnnFryingPan/data_hub_call/blob/907a481bb2adfff86d311bdf5a4fa352fd7e90be/build/lib/data_hub_call/dataHubCallTriangulum.py#L63-L134
def call_api_fetch(self, params, get_latest_only=True): """ GET https: // myserver / piwebapi / assetdatabases / D0NxzXSxtlKkGzAhZfHOB - KAQLhZ5wrU - UyRDQnzB_zGVAUEhMQUZTMDRcTlVHUkVFTg HTTP / 1.1 Host: myserver Accept: application / json""" output_format = 'application/json' url_string = self.request_info.url_string() # passing the username and required output format headers_list = {"Accept": output_format, "Host": self.request_info.host} try: hub_result = requests.get(url_string, headers=headers_list, timeout=10.000, verify=False) if hub_result.ok == False: raise ConnectionRefusedError("Connection to Triangulum hub refused: " + hub_result.reason) except: raise ConnectionError("Error connecting to Triangulum hub - check internet connection.") result = {} result_content_json = hub_result.json() result['ok'] = hub_result.ok result['content'] = json.dumps(result_content_json) if "Items" in result_content_json: available_matches = len(result_content_json['Items']) else: available_matches = 1 # No Date params allowed in call to hub, so apply get latest only to hub results here... if (get_latest_only and self.request_info.last_fetch_time != None): try: # Filter python objects with list comprehensions new_content = [x for x in result_content_json['Items'] if self.get_date_time(x['Timestamp']) > self.request_info.last_fetch_time] result_content_json['Items'] = new_content result['content'] = json.dumps(result_content_json) result['ok'] = True except ValueError as e: result['ok'] = False result['reason'] = str(e) except Exception as e: result['ok'] = False result['reason'] = 'Problem sorting results by date to get latest only. 
' + str(e) result['available_matches'] = available_matches if 'Items' in result_content_json: result['returned_matches'] = len(result_content_json['Items']) else: result['returned_matches'] = 1 # Set last_fetch_time for next call if (get_latest_only): if (len(result_content_json['Items']) > 0): try: newlist = sorted(result_content_json['Items'], key=lambda k: self.get_date_time(k["Timestamp"]), reverse=True) most_recent = newlist[0]["Timestamp"] self.request_info.last_fetch_time = self.get_date_time(most_recent) except ValueError as e: result['ok'] = False result['reason'] = str(e) except Exception as e: result['ok'] = False result['reason'] = 'Problem sorting results by date to get latest only. ' + str(e) return result
[ "def", "call_api_fetch", "(", "self", ",", "params", ",", "get_latest_only", "=", "True", ")", ":", "output_format", "=", "'application/json'", "url_string", "=", "self", ".", "request_info", ".", "url_string", "(", ")", "# passing the username and required output for...
GET https: // myserver / piwebapi / assetdatabases / D0NxzXSxtlKkGzAhZfHOB - KAQLhZ5wrU - UyRDQnzB_zGVAUEhMQUZTMDRcTlVHUkVFTg HTTP / 1.1 Host: myserver Accept: application / json
[ "GET", "https", ":", "//", "myserver", "/", "piwebapi", "/", "assetdatabases", "/", "D0NxzXSxtlKkGzAhZfHOB", "-", "KAQLhZ5wrU", "-", "UyRDQnzB_zGVAUEhMQUZTMDRcTlVHUkVFTg", "HTTP", "/", "1", ".", "1", "Host", ":", "myserver", "Accept", ":", "application", "/", "j...
python
train
potash/drain
drain/data.py
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/data.py#L399-L413
def select_regexes(strings, regexes): """ select subset of strings matching a regex treats strings as a set """ strings = set(strings) select = set() if isinstance(strings, collections.Iterable): for r in regexes: s = set(filter(re.compile('^' + r + '$').search, strings)) strings -= s select |= s return select else: raise ValueError("exclude should be iterable")
[ "def", "select_regexes", "(", "strings", ",", "regexes", ")", ":", "strings", "=", "set", "(", "strings", ")", "select", "=", "set", "(", ")", "if", "isinstance", "(", "strings", ",", "collections", ".", "Iterable", ")", ":", "for", "r", "in", "regexes...
select subset of strings matching a regex treats strings as a set
[ "select", "subset", "of", "strings", "matching", "a", "regex", "treats", "strings", "as", "a", "set" ]
python
train
chrisrink10/basilisp
src/basilisp/lang/compiler/generator.py
https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/compiler/generator.py#L1232-L1261
def __if_body_to_py_ast( ctx: GeneratorContext, node: Node, result_name: str ) -> GeneratedPyAST: """Generate custom `if` nodes to handle `recur` bodies. Recur nodes can appear in the then and else expressions of `if` forms. Recur nodes generate Python `continue` statements, which we would otherwise attempt to insert directly into an expression. Python will complain if it finds a statement in an expression AST slot, so we special case the recur handling here.""" if node.op == NodeOp.RECUR and ctx.recur_point.type == RecurType.LOOP: assert isinstance(node, Recur) return _recur_to_py_ast(ctx, node) elif node.op == NodeOp.DO: assert isinstance(node, Do) if_body = _synthetic_do_to_py_ast(ctx, node.assoc(is_body=True)) return GeneratedPyAST( node=ast.Assign( targets=[ast.Name(id=result_name, ctx=ast.Store())], value=if_body.node ), dependencies=list(map(statementize, if_body.dependencies)), ) else: py_ast = gen_py_ast(ctx, node) return GeneratedPyAST( node=ast.Assign( targets=[ast.Name(id=result_name, ctx=ast.Store())], value=py_ast.node ), dependencies=py_ast.dependencies, )
[ "def", "__if_body_to_py_ast", "(", "ctx", ":", "GeneratorContext", ",", "node", ":", "Node", ",", "result_name", ":", "str", ")", "->", "GeneratedPyAST", ":", "if", "node", ".", "op", "==", "NodeOp", ".", "RECUR", "and", "ctx", ".", "recur_point", ".", "...
Generate custom `if` nodes to handle `recur` bodies. Recur nodes can appear in the then and else expressions of `if` forms. Recur nodes generate Python `continue` statements, which we would otherwise attempt to insert directly into an expression. Python will complain if it finds a statement in an expression AST slot, so we special case the recur handling here.
[ "Generate", "custom", "if", "nodes", "to", "handle", "recur", "bodies", "." ]
python
test
shendo/websnort
websnort/ids/snort.py
https://github.com/shendo/websnort/blob/19495e8834a111e889ba28efad8cd90cf55eb661/websnort/ids/snort.py#L80-L91
def parse_version(output): """ Parses the supplied output and returns the version string. :param output: A string containing the output of running snort. :returns: Version string for the version of snort run. None if not found. """ for x in output.splitlines(): match = VERSION_PATTERN.match(x) if match: return match.group('version').strip() return None
[ "def", "parse_version", "(", "output", ")", ":", "for", "x", "in", "output", ".", "splitlines", "(", ")", ":", "match", "=", "VERSION_PATTERN", ".", "match", "(", "x", ")", "if", "match", ":", "return", "match", ".", "group", "(", "'version'", ")", "...
Parses the supplied output and returns the version string. :param output: A string containing the output of running snort. :returns: Version string for the version of snort run. None if not found.
[ "Parses", "the", "supplied", "output", "and", "returns", "the", "version", "string", "." ]
python
train
Robpol86/etaprogress
etaprogress/components/base_progress_bar.py
https://github.com/Robpol86/etaprogress/blob/224e8a248c2bf820bad218763281914ad3983fff/etaprogress/components/base_progress_bar.py#L40-L61
def numerator(self, value): """Sets a new numerator and generates the ETA. Must be greater than or equal to previous numerator.""" # If ETA is every iteration, don't do anything fancy. if self.eta_every <= 1: self._eta.numerator = value self._eta_string = self._generate_eta(self._eta.eta_seconds) return # If ETA is not every iteration, unstable rate is used. If this bar is undefined, no point in calculating ever. if self._eta.undefined: self._eta.set_numerator(value, calculate=False) return # Calculate if this iteration is the right one. if self._eta_count >= self.eta_every: self._eta_count = 1 self._eta.numerator = value self._eta_string = self._generate_eta(self._eta.eta_seconds) return self._eta_count += 1 self._eta.set_numerator(value, calculate=False)
[ "def", "numerator", "(", "self", ",", "value", ")", ":", "# If ETA is every iteration, don't do anything fancy.", "if", "self", ".", "eta_every", "<=", "1", ":", "self", ".", "_eta", ".", "numerator", "=", "value", "self", ".", "_eta_string", "=", "self", ".",...
Sets a new numerator and generates the ETA. Must be greater than or equal to previous numerator.
[ "Sets", "a", "new", "numerator", "and", "generates", "the", "ETA", ".", "Must", "be", "greater", "than", "or", "equal", "to", "previous", "numerator", "." ]
python
train
tensorflow/hub
tensorflow_hub/native_module.py
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/native_module.py#L472-L561
def create_apply_graph(self, signature, input_tensors, name): """See `ModuleImpl.create_apply_graph`.""" signature_def = self._meta_graph.signature_def.get(signature) meta_graph = meta_graph_pb2.MetaGraphDef() meta_graph.CopyFrom(self._meta_graph) apply_graph = tf_v1.get_default_graph() infeed_map = tensor_info.build_input_map(signature_def.inputs, input_tensors) # Build a input map to feed when importing the apply-graph by augmenting the # state_map with the input args. This allows an input to override a tensor # from the state-graph. feed_map = dict(self._state_map) # If we are applying the module in a function with a TPUReplicateContext, we # must capture the state tensors in generating our feedmap and prune out # assign ops. Function graph semantics are different in that all ops are # executed regardless of dependency. # TODO(b/112575006): The following adds functionality of function call # within a TPU context. Work to generalize this for all function calls is # ongoing. if self._is_tpu_graph_function(): for k, v in self._state_map.items(): feed_map[k] = apply_graph.capture(v) meta_graph_lib.prune_unused_nodes(meta_graph, signature_def) # After we prune the metagraph def, we might need to prune away # infeeds which no longer exist. meta_graph_lib.prune_feed_map(meta_graph, infeed_map) elif apply_graph.building_function: raise NotImplementedError( "Using TF-Hub module within a TensorFlow defined function " "is currently not supported.") # As state ops in the apply graph are unused, replace them with Placeholders # so that in a heirarchical instantiation, apply_graph state ops are # ignored. replace_apply_state(meta_graph, list_registered_stateful_ops_without_inputs(), feed_map) feed_map.update(infeed_map) # Make state tensors enter the current context. This way the Module can be # applied inside a control flow structure such as a while_loop. 
control_flow = apply_graph._get_control_flow_context() # pylint: disable=protected-access if control_flow: for key, value in sorted(feed_map.items()): feed_map[key] = control_flow.AddValue(value) # Don't mark the name as used at this point - import_scoped_meta_graph will # start using it. absolute_scope_name = apply_graph.unique_name(name, mark_as_used=False) relative_scope_name = absolute_scope_name.split("/")[-1] import_collections = [ # In most cases ASSET_FILEPATHS are only used for the TABLE_INITIALIZERS # ops, however one could create a graph that uses an asset at any other # time. As so everytime we bring the tensor with that has the asset # filename we must annotate it as so, so later re-exports have that # semantic information and can handle it. tf_v1.GraphKeys.ASSET_FILEPATHS, tf_v1.GraphKeys.COND_CONTEXT, tf_v1.GraphKeys.WHILE_CONTEXT, ] if self._trainable: import_collections.extend([tf_v1.GraphKeys.UPDATE_OPS]) meta_graph_lib.filter_collections(meta_graph, import_collections) meta_graph_lib.prefix_shared_name_attributes(meta_graph, absolute_scope_name) if len(meta_graph.collection_def) and self._is_tpu_graph_function(): raise NotImplementedError( "Applying modules with collections inside TPU functions is not " "supported.") tf_v1.train.import_meta_graph( meta_graph, input_map=feed_map, import_scope=relative_scope_name) fix_colocation_after_import(input_map=feed_map, absolute_import_scope=absolute_scope_name) def get_tensor(name): # When trying to output an input tensor there are no nodes created within # the apply scope. So one must look into the input map. try: return feed_map[name] except KeyError: return apply_graph.get_tensor_by_name( meta_graph_lib.prepend_name_scope( name, import_scope=absolute_scope_name)) return tensor_info.build_output_map(signature_def.outputs, get_tensor)
[ "def", "create_apply_graph", "(", "self", ",", "signature", ",", "input_tensors", ",", "name", ")", ":", "signature_def", "=", "self", ".", "_meta_graph", ".", "signature_def", ".", "get", "(", "signature", ")", "meta_graph", "=", "meta_graph_pb2", ".", "MetaG...
See `ModuleImpl.create_apply_graph`.
[ "See", "ModuleImpl", ".", "create_apply_graph", "." ]
python
train
CalebBell/fluids
fluids/optional/pychebfun.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/optional/pychebfun.py#L395-L401
def restrict(self,subinterval): """ Return a Polyfun that matches self on subinterval. """ if (subinterval[0] < self._domain[0]) or (subinterval[1] > self._domain[1]): raise ValueError("Can only restrict to subinterval") return self.from_function(self, subinterval)
[ "def", "restrict", "(", "self", ",", "subinterval", ")", ":", "if", "(", "subinterval", "[", "0", "]", "<", "self", ".", "_domain", "[", "0", "]", ")", "or", "(", "subinterval", "[", "1", "]", ">", "self", ".", "_domain", "[", "1", "]", ")", ":...
Return a Polyfun that matches self on subinterval.
[ "Return", "a", "Polyfun", "that", "matches", "self", "on", "subinterval", "." ]
python
train
OnroerendErfgoed/oe_utils
oe_utils/search/__init__.py
https://github.com/OnroerendErfgoed/oe_utils/blob/7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d/oe_utils/search/__init__.py#L42-L58
def parse_filter_params(query_params, filterable): """ Parse query_params to a filter params dict. Merge multiple values for one key to a list. Filter out keys that aren't filterable. :param query_params: query params :param filterable: list of filterable keys :return: dict of filter values """ if query_params is not None: filter_params = {} for fq in query_params.mixed(): if fq in filterable: filter_params[fq] = query_params.mixed().get(fq) return filter_params else: return {}
[ "def", "parse_filter_params", "(", "query_params", ",", "filterable", ")", ":", "if", "query_params", "is", "not", "None", ":", "filter_params", "=", "{", "}", "for", "fq", "in", "query_params", ".", "mixed", "(", ")", ":", "if", "fq", "in", "filterable", ...
Parse query_params to a filter params dict. Merge multiple values for one key to a list. Filter out keys that aren't filterable. :param query_params: query params :param filterable: list of filterable keys :return: dict of filter values
[ "Parse", "query_params", "to", "a", "filter", "params", "dict", ".", "Merge", "multiple", "values", "for", "one", "key", "to", "a", "list", ".", "Filter", "out", "keys", "that", "aren", "t", "filterable", "." ]
python
train
posativ/isso
isso/migrate.py
https://github.com/posativ/isso/blob/78997f491044b7d694ac7170edc32030544095b7/isso/migrate.py#L296-L306
def migrate(self): """Process the input file and fill the DB.""" with io.open(self.json_file, 'rt', encoding='utf8') as fh: threads = json.load(fh) progress = Progress(len(threads)) for i, thread in enumerate(threads): progress.update(i, str(i)) self.insert(thread) progress.finish("{0} threads, {1} comments".format(len(threads), self.count))
[ "def", "migrate", "(", "self", ")", ":", "with", "io", ".", "open", "(", "self", ".", "json_file", ",", "'rt'", ",", "encoding", "=", "'utf8'", ")", "as", "fh", ":", "threads", "=", "json", ".", "load", "(", "fh", ")", "progress", "=", "Progress", ...
Process the input file and fill the DB.
[ "Process", "the", "input", "file", "and", "fill", "the", "DB", "." ]
python
train
mezz64/pyHik
pyhik/hikvision.py
https://github.com/mezz64/pyHik/blob/1e7afca926e2b045257a43cbf8b1236a435493c2/pyhik/hikvision.py#L220-L223
def add_update_callback(self, callback, sensor): """Register as callback for when a matching device sensor changes.""" self._updateCallbacks.append([callback, sensor]) _LOGGING.debug('Added update callback to %s on %s', callback, sensor)
[ "def", "add_update_callback", "(", "self", ",", "callback", ",", "sensor", ")", ":", "self", ".", "_updateCallbacks", ".", "append", "(", "[", "callback", ",", "sensor", "]", ")", "_LOGGING", ".", "debug", "(", "'Added update callback to %s on %s'", ",", "call...
Register as callback for when a matching device sensor changes.
[ "Register", "as", "callback", "for", "when", "a", "matching", "device", "sensor", "changes", "." ]
python
train
OCR-D/core
ocrd/ocrd/workspace_backup.py
https://github.com/OCR-D/core/blob/57e68c578526cb955fd2e368207f5386c459d91d/ocrd/ocrd/workspace_backup.py#L70-L88
def add(self): """ Create a backup in <self.backup_directory> """ log = getLogger('ocrd.workspace_backup.add') mets_str = self.workspace.mets.to_xml() chksum = _chksum(mets_str) backups = self.list() if backups and backups[0].chksum == chksum: log.info('No changes since last backup: %s' % backups[0]) else: timestamp = datetime.now().timestamp() d = join(self.backup_directory, '%s.%s' % (chksum, timestamp)) mets_file = join(d, 'mets.xml') log.info("Backing up to %s" % mets_file) makedirs(d) with open(mets_file, 'wb') as f: f.write(mets_str) return chksum
[ "def", "add", "(", "self", ")", ":", "log", "=", "getLogger", "(", "'ocrd.workspace_backup.add'", ")", "mets_str", "=", "self", ".", "workspace", ".", "mets", ".", "to_xml", "(", ")", "chksum", "=", "_chksum", "(", "mets_str", ")", "backups", "=", "self"...
Create a backup in <self.backup_directory>
[ "Create", "a", "backup", "in", "<self", ".", "backup_directory", ">" ]
python
train
worldcompany/djangoembed
oembed/sites.py
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/sites.py#L106-L114
def provider_for_url(self, url): """ Find the right provider for a URL """ for provider, regex in self.get_registry().items(): if re.match(regex, url) is not None: return provider raise OEmbedMissingEndpoint('No endpoint matches URL: %s' % url)
[ "def", "provider_for_url", "(", "self", ",", "url", ")", ":", "for", "provider", ",", "regex", "in", "self", ".", "get_registry", "(", ")", ".", "items", "(", ")", ":", "if", "re", ".", "match", "(", "regex", ",", "url", ")", "is", "not", "None", ...
Find the right provider for a URL
[ "Find", "the", "right", "provider", "for", "a", "URL" ]
python
valid
fredrike/pypoint
pypoint/__init__.py
https://github.com/fredrike/pypoint/blob/b5c9a701d2b7e24d796aa7f8c410360b61d8ec8a/pypoint/__init__.py#L106-L109
def _request_devices(self, url, _type): """Request list of devices.""" res = self._request(url) return res.get(_type) if res else {}
[ "def", "_request_devices", "(", "self", ",", "url", ",", "_type", ")", ":", "res", "=", "self", ".", "_request", "(", "url", ")", "return", "res", ".", "get", "(", "_type", ")", "if", "res", "else", "{", "}" ]
Request list of devices.
[ "Request", "list", "of", "devices", "." ]
python
train
gem/oq-engine
openquake/commands/show.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commands/show.py#L53-L109
def show(what='contents', calc_id=-1, extra=()): """ Show the content of a datastore (by default the last one). """ datadir = datastore.get_datadir() if what == 'all': # show all if not os.path.exists(datadir): return rows = [] for calc_id in datastore.get_calc_ids(datadir): try: ds = util.read(calc_id) oq = ds['oqparam'] cmode, descr = oq.calculation_mode, oq.description except Exception: # invalid datastore file, or missing calculation_mode # and description attributes, perhaps due to a manual kill f = os.path.join(datadir, 'calc_%s.hdf5' % calc_id) logging.warning('Unreadable datastore %s', f) continue else: rows.append((calc_id, cmode, descr.encode('utf-8'))) for row in sorted(rows, key=lambda row: row[0]): # by calc_id print('#%d %s: %s' % row) return ds = util.read(calc_id) # this part is experimental if what == 'rlzs' and 'poes' in ds: min_value = 0.01 # used in rmsep getter = getters.PmapGetter(ds) pmaps = getter.get_pmaps() weights = [rlz.weight for rlz in getter.rlzs] mean = stats.compute_pmap_stats( pmaps, [numpy.mean], weights, getter.imtls) dists = [] for rlz, pmap in zip(getter.rlzs, pmaps): dist = util.rmsep(mean.array, pmap.array, min_value) dists.append((dist, rlz)) print('Realizations in order of distance from the mean curves') for dist, rlz in sorted(dists): print('%s: rmsep=%s' % (rlz, dist)) elif view.keyfunc(what) in view: print(view(what, ds)) elif what.split('/', 1)[0] in extract: print(extract(ds, what, *extra)) elif what in ds: obj = ds[what] if hasattr(obj, 'value'): # an array print(write_csv(io.BytesIO(), obj.value).decode('utf8')) else: print(obj) else: print('%s not found' % what) ds.close()
[ "def", "show", "(", "what", "=", "'contents'", ",", "calc_id", "=", "-", "1", ",", "extra", "=", "(", ")", ")", ":", "datadir", "=", "datastore", ".", "get_datadir", "(", ")", "if", "what", "==", "'all'", ":", "# show all", "if", "not", "os", ".", ...
Show the content of a datastore (by default the last one).
[ "Show", "the", "content", "of", "a", "datastore", "(", "by", "default", "the", "last", "one", ")", "." ]
python
train
Jajcus/pyxmpp2
pyxmpp2/jid.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/jid.py#L156-L186
def __from_unicode(cls, data, check = True): """Return jid tuple from an Unicode string. :Parameters: - `data`: the JID string - `check`: when `False` then the JID is not checked for specification compliance. :Return: (localpart, domainpart, resourcepart) tuple""" parts1 = data.split(u"/", 1) parts2 = parts1[0].split(u"@", 1) if len(parts2) == 2: local = parts2[0] domain = parts2[1] if check: local = cls.__prepare_local(local) domain = cls.__prepare_domain(domain) else: local = None domain = parts2[0] if check: domain = cls.__prepare_domain(domain) if len(parts1) == 2: resource = parts1[1] if check: resource = cls.__prepare_resource(parts1[1]) else: resource = None if not domain: raise JIDError("Domain is required in JID.") return (local, domain, resource)
[ "def", "__from_unicode", "(", "cls", ",", "data", ",", "check", "=", "True", ")", ":", "parts1", "=", "data", ".", "split", "(", "u\"/\"", ",", "1", ")", "parts2", "=", "parts1", "[", "0", "]", ".", "split", "(", "u\"@\"", ",", "1", ")", "if", ...
Return jid tuple from an Unicode string. :Parameters: - `data`: the JID string - `check`: when `False` then the JID is not checked for specification compliance. :Return: (localpart, domainpart, resourcepart) tuple
[ "Return", "jid", "tuple", "from", "an", "Unicode", "string", "." ]
python
valid
edx/edx-val
edxval/utils.py
https://github.com/edx/edx-val/blob/30df48061e77641edb5272895b7c7f7f25eb7aa7/edxval/utils.py#L189-L200
def create_file_in_fs(file_data, file_name, file_system, static_dir): """ Writes file in specific file system. Arguments: file_data (str): Data to store into the file. file_name (str): File name of the file to be created. file_system (OSFS): Import file system. static_dir (str): The Directory to retrieve transcript file. """ with file_system.open(combine(static_dir, file_name), 'wb') as f: f.write(file_data.encode('utf-8'))
[ "def", "create_file_in_fs", "(", "file_data", ",", "file_name", ",", "file_system", ",", "static_dir", ")", ":", "with", "file_system", ".", "open", "(", "combine", "(", "static_dir", ",", "file_name", ")", ",", "'wb'", ")", "as", "f", ":", "f", ".", "wr...
Writes file in specific file system. Arguments: file_data (str): Data to store into the file. file_name (str): File name of the file to be created. file_system (OSFS): Import file system. static_dir (str): The Directory to retrieve transcript file.
[ "Writes", "file", "in", "specific", "file", "system", "." ]
python
train
inasafe/inasafe
safe/gui/tools/wizard/step_fc35_explayer_from_canvas.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/wizard/step_fc35_explayer_from_canvas.py#L144-L158
def help_content(self): """Return the content of help for this step wizard. We only needs to re-implement this method in each wizard step. :returns: A message object contains help. :rtype: m.Message """ message = m.Message() message.add(m.Paragraph(tr( 'In this wizard step: {step_name}, You can choose a exposure ' 'layer from the list of layers that have been loaded to QGIS and ' 'that matches with the geometry and exposure type you set in the ' 'previous step').format(step_name=self.step_name))) return message
[ "def", "help_content", "(", "self", ")", ":", "message", "=", "m", ".", "Message", "(", ")", "message", ".", "add", "(", "m", ".", "Paragraph", "(", "tr", "(", "'In this wizard step: {step_name}, You can choose a exposure '", "'layer from the list of layers that have ...
Return the content of help for this step wizard. We only needs to re-implement this method in each wizard step. :returns: A message object contains help. :rtype: m.Message
[ "Return", "the", "content", "of", "help", "for", "this", "step", "wizard", "." ]
python
train
frictionlessdata/datapackage-py
datapackage/resource.py
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/resource.py#L222-L229
def raw_read(self): """https://github.com/frictionlessdata/datapackage-py#resource """ contents = b'' with self.raw_iter() as filelike: for chunk in filelike: contents += chunk return contents
[ "def", "raw_read", "(", "self", ")", ":", "contents", "=", "b''", "with", "self", ".", "raw_iter", "(", ")", "as", "filelike", ":", "for", "chunk", "in", "filelike", ":", "contents", "+=", "chunk", "return", "contents" ]
https://github.com/frictionlessdata/datapackage-py#resource
[ "https", ":", "//", "github", ".", "com", "/", "frictionlessdata", "/", "datapackage", "-", "py#resource" ]
python
valid
spacetelescope/synphot_refactor
synphot/models.py
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/models.py#L252-L268
def evaluate(self, x, *args): """One dimensional constant flux model function. Parameters ---------- x : number or ndarray Wavelengths in Angstrom. Returns ------- y : number or ndarray Flux in PHOTLAM. """ a = (self.amplitude * np.ones_like(x)) * self._flux_unit y = units.convert_flux(x, a, units.PHOTLAM) return y.value
[ "def", "evaluate", "(", "self", ",", "x", ",", "*", "args", ")", ":", "a", "=", "(", "self", ".", "amplitude", "*", "np", ".", "ones_like", "(", "x", ")", ")", "*", "self", ".", "_flux_unit", "y", "=", "units", ".", "convert_flux", "(", "x", ",...
One dimensional constant flux model function. Parameters ---------- x : number or ndarray Wavelengths in Angstrom. Returns ------- y : number or ndarray Flux in PHOTLAM.
[ "One", "dimensional", "constant", "flux", "model", "function", "." ]
python
train
ladybug-tools/ladybug
ladybug/location.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/location.py#L40-L59
def from_json(cls, data): """Create a location from a dictionary. Args: data: { "city": "-", "latitude": 0, "longitude": 0, "time_zone": 0, "elevation": 0} """ optional_keys = ('city', 'state', 'country', 'latitude', 'longitude', 'time_zone', 'elevation', 'station_id', 'source') for key in optional_keys: if key not in data: data[key] = None return cls(data['city'], data['state'], data['country'], data['latitude'], data['longitude'], data['time_zone'], data['elevation'], data['station_id'], data['source'])
[ "def", "from_json", "(", "cls", ",", "data", ")", ":", "optional_keys", "=", "(", "'city'", ",", "'state'", ",", "'country'", ",", "'latitude'", ",", "'longitude'", ",", "'time_zone'", ",", "'elevation'", ",", "'station_id'", ",", "'source'", ")", "for", "...
Create a location from a dictionary. Args: data: { "city": "-", "latitude": 0, "longitude": 0, "time_zone": 0, "elevation": 0}
[ "Create", "a", "location", "from", "a", "dictionary", "." ]
python
train
pantsbuild/pex
pex/variables.py
https://github.com/pantsbuild/pex/blob/87b2129d860250d3b9edce75b9cb62f9789ee521/pex/variables.py#L123-L129
def patch(self, **kw): """Update the environment for the duration of a context.""" old_environ = self._environ self._environ = self._environ.copy() self._environ.update(kw) yield self._environ = old_environ
[ "def", "patch", "(", "self", ",", "*", "*", "kw", ")", ":", "old_environ", "=", "self", ".", "_environ", "self", ".", "_environ", "=", "self", ".", "_environ", ".", "copy", "(", ")", "self", ".", "_environ", ".", "update", "(", "kw", ")", "yield", ...
Update the environment for the duration of a context.
[ "Update", "the", "environment", "for", "the", "duration", "of", "a", "context", "." ]
python
train
RJT1990/pyflux
pyflux/ssm/dar.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/ssm/dar.py#L84-L97
def _create_latent_variables(self): """ Creates model latent variables Returns ---------- None (changes model attributes) """ self.latent_variables.add_z('Sigma^2 irregular', fam.Flat(transform='exp'), fam.Normal(0,3)) self.latent_variables.add_z('Constant', fam.Flat(transform=None), fam.Normal(0,3)) for parm in range(1,self.ar+1): self.latent_variables.add_z('Sigma^2 AR(' + str(parm) + ')', fam.Flat(transform='exp'), fam.Normal(0,3))
[ "def", "_create_latent_variables", "(", "self", ")", ":", "self", ".", "latent_variables", ".", "add_z", "(", "'Sigma^2 irregular'", ",", "fam", ".", "Flat", "(", "transform", "=", "'exp'", ")", ",", "fam", ".", "Normal", "(", "0", ",", "3", ")", ")", ...
Creates model latent variables Returns ---------- None (changes model attributes)
[ "Creates", "model", "latent", "variables" ]
python
train
HumanCellAtlas/cloud-blobstore
cloud_blobstore/__init__.py
https://github.com/HumanCellAtlas/cloud-blobstore/blob/b8a60e8e8c0da0e39dda084cb467a34cd2d1ef0a/cloud_blobstore/__init__.py#L232-L245
def get_user_metadata( self, bucket: str, key: str ) -> typing.Dict[str, str]: """ Retrieves the user metadata for a given object in a given bucket. If the platform has any mandatory prefixes or suffixes for the metadata keys, they should be stripped before being returned. :param bucket: the bucket the object resides in. :param key: the key of the object for which metadata is being retrieved. :return: a dictionary mapping metadata keys to metadata values. """ raise NotImplementedError()
[ "def", "get_user_metadata", "(", "self", ",", "bucket", ":", "str", ",", "key", ":", "str", ")", "->", "typing", ".", "Dict", "[", "str", ",", "str", "]", ":", "raise", "NotImplementedError", "(", ")" ]
Retrieves the user metadata for a given object in a given bucket. If the platform has any mandatory prefixes or suffixes for the metadata keys, they should be stripped before being returned. :param bucket: the bucket the object resides in. :param key: the key of the object for which metadata is being retrieved. :return: a dictionary mapping metadata keys to metadata values.
[ "Retrieves", "the", "user", "metadata", "for", "a", "given", "object", "in", "a", "given", "bucket", ".", "If", "the", "platform", "has", "any", "mandatory", "prefixes", "or", "suffixes", "for", "the", "metadata", "keys", "they", "should", "be", "stripped", ...
python
train
orbingol/NURBS-Python
geomdl/linalg.py
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/linalg.py#L565-L595
def convex_hull(points): """ Returns points on convex hull in counterclockwise order according to Graham's scan algorithm. Reference: https://gist.github.com/arthur-e/5cf52962341310f438e96c1f3c3398b8 .. note:: This implementation only works in 2-dimensional space. :param points: list of 2-dimensional points :type points: list, tuple :return: convex hull of the input points :rtype: list """ turn_left, turn_right, turn_none = (1, -1, 0) def cmp(a, b): return (a > b) - (a < b) def turn(p, q, r): return cmp((q[0] - p[0])*(r[1] - p[1]) - (r[0] - p[0])*(q[1] - p[1]), 0) def keep_left(hull, r): while len(hull) > 1 and turn(hull[-2], hull[-1], r) != turn_left: hull.pop() if not len(hull) or hull[-1] != r: hull.append(r) return hull points = sorted(points) l = reduce(keep_left, points, []) u = reduce(keep_left, reversed(points), []) return l.extend(u[i] for i in range(1, len(u) - 1)) or l
[ "def", "convex_hull", "(", "points", ")", ":", "turn_left", ",", "turn_right", ",", "turn_none", "=", "(", "1", ",", "-", "1", ",", "0", ")", "def", "cmp", "(", "a", ",", "b", ")", ":", "return", "(", "a", ">", "b", ")", "-", "(", "a", "<", ...
Returns points on convex hull in counterclockwise order according to Graham's scan algorithm. Reference: https://gist.github.com/arthur-e/5cf52962341310f438e96c1f3c3398b8 .. note:: This implementation only works in 2-dimensional space. :param points: list of 2-dimensional points :type points: list, tuple :return: convex hull of the input points :rtype: list
[ "Returns", "points", "on", "convex", "hull", "in", "counterclockwise", "order", "according", "to", "Graham", "s", "scan", "algorithm", "." ]
python
train
jtmoulia/switchboard-python
aplus/__init__.py
https://github.com/jtmoulia/switchboard-python/blob/c9b0cb74cb12a64385465091be633e78d39f08b1/aplus/__init__.py#L47-L66
def reject(self, reason): """ Reject this promise for a given reason. """ assert self._state==self.PENDING self._state=self.REJECTED; self.reason = reason for errback in self._errbacks: try: errback(reason) except Exception: # Ignore errors in callbacks pass # We will never call these errbacks again, so allow # them to be garbage collected. This is important since # they probably include closures which are binding variables # that might otherwise be garbage collected. self._errbacks = []
[ "def", "reject", "(", "self", ",", "reason", ")", ":", "assert", "self", ".", "_state", "==", "self", ".", "PENDING", "self", ".", "_state", "=", "self", ".", "REJECTED", "self", ".", "reason", "=", "reason", "for", "errback", "in", "self", ".", "_er...
Reject this promise for a given reason.
[ "Reject", "this", "promise", "for", "a", "given", "reason", "." ]
python
train
MaxHalford/prince
prince/mfa.py
https://github.com/MaxHalford/prince/blob/714c9cdfc4d9f8823eabf550a23ad01fe87c50d7/prince/mfa.py#L158-L190
def partial_row_coordinates(self, X): """Returns the row coordinates for each group.""" utils.validation.check_is_fitted(self, 's_') # Check input if self.check_input: utils.check_array(X, dtype=[str, np.number]) # Prepare input X = self._prepare_input(X) # Define the projection matrix P P = len(X) ** 0.5 * self.U_ / self.s_ # Get the projections for each group coords = {} for name, cols in sorted(self.groups.items()): X_partial = X.loc[:, cols] if not self.all_nums_[name]: X_partial = self.cat_one_hots_[name].transform(X_partial) Z_partial = X_partial / self.partial_factor_analysis_[name].s_[0] coords[name] = len(self.groups) * (Z_partial @ Z_partial.T) @ P # Convert coords to a MultiIndex DataFrame coords = pd.DataFrame({ (name, i): group_coords.loc[:, i] for name, group_coords in coords.items() for i in range(group_coords.shape[1]) }) return coords
[ "def", "partial_row_coordinates", "(", "self", ",", "X", ")", ":", "utils", ".", "validation", ".", "check_is_fitted", "(", "self", ",", "'s_'", ")", "# Check input", "if", "self", ".", "check_input", ":", "utils", ".", "check_array", "(", "X", ",", "dtype...
Returns the row coordinates for each group.
[ "Returns", "the", "row", "coordinates", "for", "each", "group", "." ]
python
train
riga/law
law/workflow/local.py
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/workflow/local.py#L29-L38
def complete(self): """ When *local_workflow_require_branches* of the task was set to *True*, returns whether the :py:meth:`run` method has been called before. Otherwise, the call is forwarded to the super class. """ if self.task.local_workflow_require_branches: return self._has_run else: return super(LocalWorkflowProxy, self).complete()
[ "def", "complete", "(", "self", ")", ":", "if", "self", ".", "task", ".", "local_workflow_require_branches", ":", "return", "self", ".", "_has_run", "else", ":", "return", "super", "(", "LocalWorkflowProxy", ",", "self", ")", ".", "complete", "(", ")" ]
When *local_workflow_require_branches* of the task was set to *True*, returns whether the :py:meth:`run` method has been called before. Otherwise, the call is forwarded to the super class.
[ "When", "*", "local_workflow_require_branches", "*", "of", "the", "task", "was", "set", "to", "*", "True", "*", "returns", "whether", "the", ":", "py", ":", "meth", ":", "run", "method", "has", "been", "called", "before", ".", "Otherwise", "the", "call", ...
python
train
saltstack/salt
salt/client/ssh/wrapper/state.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/ssh/wrapper/state.py#L170-L177
def _parse_mods(mods): ''' Parse modules. ''' if isinstance(mods, six.string_types): mods = [item.strip() for item in mods.split(',') if item.strip()] return mods
[ "def", "_parse_mods", "(", "mods", ")", ":", "if", "isinstance", "(", "mods", ",", "six", ".", "string_types", ")", ":", "mods", "=", "[", "item", ".", "strip", "(", ")", "for", "item", "in", "mods", ".", "split", "(", "','", ")", "if", "item", "...
Parse modules.
[ "Parse", "modules", "." ]
python
train
brentp/cruzdb
cruzdb/intersecter.py
https://github.com/brentp/cruzdb/blob/9068d46e25952f4a929dde0242beb31fa4c7e89a/cruzdb/intersecter.py#L269-L284
def distance(f1, f2): """\ Distance between 2 features. The integer result is always positive or zero. If the features overlap or touch, it is zero. >>> from intersecter import Feature, distance >>> distance(Feature(1, 2), Feature(12, 13)) 10 >>> distance(Feature(1, 2), Feature(2, 3)) 0 >>> distance(Feature(1, 100), Feature(20, 30)) 0 """ if f1.end < f2.start: return f2.start - f1.end if f2.end < f1.start: return f1.start - f2.end return 0
[ "def", "distance", "(", "f1", ",", "f2", ")", ":", "if", "f1", ".", "end", "<", "f2", ".", "start", ":", "return", "f2", ".", "start", "-", "f1", ".", "end", "if", "f2", ".", "end", "<", "f1", ".", "start", ":", "return", "f1", ".", "start", ...
\ Distance between 2 features. The integer result is always positive or zero. If the features overlap or touch, it is zero. >>> from intersecter import Feature, distance >>> distance(Feature(1, 2), Feature(12, 13)) 10 >>> distance(Feature(1, 2), Feature(2, 3)) 0 >>> distance(Feature(1, 100), Feature(20, 30)) 0
[ "\\", "Distance", "between", "2", "features", ".", "The", "integer", "result", "is", "always", "positive", "or", "zero", ".", "If", "the", "features", "overlap", "or", "touch", "it", "is", "zero", ".", ">>>", "from", "intersecter", "import", "Feature", "di...
python
train
ponty/pyscreenshot
pyscreenshot/plugins/gtkpixbuf.py
https://github.com/ponty/pyscreenshot/blob/51010195cbb5361dcd4b414ff132b87244c9e1cb/pyscreenshot/plugins/gtkpixbuf.py#L30-L56
def grab_to_file(self, filename, bbox=None): """http://www.pygtk.org/docs/pygtk/class-gdkpixbuf.html. only "jpeg" or "png" """ w = self.gtk.gdk.get_default_root_window() # Capture the whole screen. if bbox is None: sz = w.get_size() pb = self.gtk.gdk.Pixbuf( self.gtk.gdk.COLORSPACE_RGB, False, 8, sz[0], sz[1]) # 24bit RGB pb = pb.get_from_drawable( w, w.get_colormap(), 0, 0, 0, 0, sz[0], sz[1]) # Only capture what we need. The smaller the capture, the faster. else: sz = [bbox[2] - bbox[0], bbox[3] - bbox[1]] pb = self.gtk.gdk.Pixbuf( self.gtk.gdk.COLORSPACE_RGB, False, 8, sz[0], sz[1]) pb = pb.get_from_drawable( w, w.get_colormap(), bbox[0], bbox[1], 0, 0, sz[0], sz[1]) assert pb ftype = 'png' if filename.endswith('.jpeg'): ftype = 'jpeg' pb.save(filename, ftype)
[ "def", "grab_to_file", "(", "self", ",", "filename", ",", "bbox", "=", "None", ")", ":", "w", "=", "self", ".", "gtk", ".", "gdk", ".", "get_default_root_window", "(", ")", "# Capture the whole screen.", "if", "bbox", "is", "None", ":", "sz", "=", ...
http://www.pygtk.org/docs/pygtk/class-gdkpixbuf.html. only "jpeg" or "png"
[ "http", ":", "//", "www", ".", "pygtk", ".", "org", "/", "docs", "/", "pygtk", "/", "class", "-", "gdkpixbuf", ".", "html", "." ]
python
valid
networks-lab/metaknowledge
metaknowledge/citation.py
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/citation.py#L263-L311
def isJournal(self, dbname = abrevDBname, manualDB = manualDBname, returnDict ='both', checkIfExcluded = False): """Returns `True` if the `Citation`'s `journal` field is a journal abbreviation from the WOS listing found at [http://images.webofknowledge.com/WOK46/help/WOS/A_abrvjt.html](http://images.webofknowledge.com/WOK46/help/WOS/A_abrvjt.html), i.e. checks if the citation is citing a journal. **Note**: Requires the [j9Abbreviations](../modules/journalAbbreviations.html#metaknowledge.journalAbbreviations.backend.getj9dict) database file and will raise an error if it cannot be found. **Note**: All parameters are used for getting the data base with [getj9dict](../modules/journalAbbreviations.html#metaknowledge.journalAbbreviations.backend.getj9dict). # Parameters _dbname_ : `optional [str]` > The name of the downloaded database file, the default is determined at run time. It is recommended that this remain untouched. _manualDB_ : `optional [str]` > The name of the manually created database file, the default is determined at run time. It is recommended that this remain untouched. _returnDict_ : `optional [str]` > default `'both'`, can be used to get both databases or only one with `'WOS'` or `'manual'`. # Returns `bool` > `True` if the `Citation` is for a journal """ global abbrevDict if abbrevDict is None: abbrevDict = getj9dict(dbname = dbname, manualDB = manualDB, returnDict = returnDict) if not hasattr(self, 'journal'): return False elif checkIfExcluded and self.journal: try: if abbrevDict.get(self.journal, [True])[0]: return False else: return True except IndexError: return False else: if self.journal: dictVal = abbrevDict.get(self.journal, [b''])[0] if dictVal: return dictVal else: return False else: return False
[ "def", "isJournal", "(", "self", ",", "dbname", "=", "abrevDBname", ",", "manualDB", "=", "manualDBname", ",", "returnDict", "=", "'both'", ",", "checkIfExcluded", "=", "False", ")", ":", "global", "abbrevDict", "if", "abbrevDict", "is", "None", ":", "abbrev...
Returns `True` if the `Citation`'s `journal` field is a journal abbreviation from the WOS listing found at [http://images.webofknowledge.com/WOK46/help/WOS/A_abrvjt.html](http://images.webofknowledge.com/WOK46/help/WOS/A_abrvjt.html), i.e. checks if the citation is citing a journal. **Note**: Requires the [j9Abbreviations](../modules/journalAbbreviations.html#metaknowledge.journalAbbreviations.backend.getj9dict) database file and will raise an error if it cannot be found. **Note**: All parameters are used for getting the data base with [getj9dict](../modules/journalAbbreviations.html#metaknowledge.journalAbbreviations.backend.getj9dict). # Parameters _dbname_ : `optional [str]` > The name of the downloaded database file, the default is determined at run time. It is recommended that this remain untouched. _manualDB_ : `optional [str]` > The name of the manually created database file, the default is determined at run time. It is recommended that this remain untouched. _returnDict_ : `optional [str]` > default `'both'`, can be used to get both databases or only one with `'WOS'` or `'manual'`. # Returns `bool` > `True` if the `Citation` is for a journal
[ "Returns", "True", "if", "the", "Citation", "s", "journal", "field", "is", "a", "journal", "abbreviation", "from", "the", "WOS", "listing", "found", "at", "[", "http", ":", "//", "images", ".", "webofknowledge", ".", "com", "/", "WOK46", "/", "help", "/"...
python
train
grahambell/pymoc
lib/pymoc/util/tool.py
https://github.com/grahambell/pymoc/blob/0e2e57ce07ff3de6ac024627c1fb6ad30c2fde48/lib/pymoc/util/tool.py#L353-L400
def plot(self): """Show the running MOC on an all-sky map. This command requires that the Healpy and matplotlib libraries be available. It plots the running MOC, which should be normalized to a lower order first if it would generate an excessively large pixel array. :: pymoctool a.moc --normalize 8 --plot It also accepts additional arguments which can be used to control the plot. The 'order' option can be used instead of normalizing the MOC before plotting. The 'antialias' option specifies an additional number of MOC orders which should be used to smooth the edges as plotted -- 1 or 2 is normally sufficient. The 'file' option can be given to specify a file to which the plot should be saved. :: pymoctool ... --plot [order <order>] [antialias <level>] [file <filename>] ... """ if self.moc is None: raise CommandError('No MOC information present for plotting') from .plot import plot_moc order = self.moc.order antialias = 0 filename = None while self.params: if self.params[-1] == 'order': self.params.pop() order = int(self.params.pop()) elif self.params[-1] == 'antialias': self.params.pop() antialias = int(self.params.pop()) elif self.params[-1] == 'file': self.params.pop() filename = self.params.pop() else: break plot_moc(self.moc, order=order, antialias=antialias, filename=filename, projection='moll')
[ "def", "plot", "(", "self", ")", ":", "if", "self", ".", "moc", "is", "None", ":", "raise", "CommandError", "(", "'No MOC information present for plotting'", ")", "from", ".", "plot", "import", "plot_moc", "order", "=", "self", ".", "moc", ".", "order", "a...
Show the running MOC on an all-sky map. This command requires that the Healpy and matplotlib libraries be available. It plots the running MOC, which should be normalized to a lower order first if it would generate an excessively large pixel array. :: pymoctool a.moc --normalize 8 --plot It also accepts additional arguments which can be used to control the plot. The 'order' option can be used instead of normalizing the MOC before plotting. The 'antialias' option specifies an additional number of MOC orders which should be used to smooth the edges as plotted -- 1 or 2 is normally sufficient. The 'file' option can be given to specify a file to which the plot should be saved. :: pymoctool ... --plot [order <order>] [antialias <level>] [file <filename>] ...
[ "Show", "the", "running", "MOC", "on", "an", "all", "-", "sky", "map", "." ]
python
train
openstack/networking-hyperv
networking_hyperv/neutron/trunk_driver.py
https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/trunk_driver.py#L61-L85
def handle_subports(self, subports, event_type): """Subport data model change from the server.""" LOG.debug("Subports event received: %(event_type)s. " "Subports: %(subports)s", {'event_type': event_type, 'subports': subports}) # update the cache. if event_type == events.CREATED: for subport in subports: trunk = self._trunks.get(subport['trunk_id']) if trunk: trunk.sub_ports.append(subport) elif event_type == events.DELETED: for subport in subports: trunk = self._trunks.get(subport['trunk_id']) if trunk and subport in trunk.sub_ports: trunk.sub_ports.remove(subport) # update the bound trunks. affected_trunk_ids = set([s['trunk_id'] for s in subports]) for trunk_id in affected_trunk_ids: trunk = self._trunks.get(trunk_id) if trunk: self._setup_trunk(trunk)
[ "def", "handle_subports", "(", "self", ",", "subports", ",", "event_type", ")", ":", "LOG", ".", "debug", "(", "\"Subports event received: %(event_type)s. \"", "\"Subports: %(subports)s\"", ",", "{", "'event_type'", ":", "event_type", ",", "'subports'", ":", "subports...
Subport data model change from the server.
[ "Subport", "data", "model", "change", "from", "the", "server", "." ]
python
train
aio-libs/aiohttp
aiohttp/client.py
https://github.com/aio-libs/aiohttp/blob/9504fe2affaaff673fa4f3754c1c44221f8ba47d/aiohttp/client.py#L1035-L1119
def request( method: str, url: StrOrURL, *, params: Optional[Mapping[str, str]]=None, data: Any=None, json: Any=None, headers: LooseHeaders=None, skip_auto_headers: Optional[Iterable[str]]=None, auth: Optional[BasicAuth]=None, allow_redirects: bool=True, max_redirects: int=10, compress: Optional[str]=None, chunked: Optional[bool]=None, expect100: bool=False, raise_for_status: Optional[bool]=None, read_until_eof: bool=True, proxy: Optional[StrOrURL]=None, proxy_auth: Optional[BasicAuth]=None, timeout: Union[ClientTimeout, object]=sentinel, cookies: Optional[LooseCookies]=None, version: HttpVersion=http.HttpVersion11, connector: Optional[BaseConnector]=None, loop: Optional[asyncio.AbstractEventLoop]=None ) -> _SessionRequestContextManager: """Constructs and sends a request. Returns response object. method - HTTP method url - request url params - (optional) Dictionary or bytes to be sent in the query string of the new request data - (optional) Dictionary, bytes, or file-like object to send in the body of the request json - (optional) Any json compatible python object headers - (optional) Dictionary of HTTP Headers to send with the request cookies - (optional) Dict object to send with the request auth - (optional) BasicAuth named tuple represent HTTP Basic Auth auth - aiohttp.helpers.BasicAuth allow_redirects - (optional) If set to False, do not follow redirects version - Request HTTP version. compress - Set to True if request has to be compressed with deflate encoding. chunked - Set to chunk size for chunked transfer encoding. expect100 - Expect 100-continue response from server. connector - BaseConnector sub-class instance to support connection pooling. read_until_eof - Read response until eof if response does not have Content-Length header. loop - Optional event loop. timeout - Optional ClientTimeout settings structure, 5min total timeout by default. 
Usage:: >>> import aiohttp >>> resp = await aiohttp.request('GET', 'http://python.org/') >>> resp <ClientResponse(python.org/) [200]> >>> data = await resp.read() """ connector_owner = False if connector is None: connector_owner = True connector = TCPConnector(loop=loop, force_close=True) session = ClientSession( loop=loop, cookies=cookies, version=version, timeout=timeout, connector=connector, connector_owner=connector_owner) return _SessionRequestContextManager( session._request(method, url, params=params, data=data, json=json, headers=headers, skip_auto_headers=skip_auto_headers, auth=auth, allow_redirects=allow_redirects, max_redirects=max_redirects, compress=compress, chunked=chunked, expect100=expect100, raise_for_status=raise_for_status, read_until_eof=read_until_eof, proxy=proxy, proxy_auth=proxy_auth,), session)
[ "def", "request", "(", "method", ":", "str", ",", "url", ":", "StrOrURL", ",", "*", ",", "params", ":", "Optional", "[", "Mapping", "[", "str", ",", "str", "]", "]", "=", "None", ",", "data", ":", "Any", "=", "None", ",", "json", ":", "Any", "=...
Constructs and sends a request. Returns response object. method - HTTP method url - request url params - (optional) Dictionary or bytes to be sent in the query string of the new request data - (optional) Dictionary, bytes, or file-like object to send in the body of the request json - (optional) Any json compatible python object headers - (optional) Dictionary of HTTP Headers to send with the request cookies - (optional) Dict object to send with the request auth - (optional) BasicAuth named tuple represent HTTP Basic Auth auth - aiohttp.helpers.BasicAuth allow_redirects - (optional) If set to False, do not follow redirects version - Request HTTP version. compress - Set to True if request has to be compressed with deflate encoding. chunked - Set to chunk size for chunked transfer encoding. expect100 - Expect 100-continue response from server. connector - BaseConnector sub-class instance to support connection pooling. read_until_eof - Read response until eof if response does not have Content-Length header. loop - Optional event loop. timeout - Optional ClientTimeout settings structure, 5min total timeout by default. Usage:: >>> import aiohttp >>> resp = await aiohttp.request('GET', 'http://python.org/') >>> resp <ClientResponse(python.org/) [200]> >>> data = await resp.read()
[ "Constructs", "and", "sends", "a", "request", ".", "Returns", "response", "object", ".", "method", "-", "HTTP", "method", "url", "-", "request", "url", "params", "-", "(", "optional", ")", "Dictionary", "or", "bytes", "to", "be", "sent", "in", "the", "qu...
python
train
cackharot/suds-py3
suds/xsd/schema.py
https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/xsd/schema.py#L99-L118
def autoblend(self): """ Ensure that all schemas within the collection import each other which has a blending effect. @return: self @rtype: L{SchemaCollection} """ namespaces = self.namespaces.keys() for s in self.children: for ns in namespaces: tns = s.root.get('targetNamespace') if tns == ns: continue for imp in s.root.getChildren('import'): if imp.get('namespace') == ns: continue imp = Element('import', ns=Namespace.xsdns) imp.set('namespace', ns) s.root.append(imp) return self
[ "def", "autoblend", "(", "self", ")", ":", "namespaces", "=", "self", ".", "namespaces", ".", "keys", "(", ")", "for", "s", "in", "self", ".", "children", ":", "for", "ns", "in", "namespaces", ":", "tns", "=", "s", ".", "root", ".", "get", "(", "...
Ensure that all schemas within the collection import each other which has a blending effect. @return: self @rtype: L{SchemaCollection}
[ "Ensure", "that", "all", "schemas", "within", "the", "collection", "import", "each", "other", "which", "has", "a", "blending", "effect", "." ]
python
train
thefactory/marathon-python
marathon/client.py
https://github.com/thefactory/marathon-python/blob/592b253aa8edf2475c97ca438ad7b6936652caf2/marathon/client.py#L67-L74
def _parse_response(response, clazz, is_list=False, resource_name=None): """Parse a Marathon response into an object or list of objects.""" target = response.json()[ resource_name] if resource_name else response.json() if is_list: return [clazz.from_json(resource) for resource in target] else: return clazz.from_json(target)
[ "def", "_parse_response", "(", "response", ",", "clazz", ",", "is_list", "=", "False", ",", "resource_name", "=", "None", ")", ":", "target", "=", "response", ".", "json", "(", ")", "[", "resource_name", "]", "if", "resource_name", "else", "response", ".",...
Parse a Marathon response into an object or list of objects.
[ "Parse", "a", "Marathon", "response", "into", "an", "object", "or", "list", "of", "objects", "." ]
python
train
political-memory/django-representatives
representatives/migrations/0016_chamber_migrate_data.py
https://github.com/political-memory/django-representatives/blob/811c90d0250149e913e6196f0ab11c97d396be39/representatives/migrations/0016_chamber_migrate_data.py#L9-L36
def calculate_hash(obj): """ Computes fingerprint for an object, this code is duplicated from representatives.models.HashableModel because we don't have access to model methods in a migration scenario. """ hashable_fields = { 'Chamber': ['name', 'country', 'abbreviation'], 'Constituency': ['name'], 'Group': ['name', 'abbreviation', 'kind', 'chamber'], 'Mandate': ['group', 'constituency', 'role', 'begin_date', 'end_date', 'representative'] } fingerprint = hashlib.sha1() for field_name in hashable_fields[obj.__class__.__name__]: field = obj._meta.get_field(field_name) if field.is_relation: related = getattr(obj, field_name) if related is None: fingerprint.update(smart_str(related)) else: fingerprint.update(related.fingerprint) else: fingerprint.update(smart_str(getattr(obj, field_name))) obj.fingerprint = fingerprint.hexdigest() return obj.fingerprint
[ "def", "calculate_hash", "(", "obj", ")", ":", "hashable_fields", "=", "{", "'Chamber'", ":", "[", "'name'", ",", "'country'", ",", "'abbreviation'", "]", ",", "'Constituency'", ":", "[", "'name'", "]", ",", "'Group'", ":", "[", "'name'", ",", "'abbreviati...
Computes fingerprint for an object, this code is duplicated from representatives.models.HashableModel because we don't have access to model methods in a migration scenario.
[ "Computes", "fingerprint", "for", "an", "object", "this", "code", "is", "duplicated", "from", "representatives", ".", "models", ".", "HashableModel", "because", "we", "don", "t", "have", "access", "to", "model", "methods", "in", "a", "migration", "scenario", "...
python
train
proycon/clam
clam/common/data.py
https://github.com/proycon/clam/blob/09d15cfc26d7cbe0f5976cdd5424dc446d10dbf3/clam/common/data.py#L1800-L1822
def xml(self, indent = ""): """Produce Template XML""" xml = indent + "<OutputTemplate id=\"" + self.id + "\" format=\"" + self.formatclass.__name__ + "\"" + " label=\"" + self.label + "\"" if self.formatclass.mimetype: xml +=" mimetype=\""+self.formatclass.mimetype+"\"" if self.formatclass.schema: xml +=" schema=\""+clam.common.util.xmlescape(self.formatclass.schema)+"\"" if self.filename: xml +=" filename=\""+clam.common.util.xmlescape(self.filename)+"\"" if self.extension: xml +=" extension=\""+clam.common.util.xmlescape(self.extension)+"\"" if self.parent: xml +=" parent=\""+clam.common.util.xmlescape(self.parent)+"\"" if self.unique: xml +=" unique=\"yes\"" else: xml +=" unique=\"no\"" xml += ">\n" for metafield in self.metafields: xml += metafield.xml(indent + " ") + "\n" xml += indent + "</OutputTemplate>" return xml
[ "def", "xml", "(", "self", ",", "indent", "=", "\"\"", ")", ":", "xml", "=", "indent", "+", "\"<OutputTemplate id=\\\"\"", "+", "self", ".", "id", "+", "\"\\\" format=\\\"\"", "+", "self", ".", "formatclass", ".", "__name__", "+", "\"\\\"\"", "+", "\" labe...
Produce Template XML
[ "Produce", "Template", "XML" ]
python
train
SteveMcGrath/pySecurityCenter
securitycenter/sc4.py
https://github.com/SteveMcGrath/pySecurityCenter/blob/f0b10b1bcd4fd23a8d4d09ca6774cdf5e1cfd880/securitycenter/sc4.py#L866-L904
def asset_create_combo(self, name, combo, tag='', description=''): '''asset_create_combo name, combination, tag, description Creates a new combination asset list. Operands can be either asset list IDs or be a nested combination asset list. UN-DOCUMENTED CALL: This function is not considered stable. AND = intersection OR = union operand = asset list ID or nested combination. operator = intersection or union. Example: combo = { 'operand1': { 'operand1': '2', 'operand2': '2', 'operation': 'union', }, 'operand2': '3', 'operation': 'intersection' } :param name: Name of the asset list. :type name: string :param combo: dict :param tag: The tag of the asset list. :type tag: string :param description: Description of the asset list. :type description: string ''' return self.raw_query('asset', 'add', data={ 'name': name, 'description': description, 'type': 'combination', 'combinations': combo, })
[ "def", "asset_create_combo", "(", "self", ",", "name", ",", "combo", ",", "tag", "=", "''", ",", "description", "=", "''", ")", ":", "return", "self", ".", "raw_query", "(", "'asset'", ",", "'add'", ",", "data", "=", "{", "'name'", ":", "name", ",", ...
asset_create_combo name, combination, tag, description Creates a new combination asset list. Operands can be either asset list IDs or be a nested combination asset list. UN-DOCUMENTED CALL: This function is not considered stable. AND = intersection OR = union operand = asset list ID or nested combination. operator = intersection or union. Example: combo = { 'operand1': { 'operand1': '2', 'operand2': '2', 'operation': 'union', }, 'operand2': '3', 'operation': 'intersection' } :param name: Name of the asset list. :type name: string :param combo: dict :param tag: The tag of the asset list. :type tag: string :param description: Description of the asset list. :type description: string
[ "asset_create_combo", "name", "combination", "tag", "description", "Creates", "a", "new", "combination", "asset", "list", ".", "Operands", "can", "be", "either", "asset", "list", "IDs", "or", "be", "a", "nested", "combination", "asset", "list", "." ]
python
train
Azure/msrestazure-for-python
msrestazure/azure_active_directory.py
https://github.com/Azure/msrestazure-for-python/blob/5f99262305692525d03ca87d2c5356b05c5aa874/msrestazure/azure_active_directory.py#L159-L174
def _convert_token(self, token): """Convert token fields from camel case. :param dict token: An authentication token. :rtype: dict """ # Beware that ADAL returns a pointer to its own dict, do # NOT change it in place token = token.copy() # If it's from ADAL, expiresOn will be in ISO form. # Bring it back to float, using expiresIn if "expiresOn" in token and "expiresIn" in token: token["expiresOn"] = token['expiresIn'] + time.time() return {self._case.sub(r'\1_\2', k).lower(): v for k, v in token.items()}
[ "def", "_convert_token", "(", "self", ",", "token", ")", ":", "# Beware that ADAL returns a pointer to its own dict, do", "# NOT change it in place", "token", "=", "token", ".", "copy", "(", ")", "# If it's from ADAL, expiresOn will be in ISO form.", "# Bring it back to float, us...
Convert token fields from camel case. :param dict token: An authentication token. :rtype: dict
[ "Convert", "token", "fields", "from", "camel", "case", "." ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/build/build_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/build/build_client.py#L49-L70
def get_artifact(self, project, build_id, artifact_name): """GetArtifact. Gets a specific artifact for a build. :param str project: Project ID or project name :param int build_id: The ID of the build. :param str artifact_name: The name of the artifact. :rtype: :class:`<BuildArtifact> <azure.devops.v5_0.build.models.BuildArtifact>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if build_id is not None: route_values['buildId'] = self._serialize.url('build_id', build_id, 'int') query_parameters = {} if artifact_name is not None: query_parameters['artifactName'] = self._serialize.query('artifact_name', artifact_name, 'str') response = self._send(http_method='GET', location_id='1db06c96-014e-44e1-ac91-90b2d4b3e984', version='5.0', route_values=route_values, query_parameters=query_parameters) return self._deserialize('BuildArtifact', response)
[ "def", "get_artifact", "(", "self", ",", "project", ",", "build_id", ",", "artifact_name", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "ur...
GetArtifact. Gets a specific artifact for a build. :param str project: Project ID or project name :param int build_id: The ID of the build. :param str artifact_name: The name of the artifact. :rtype: :class:`<BuildArtifact> <azure.devops.v5_0.build.models.BuildArtifact>`
[ "GetArtifact", ".", "Gets", "a", "specific", "artifact", "for", "a", "build", ".", ":", "param", "str", "project", ":", "Project", "ID", "or", "project", "name", ":", "param", "int", "build_id", ":", "The", "ID", "of", "the", "build", ".", ":", "param"...
python
train
fermiPy/fermipy
fermipy/utils.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/utils.py#L1239-L1302
def merge_dict(d0, d1, add_new_keys=False, append_arrays=False): """Recursively merge the contents of python dictionary d0 with the contents of another python dictionary, d1. Parameters ---------- d0 : dict The input dictionary. d1 : dict Dictionary to be merged with the input dictionary. add_new_keys : str Do not skip keys that only exist in d1. append_arrays : bool If an element is a numpy array set the value of that element by concatenating the two arrays. """ if d1 is None: return d0 elif d0 is None: return d1 elif d0 is None and d1 is None: return {} od = {} for k, v in d0.items(): t0 = None t1 = None if k in d0: t0 = type(d0[k]) if k in d1: t1 = type(d1[k]) if k not in d1: od[k] = copy.deepcopy(d0[k]) elif isinstance(v, dict) and isinstance(d1[k], dict): od[k] = merge_dict(d0[k], d1[k], add_new_keys, append_arrays) elif isinstance(v, list) and isstr(d1[k]): od[k] = d1[k].split(',') elif isinstance(v, dict) and d1[k] is None: od[k] = copy.deepcopy(d0[k]) elif isinstance(v, np.ndarray) and append_arrays: od[k] = np.concatenate((v, d1[k])) elif (d0[k] is not None and d1[k] is not None) and t0 != t1: if t0 == dict or t0 == list: raise Exception('Conflicting types in dictionary merge for ' 'key %s %s %s' % (k, t0, t1)) od[k] = t0(d1[k]) else: od[k] = copy.copy(d1[k]) if add_new_keys: for k, v in d1.items(): if k not in d0: od[k] = copy.deepcopy(d1[k]) return od
[ "def", "merge_dict", "(", "d0", ",", "d1", ",", "add_new_keys", "=", "False", ",", "append_arrays", "=", "False", ")", ":", "if", "d1", "is", "None", ":", "return", "d0", "elif", "d0", "is", "None", ":", "return", "d1", "elif", "d0", "is", "None", ...
Recursively merge the contents of python dictionary d0 with the contents of another python dictionary, d1. Parameters ---------- d0 : dict The input dictionary. d1 : dict Dictionary to be merged with the input dictionary. add_new_keys : str Do not skip keys that only exist in d1. append_arrays : bool If an element is a numpy array set the value of that element by concatenating the two arrays.
[ "Recursively", "merge", "the", "contents", "of", "python", "dictionary", "d0", "with", "the", "contents", "of", "another", "python", "dictionary", "d1", "." ]
python
train
MacHu-GWU/constant2-project
constant2/_constant2.py
https://github.com/MacHu-GWU/constant2-project/blob/ccf7e14b0e23f9f4bfd13a3e2ce4a1142e570d4f/constant2/_constant2.py#L282-L300
def get_first(self, attr, value, e=0.000001, sort_by="__name__", reverse=False): """Get the first nested Constant class that met ``klass.attr == value``. :param attr: attribute name. :param value: value. :param e: used for float value comparison. :param sort_by: nested class is ordered by <sort_by> attribute. .. versionchanged:: 0.0.5 """ for _, klass in self.subclasses(sort_by, reverse): try: if getattr(klass, attr) == approx(value, e): return klass except: pass return None
[ "def", "get_first", "(", "self", ",", "attr", ",", "value", ",", "e", "=", "0.000001", ",", "sort_by", "=", "\"__name__\"", ",", "reverse", "=", "False", ")", ":", "for", "_", ",", "klass", "in", "self", ".", "subclasses", "(", "sort_by", ",", "rever...
Get the first nested Constant class that met ``klass.attr == value``. :param attr: attribute name. :param value: value. :param e: used for float value comparison. :param sort_by: nested class is ordered by <sort_by> attribute. .. versionchanged:: 0.0.5
[ "Get", "the", "first", "nested", "Constant", "class", "that", "met", "klass", ".", "attr", "==", "value", "." ]
python
train
kudos/passwords
passwords/pbkdf2.py
https://github.com/kudos/passwords/blob/3f404cea6b841f405d720ebeda9ef8b9c58dfef5/passwords/pbkdf2.py#L90-L119
def pbkdf2_bin(data, salt, iterations=1000, keylen=24, hashfunc=None): """Returns a binary digest for the PBKDF2 hash algorithm of `data` with the given `salt`. It iterates `iterations` time and produces a key of `keylen` bytes. By default SHA-1 is used as hash function, a different hashlib `hashfunc` can be provided. """ hashfunc = hashfunc or hashlib.sha1 mac = hmac.new(bytes_(data), None, hashfunc) def _pseudorandom(x, mac=mac): h = mac.copy() h.update(bytes_(x)) if not PY2: return [x for x in h.digest()] else: return map(ord, h.digest()) buf = [] for block in range_(1, -(-keylen // mac.digest_size) + 1): rv = u = _pseudorandom(bytes_(salt) + _pack_int(block)) for i in range_(iterations - 1): if not PY2: u = _pseudorandom(bytes(u)) else: u = _pseudorandom(''.join(map(chr, u))) rv = starmap(xor, zip(rv, u)) buf.extend(rv) if not PY2: return bytes(buf)[:keylen] else: return ''.join(map(chr, buf))[:keylen]
[ "def", "pbkdf2_bin", "(", "data", ",", "salt", ",", "iterations", "=", "1000", ",", "keylen", "=", "24", ",", "hashfunc", "=", "None", ")", ":", "hashfunc", "=", "hashfunc", "or", "hashlib", ".", "sha1", "mac", "=", "hmac", ".", "new", "(", "bytes_",...
Returns a binary digest for the PBKDF2 hash algorithm of `data` with the given `salt`. It iterates `iterations` time and produces a key of `keylen` bytes. By default SHA-1 is used as hash function, a different hashlib `hashfunc` can be provided.
[ "Returns", "a", "binary", "digest", "for", "the", "PBKDF2", "hash", "algorithm", "of", "data", "with", "the", "given", "salt", ".", "It", "iterates", "iterations", "time", "and", "produces", "a", "key", "of", "keylen", "bytes", ".", "By", "default", "SHA",...
python
train
PyHDI/Pyverilog
pyverilog/vparser/parser.py
https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L1438-L1442
def p_delays_floatnumber(self, p): 'delays : DELAY floatnumber' p[0] = DelayStatement(FloatConst( p[2], lineno=p.lineno(1)), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
[ "def", "p_delays_floatnumber", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "DelayStatement", "(", "FloatConst", "(", "p", "[", "2", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", ",", "lineno", "=", "p", ".", "li...
delays : DELAY floatnumber
[ "delays", ":", "DELAY", "floatnumber" ]
python
train
softlayer/softlayer-python
SoftLayer/CLI/call_api.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/call_api.py#L16-L53
def _build_filters(_filters): """Builds filters using the filter options passed into the CLI. This only supports the equals keyword at the moment. """ root = utils.NestedDict({}) for _filter in _filters: operation = None for operation, token in SPLIT_TOKENS: # split "some.key=value" into ["some.key", "value"] top_parts = _filter.split(token, 1) if len(top_parts) == 2: break else: raise exceptions.CLIAbort('Failed to find valid operation for: %s' % _filter) key, value = top_parts current = root # split "some.key" into ["some", "key"] parts = [part.strip() for part in key.split('.')] # Actually drill down and add the filter for part in parts[:-1]: current = current[part] if operation == 'eq': current[parts[-1]] = utils.query_filter(value.strip()) elif operation == 'in': current[parts[-1]] = { 'operation': 'in', 'options': [{ 'name': 'data', 'value': [p.strip() for p in value.split(',')], }], } return root.to_dict()
[ "def", "_build_filters", "(", "_filters", ")", ":", "root", "=", "utils", ".", "NestedDict", "(", "{", "}", ")", "for", "_filter", "in", "_filters", ":", "operation", "=", "None", "for", "operation", ",", "token", "in", "SPLIT_TOKENS", ":", "# split \"some...
Builds filters using the filter options passed into the CLI. This only supports the equals keyword at the moment.
[ "Builds", "filters", "using", "the", "filter", "options", "passed", "into", "the", "CLI", "." ]
python
train
PMEAL/OpenPNM
openpnm/models/misc/misc.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/models/misc/misc.py#L292-L341
def generic_distribution(target, seeds, func): r""" Accepts an 'rv_frozen' object from the Scipy.stats submodule and returns values from the distribution for the given seeds This uses the ``ppf`` method of the stats object Parameters ---------- target : OpenPNM Object The object which this model is associated with. This controls the length of the calculated array, and also provides access to other necessary properties. seeds : string, optional The dictionary key on the Geometry object containing random seed values (between 0 and 1) to use in the statistical distribution. func : object An 'rv_frozen' object from the Scipy.stats library with all of the parameters pre-specified. Examples -------- The following code illustrates the process of obtaining a 'frozen' Scipy stats object and adding it as a model: >>> import scipy >>> import openpnm as op >>> pn = op.network.Cubic(shape=[3, 3, 3]) >>> geo = op.geometry.GenericGeometry(network=pn, pores=pn.Ps, throats=pn.Ts) >>> geo.add_model(propname='pore.seed', ... model=op.models.geometry.pore_seed.random) Now retrieve the stats distribution and add to ``geo`` as a model: >>> stats_obj = scipy.stats.weibull_min(c=2, scale=.0001, loc=0) >>> geo.add_model(propname='pore.size', ... model=op.models.geometry.pore_size.generic_distribution, ... seeds='pore.seed', ... func=stats_obj) >>> import matplotlib.pyplot as plt >>> fig = plt.hist(stats_obj.ppf(q=scipy.rand(1000)), bins=50) """ seeds = target[seeds] value = func.ppf(seeds) return value
[ "def", "generic_distribution", "(", "target", ",", "seeds", ",", "func", ")", ":", "seeds", "=", "target", "[", "seeds", "]", "value", "=", "func", ".", "ppf", "(", "seeds", ")", "return", "value" ]
r""" Accepts an 'rv_frozen' object from the Scipy.stats submodule and returns values from the distribution for the given seeds This uses the ``ppf`` method of the stats object Parameters ---------- target : OpenPNM Object The object which this model is associated with. This controls the length of the calculated array, and also provides access to other necessary properties. seeds : string, optional The dictionary key on the Geometry object containing random seed values (between 0 and 1) to use in the statistical distribution. func : object An 'rv_frozen' object from the Scipy.stats library with all of the parameters pre-specified. Examples -------- The following code illustrates the process of obtaining a 'frozen' Scipy stats object and adding it as a model: >>> import scipy >>> import openpnm as op >>> pn = op.network.Cubic(shape=[3, 3, 3]) >>> geo = op.geometry.GenericGeometry(network=pn, pores=pn.Ps, throats=pn.Ts) >>> geo.add_model(propname='pore.seed', ... model=op.models.geometry.pore_seed.random) Now retrieve the stats distribution and add to ``geo`` as a model: >>> stats_obj = scipy.stats.weibull_min(c=2, scale=.0001, loc=0) >>> geo.add_model(propname='pore.size', ... model=op.models.geometry.pore_size.generic_distribution, ... seeds='pore.seed', ... func=stats_obj) >>> import matplotlib.pyplot as plt >>> fig = plt.hist(stats_obj.ppf(q=scipy.rand(1000)), bins=50)
[ "r", "Accepts", "an", "rv_frozen", "object", "from", "the", "Scipy", ".", "stats", "submodule", "and", "returns", "values", "from", "the", "distribution", "for", "the", "given", "seeds" ]
python
train
empirical-org/Quill-NLP-Tools-and-Datasets
utils/qfragment/examples/porcupine/app.py
https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/utils/qfragment/examples/porcupine/app.py#L59-L102
def get_submissions(): """API endpoint to get submissions in JSON format""" print(request.args.to_dict()) print(request.args.get('search[value]')) print(request.args.get('draw', 1)) # submissions = session.query(Submission).all() if request.args.get('correct_filter', 'all') == 'all': correct_filter = [True, False] elif request.args['correct_filter'] == 'correct': correct_filter = [True] else: correct_filter = [False] if request.args.get('order[0][column]', '0') == '0': column = 'id' elif request.args['order[0][column]'] == '1': column = 'text' else: column = 'primary_error' order_str = "{} {}".format(column, request.args.get('order[0][dir]', 'desc')) search_val = request.args.get('search[value]') draw = request.args.get('draw', 1) filtered_len = session.query(Submission)\ .filter(Submission.text.startswith(search_val))\ .filter(Submission.correct.in_(correct_filter))\ .count() subs = \ session.query(Submission).filter(Submission.text.startswith(search_val))\ .filter(Submission.correct.in_(correct_filter))\ .order_by(order_str)\ .offset(request.args.get('start', 0))\ .limit(request.args.get('length', 10))\ .all() submissions = {'draw': draw, 'recordsTotal':0, 'recordsFiltered':0, 'data':[]} i = 0 for i, submission in enumerate(subs): submissions['data'].append([submission.id, submission.text, submission.primary_error, submission.correct]) submissions['recordsTotal'] = session.query(Submission).count() submissions['recordsFiltered'] = filtered_len return jsonify(submissions)
[ "def", "get_submissions", "(", ")", ":", "print", "(", "request", ".", "args", ".", "to_dict", "(", ")", ")", "print", "(", "request", ".", "args", ".", "get", "(", "'search[value]'", ")", ")", "print", "(", "request", ".", "args", ".", "get", "(", ...
API endpoint to get submissions in JSON format
[ "API", "endpoint", "to", "get", "submissions", "in", "JSON", "format" ]
python
train
OCHA-DAP/hdx-python-country
src/hdx/location/country.py
https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L409-L446
def simplify_countryname(cls, country): # type: (str) -> (str, List[str]) """Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc. Args: country (str): Country name to simplify Returns: Tuple[str, List[str]]: Uppercase simplified country name and list of removed words """ countryupper = country.upper() words = get_words_in_sentence(countryupper) index = countryupper.find(',') if index != -1: countryupper = countryupper[:index] index = countryupper.find(':') if index != -1: countryupper = countryupper[:index] regex = re.compile('\(.+?\)') countryupper = regex.sub('', countryupper) remove = copy.deepcopy(cls.simplifications) for simplification1, simplification2 in cls.abbreviations.items(): countryupper = countryupper.replace(simplification1, '') remove.append(simplification2) for simplification1, simplifications in cls.multiple_abbreviations.items(): countryupper = countryupper.replace(simplification1, '') for simplification2 in simplifications: remove.append(simplification2) remove = '|'.join(remove) regex = re.compile(r'\b(' + remove + r')\b', flags=re.IGNORECASE) countryupper = regex.sub('', countryupper) countryupper = countryupper.strip() countryupper_words = get_words_in_sentence(countryupper) if len(countryupper_words) > 1: countryupper = countryupper_words[0] if countryupper: words.remove(countryupper) return countryupper, words
[ "def", "simplify_countryname", "(", "cls", ",", "country", ")", ":", "# type: (str) -> (str, List[str])", "countryupper", "=", "country", ".", "upper", "(", ")", "words", "=", "get_words_in_sentence", "(", "countryupper", ")", "index", "=", "countryupper", ".", "f...
Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc. Args: country (str): Country name to simplify Returns: Tuple[str, List[str]]: Uppercase simplified country name and list of removed words
[ "Simplifies", "country", "name", "by", "removing", "descriptive", "text", "eg", ".", "DEMOCRATIC", "REPUBLIC", "OF", "etc", "." ]
python
train
iotile/coretools
iotilecore/iotile/core/utilities/kvstore_json.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/utilities/kvstore_json.py#L48-L58
def _load_file(self): """Load all entries from json backing file """ if not os.path.exists(self.file): return {} with open(self.file, "r") as infile: data = json.load(infile) return data
[ "def", "_load_file", "(", "self", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "file", ")", ":", "return", "{", "}", "with", "open", "(", "self", ".", "file", ",", "\"r\"", ")", "as", "infile", ":", "data", "=", "j...
Load all entries from json backing file
[ "Load", "all", "entries", "from", "json", "backing", "file" ]
python
train
drericstrong/pyedna
pyedna/ezdna.py
https://github.com/drericstrong/pyedna/blob/b8f8f52def4f26bb4f3a993ce3400769518385f6/pyedna/ezdna.py#L729-L741
def SelectPoint(): """ Opens an eDNA point picker, where the user can select a single tag. :return: selected tag name """ # Define all required variables in the correct ctypes format pszPoint = create_string_buffer(20) nPoint = c_ushort(20) # Opens the point picker dna_dll.DnaSelectPoint(byref(pszPoint), nPoint) tag_result = pszPoint.value.decode('utf-8') return tag_result
[ "def", "SelectPoint", "(", ")", ":", "# Define all required variables in the correct ctypes format\r", "pszPoint", "=", "create_string_buffer", "(", "20", ")", "nPoint", "=", "c_ushort", "(", "20", ")", "# Opens the point picker\r", "dna_dll", ".", "DnaSelectPoint", "(", ...
Opens an eDNA point picker, where the user can select a single tag. :return: selected tag name
[ "Opens", "an", "eDNA", "point", "picker", "where", "the", "user", "can", "select", "a", "single", "tag", ".", ":", "return", ":", "selected", "tag", "name" ]
python
train
BYU-PCCL/holodeck
holodeck/environments.py
https://github.com/BYU-PCCL/holodeck/blob/01acd4013f5acbd9f61fbc9caaafe19975e8b121/holodeck/environments.py#L262-L273
def spawn_agent(self, agent_definition, location): """Queues a spawn agent command. It will be applied when `tick` or `step` is called next. The agent won't be able to be used until the next frame. Args: agent_definition (:obj:`AgentDefinition`): The definition of the agent to spawn. location (np.ndarray or list): The position to spawn the agent in the world, in XYZ coordinates (in meters). """ self._should_write_to_command_buffer = True self._add_agents(agent_definition) command_to_send = SpawnAgentCommand(location, agent_definition.name, agent_definition.type) self._commands.add_command(command_to_send)
[ "def", "spawn_agent", "(", "self", ",", "agent_definition", ",", "location", ")", ":", "self", ".", "_should_write_to_command_buffer", "=", "True", "self", ".", "_add_agents", "(", "agent_definition", ")", "command_to_send", "=", "SpawnAgentCommand", "(", "location"...
Queues a spawn agent command. It will be applied when `tick` or `step` is called next. The agent won't be able to be used until the next frame. Args: agent_definition (:obj:`AgentDefinition`): The definition of the agent to spawn. location (np.ndarray or list): The position to spawn the agent in the world, in XYZ coordinates (in meters).
[ "Queues", "a", "spawn", "agent", "command", ".", "It", "will", "be", "applied", "when", "tick", "or", "step", "is", "called", "next", ".", "The", "agent", "won", "t", "be", "able", "to", "be", "used", "until", "the", "next", "frame", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/models/image_transformer.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/image_transformer.py#L565-L579
def imagetransformerpp_base_5l_8h_big_uncond_dr00_dan_g_bs1(): """For 256x256.""" hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_g() # TODO(trandustin): I forgot to set this in the runs! Maybe it's not used in # image transformer training implementation? # hparams.img_len = 256 hparams.max_length = 66000 # allow for 256x256 hparams.batch_size = 1 hparams.num_decoder_layers = 5 hparams.hidden_size = 128 hparams.filter_size = 128 hparams.attention_key_channels = 64 hparams.attention_value_channels = 64 hparams.layer_prepostprocess_dropout = 0.0 return hparams
[ "def", "imagetransformerpp_base_5l_8h_big_uncond_dr00_dan_g_bs1", "(", ")", ":", "hparams", "=", "imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_g", "(", ")", "# TODO(trandustin): I forgot to set this in the runs! Maybe it's not used in", "# image transformer training implementation?", "...
For 256x256.
[ "For", "256x256", "." ]
python
train
edeposit/marcxml2mods
src/marcxml2mods/mods_postprocessor/monograph.py
https://github.com/edeposit/marcxml2mods/blob/7b44157e859b4d2a372f79598ddbf77e43d39812/src/marcxml2mods/mods_postprocessor/monograph.py#L318-L344
def fix_missing_lang_tags(marc_xml, dom): """ If the lang tags are missing, add them to the MODS. Lang tags are parsed from `marc_xml`. """ def get_lang_tag(lang): lang_str = '\n <mods:language>\n' lang_str += ' <mods:languageTerm authority="iso639-2b" type="code">' lang_str += lang lang_str += '</mods:languageTerm>\n' lang_str += ' </mods:language>\n\n' lang_dom = dhtmlparser.parseString(lang_str) return first(lang_dom.find("mods:language")) for lang in reversed(marc_xml["041a0 "]): lang_tag = dom.find( "mods:languageTerm", fn=lambda x: x.getContent().strip().lower() == lang.lower() ) if not lang_tag: insert_tag( get_lang_tag(lang), dom.find("mods:language"), get_mods_tag(dom) )
[ "def", "fix_missing_lang_tags", "(", "marc_xml", ",", "dom", ")", ":", "def", "get_lang_tag", "(", "lang", ")", ":", "lang_str", "=", "'\\n <mods:language>\\n'", "lang_str", "+=", "' <mods:languageTerm authority=\"iso639-2b\" type=\"code\">'", "lang_str", "+=", "lang"...
If the lang tags are missing, add them to the MODS. Lang tags are parsed from `marc_xml`.
[ "If", "the", "lang", "tags", "are", "missing", "add", "them", "to", "the", "MODS", ".", "Lang", "tags", "are", "parsed", "from", "marc_xml", "." ]
python
train
mkouhei/tonicdnscli
src/tonicdnscli/connect.py
https://github.com/mkouhei/tonicdnscli/blob/df2d6fb2104ae4d49fa89d1bba2f3ccd2fed388c/src/tonicdnscli/connect.py#L135-L207
def response(uri, method, res, token='', keyword='', content='', raw_flag=False): """Response of tonicdns_client request Arguments: uri: TonicDNS API URI method: TonicDNS API request method res: Response of against request to TonicDNS API token: TonicDNS API token keyword: Processing keyword content: JSON data raw_flag: True is return responsed raw data, False is pretty print """ if method == 'GET' or (method == 'PUT' and not token): # response body data = res.read() data_utf8 = data.decode('utf-8') if token: datas = json.loads(data_utf8) else: token = json.loads(data_utf8)['hash'] return token if keyword == 'serial': # filtering with keyword record = search_record(datas, 'SOA')[0] # if SOA record, remove priority unnecessary del record['priority'] # override ttl record['ttl'] = int(record['ttl']) c = JSONConverter(content['domain']) new_record = c.get_soa(record, content) return record, new_record elif keyword: # '--search' option of 'get' subcommand records = search_record(datas, keyword) datas.update({"records": records}) if uri.split('/')[3] == 'template': # 'tmpl_get' subcommand if len(uri.split('/')) == 5: # when specify template identfier #print_formatted(datas) utils.pretty_print(datas) else: # when get all templates for data in datas: #print_formatted(data) utils.pretty_print(datas) else: # 'get' subcommand if raw_flag: return datas else: #print_formatted(datas) if len(uri.split('zone/')) > 1: domain = uri.split('zone/')[1] else: domain = '' utils.pretty_print(datas, keyword, domain) else: # response non JSON data data = res.read() print(data)
[ "def", "response", "(", "uri", ",", "method", ",", "res", ",", "token", "=", "''", ",", "keyword", "=", "''", ",", "content", "=", "''", ",", "raw_flag", "=", "False", ")", ":", "if", "method", "==", "'GET'", "or", "(", "method", "==", "'PUT'", "...
Response of tonicdns_client request Arguments: uri: TonicDNS API URI method: TonicDNS API request method res: Response of against request to TonicDNS API token: TonicDNS API token keyword: Processing keyword content: JSON data raw_flag: True is return responsed raw data, False is pretty print
[ "Response", "of", "tonicdns_client", "request" ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavutil.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavutil.py#L560-L569
def set_mode_manual(self): '''enter MANUAL mode''' if self.mavlink10(): self.mav.command_long_send(self.target_system, self.target_component, mavlink.MAV_CMD_DO_SET_MODE, 0, mavlink.MAV_MODE_MANUAL_ARMED, 0, 0, 0, 0, 0, 0) else: MAV_ACTION_SET_MANUAL = 12 self.mav.action_send(self.target_system, self.target_component, MAV_ACTION_SET_MANUAL)
[ "def", "set_mode_manual", "(", "self", ")", ":", "if", "self", ".", "mavlink10", "(", ")", ":", "self", ".", "mav", ".", "command_long_send", "(", "self", ".", "target_system", ",", "self", ".", "target_component", ",", "mavlink", ".", "MAV_CMD_DO_SET_MODE",...
enter MANUAL mode
[ "enter", "MANUAL", "mode" ]
python
train
ibis-project/ibis
ibis/bigquery/client.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/bigquery/client.py#L303-L356
def parse_project_and_dataset( project: str, dataset: Optional[str] = None ) -> Tuple[str, str, Optional[str]]: """Compute the billing project, data project, and dataset if available. This function figure out the project id under which queries will run versus the project of where the data live as well as what dataset to use. Parameters ---------- project : str A project name dataset : Optional[str] A ``<project>.<dataset>`` string or just a dataset name Examples -------- >>> data_project, billing_project, dataset = parse_project_and_dataset( ... 'ibis-gbq', ... 'foo-bar.my_dataset' ... ) >>> data_project 'foo-bar' >>> billing_project 'ibis-gbq' >>> dataset 'my_dataset' >>> data_project, billing_project, dataset = parse_project_and_dataset( ... 'ibis-gbq', ... 'my_dataset' ... ) >>> data_project 'ibis-gbq' >>> billing_project 'ibis-gbq' >>> dataset 'my_dataset' >>> data_project, billing_project, dataset = parse_project_and_dataset( ... 'ibis-gbq' ... ) >>> data_project 'ibis-gbq' >>> print(dataset) None """ try: data_project, dataset = dataset.split('.') except (ValueError, AttributeError): billing_project = data_project = project else: billing_project = project return data_project, billing_project, dataset
[ "def", "parse_project_and_dataset", "(", "project", ":", "str", ",", "dataset", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "Tuple", "[", "str", ",", "str", ",", "Optional", "[", "str", "]", "]", ":", "try", ":", "data_project", ",", "d...
Compute the billing project, data project, and dataset if available. This function figure out the project id under which queries will run versus the project of where the data live as well as what dataset to use. Parameters ---------- project : str A project name dataset : Optional[str] A ``<project>.<dataset>`` string or just a dataset name Examples -------- >>> data_project, billing_project, dataset = parse_project_and_dataset( ... 'ibis-gbq', ... 'foo-bar.my_dataset' ... ) >>> data_project 'foo-bar' >>> billing_project 'ibis-gbq' >>> dataset 'my_dataset' >>> data_project, billing_project, dataset = parse_project_and_dataset( ... 'ibis-gbq', ... 'my_dataset' ... ) >>> data_project 'ibis-gbq' >>> billing_project 'ibis-gbq' >>> dataset 'my_dataset' >>> data_project, billing_project, dataset = parse_project_and_dataset( ... 'ibis-gbq' ... ) >>> data_project 'ibis-gbq' >>> print(dataset) None
[ "Compute", "the", "billing", "project", "data", "project", "and", "dataset", "if", "available", "." ]
python
train
noirbizarre/minibench
minibench/benchmark.py
https://github.com/noirbizarre/minibench/blob/a1ac66dc075181c62bb3c0d3a26beb5c46d5f4ab/minibench/benchmark.py#L55-L59
def label(self): '''A human readable label''' if self.__doc__ and self.__doc__.strip(): return self.__doc__.strip().splitlines()[0] return humanize(self.__class__.__name__)
[ "def", "label", "(", "self", ")", ":", "if", "self", ".", "__doc__", "and", "self", ".", "__doc__", ".", "strip", "(", ")", ":", "return", "self", ".", "__doc__", ".", "strip", "(", ")", ".", "splitlines", "(", ")", "[", "0", "]", "return", "huma...
A human readable label
[ "A", "human", "readable", "label" ]
python
train
kiwiz/gkeepapi
gkeepapi/__init__.py
https://github.com/kiwiz/gkeepapi/blob/78aaae8b988b1cf616e3973f7f15d4c6d5e996cc/gkeepapi/__init__.py#L523-L540
def resume(self, email, master_token, state=None, sync=True): """Authenticate to Google with the provided master token & sync. Args: email (str): The account to use. master_token (str): The master token. state (dict): Serialized state to load. Raises: LoginException: If there was a problem logging in. """ auth = APIAuth(self.OAUTH_SCOPES) ret = auth.load(email, master_token, android_id=get_mac()) if ret: self.load(auth, state, sync) return ret
[ "def", "resume", "(", "self", ",", "email", ",", "master_token", ",", "state", "=", "None", ",", "sync", "=", "True", ")", ":", "auth", "=", "APIAuth", "(", "self", ".", "OAUTH_SCOPES", ")", "ret", "=", "auth", ".", "load", "(", "email", ",", "mast...
Authenticate to Google with the provided master token & sync. Args: email (str): The account to use. master_token (str): The master token. state (dict): Serialized state to load. Raises: LoginException: If there was a problem logging in.
[ "Authenticate", "to", "Google", "with", "the", "provided", "master", "token", "&", "sync", "." ]
python
train
rfosterslo/wagtailplus
wagtailplus/utils/views/chooser.py
https://github.com/rfosterslo/wagtailplus/blob/22cac857175d8a6f77e470751831c14a92ccd768/wagtailplus/utils/views/chooser.py#L163-L215
def chosen_view_factory(chooser_cls): """ Returns a ChosenView class that extends specified chooser class. :param chooser_cls: the class to extend. :rtype: class. """ class ChosenView(chooser_cls): #noinspection PyUnusedLocal def get(self, request, *args, **kwargs): """ Returns GET response. :param request: the request instance. :rtype: django.http.HttpResponse. """ #noinspection PyAttributeOutsideInit self.object = self.get_object() return render_modal_workflow( self.request, None, '{0}/chosen.js'.format(self.template_dir), {'obj': self.get_json(self.object)} ) def get_object(self, queryset=None): """ Returns chosen object instance. :param queryset: the queryset instance. :rtype: django.db.models.Model. """ if queryset is None: queryset = self.get_queryset() pk = self.kwargs.get('pk', None) try: return queryset.get(pk=pk) except self.models.DoesNotExist: raise Http404() def post(self, request, *args, **kwargs): """ Returns POST response. :param request: the request instance. :rtype: django.http.HttpResponse. """ return self.get(request, *args, **kwargs) return ChosenView
[ "def", "chosen_view_factory", "(", "chooser_cls", ")", ":", "class", "ChosenView", "(", "chooser_cls", ")", ":", "#noinspection PyUnusedLocal", "def", "get", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"\n ...
Returns a ChosenView class that extends specified chooser class. :param chooser_cls: the class to extend. :rtype: class.
[ "Returns", "a", "ChosenView", "class", "that", "extends", "specified", "chooser", "class", "." ]
python
train
Avsecz/kopt
kopt/hyopt.py
https://github.com/Avsecz/kopt/blob/fe4f929c8938590845306a759547daa5ba8bd7a9/kopt/hyopt.py#L171-L177
def count_by_state_unsynced(self, arg): """Extends the original object in order to inject checking for stalled jobs and killing them if they are running for too long """ if self.kill_timeout is not None: self.delete_running(self.kill_timeout) return super(KMongoTrials, self).count_by_state_unsynced(arg)
[ "def", "count_by_state_unsynced", "(", "self", ",", "arg", ")", ":", "if", "self", ".", "kill_timeout", "is", "not", "None", ":", "self", ".", "delete_running", "(", "self", ".", "kill_timeout", ")", "return", "super", "(", "KMongoTrials", ",", "self", ")"...
Extends the original object in order to inject checking for stalled jobs and killing them if they are running for too long
[ "Extends", "the", "original", "object", "in", "order", "to", "inject", "checking", "for", "stalled", "jobs", "and", "killing", "them", "if", "they", "are", "running", "for", "too", "long" ]
python
train
proycon/pynlpl
pynlpl/formats/folia.py
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L1766-L1768
def findreplaceables(Class, parent, set=None,**kwargs): """Internal method to find replaceable elements. Auxiliary function used by :meth:`AbstractElement.replace`. Can be overriden for more fine-grained control.""" return list(parent.select(Class,set,False))
[ "def", "findreplaceables", "(", "Class", ",", "parent", ",", "set", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "list", "(", "parent", ".", "select", "(", "Class", ",", "set", ",", "False", ")", ")" ]
Internal method to find replaceable elements. Auxiliary function used by :meth:`AbstractElement.replace`. Can be overriden for more fine-grained control.
[ "Internal", "method", "to", "find", "replaceable", "elements", ".", "Auxiliary", "function", "used", "by", ":", "meth", ":", "AbstractElement", ".", "replace", ".", "Can", "be", "overriden", "for", "more", "fine", "-", "grained", "control", "." ]
python
train
minio/minio-py
minio/parsers.py
https://github.com/minio/minio-py/blob/7107c84183cf5fb4deff68c0a16ab9f1c0b4c37e/minio/parsers.py#L427-L442
def parse_multi_object_delete_response(data): """Parser for Multi-Object Delete API response. :param data: XML response body content from service. :return: Returns list of error objects for each delete object that had an error. """ root = S3Element.fromstring('MultiObjectDeleteResult', data) return [ MultiDeleteError(errtag.get_child_text('Key'), errtag.get_child_text('Code'), errtag.get_child_text('Message')) for errtag in root.findall('Error') ]
[ "def", "parse_multi_object_delete_response", "(", "data", ")", ":", "root", "=", "S3Element", ".", "fromstring", "(", "'MultiObjectDeleteResult'", ",", "data", ")", "return", "[", "MultiDeleteError", "(", "errtag", ".", "get_child_text", "(", "'Key'", ")", ",", ...
Parser for Multi-Object Delete API response. :param data: XML response body content from service. :return: Returns list of error objects for each delete object that had an error.
[ "Parser", "for", "Multi", "-", "Object", "Delete", "API", "response", "." ]
python
train
dancsalo/TensorBase
tensorbase/base.py
https://github.com/dancsalo/TensorBase/blob/3d42a326452bd03427034916ff2fb90730020204/tensorbase/base.py#L618-L662
def deconvnet(self, filter_sizes, output_channels, strides=None, padding=None, activation_fn=None, b_value=None, s_value=None, bn=None, trainable=True): ''' Shortcut for creating a 2D Deconvolutional Neural Network in one line Stacks multiple deconv2d layers, with arguments for each layer defined in a list. If an argument is left as None, then the conv2d defaults are kept :param filter_sizes: int. assumes square filter :param output_channels: int :param stride: int :param padding: 'VALID' or 'SAME' :param activation_fn: tf.nn function :param b_value: float :param s_value: float ''' # Number of layers to stack depth = len(filter_sizes) # Default arguments where None was passed in if strides is None: strides = np.ones(depth) if padding is None: padding = ['SAME'] * depth if activation_fn is None: activation_fn = [tf.nn.relu] * depth if b_value is None: b_value = np.zeros(depth) if s_value is None: s_value = np.ones(depth) if bn is None: bn = [True] * depth # Make sure that number of layers is consistent assert len(output_channels) == depth assert len(strides) == depth assert len(padding) == depth assert len(activation_fn) == depth assert len(b_value) == depth assert len(s_value) == depth assert len(bn) == depth # Stack convolutional layers for l in range(depth): self.deconv2d(filter_size=filter_sizes[l], output_channels=output_channels[l], stride=strides[l], padding=padding[l], activation_fn=activation_fn[l], b_value=b_value[l], s_value=s_value[l], bn=bn[l], trainable=trainable)
[ "def", "deconvnet", "(", "self", ",", "filter_sizes", ",", "output_channels", ",", "strides", "=", "None", ",", "padding", "=", "None", ",", "activation_fn", "=", "None", ",", "b_value", "=", "None", ",", "s_value", "=", "None", ",", "bn", "=", "None", ...
Shortcut for creating a 2D Deconvolutional Neural Network in one line Stacks multiple deconv2d layers, with arguments for each layer defined in a list. If an argument is left as None, then the conv2d defaults are kept :param filter_sizes: int. assumes square filter :param output_channels: int :param stride: int :param padding: 'VALID' or 'SAME' :param activation_fn: tf.nn function :param b_value: float :param s_value: float
[ "Shortcut", "for", "creating", "a", "2D", "Deconvolutional", "Neural", "Network", "in", "one", "line", "Stacks", "multiple", "deconv2d", "layers", "with", "arguments", "for", "each", "layer", "defined", "in", "a", "list", ".", "If", "an", "argument", "is", "...
python
train
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L11765-L11810
def spkcvo(target, et, outref, refloc, abcorr, obssta, obsepc, obsctr, obsref): """ Return the state of a specified target relative to an "observer," where the observer has constant velocity in a specified reference frame. The observer's state is provided by the calling program rather than by loaded SPK files. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkcvo_c.html :param target: Name of target ephemeris object. :type target: str :param et: Observation epoch. :type et: float :param outref: Reference frame of output state. :type outref: str :param refloc: Output reference frame evaluation locus. :type refloc: str :param abcorr: Aberration correction. :type abcorr: str :param obssta: Observer state relative to center of motion. :type obssta: 6-Element Array of floats :param obsepc: Epoch of observer state. :type obsepc: float :param obsctr: Center of motion of observer. :type obsctr: str :param obsref: Frame of observer state. :type obsref: str :return: State of target with respect to observer, One way light time between target and observer. :rtype: tuple """ target = stypes.stringToCharP(target) et = ctypes.c_double(et) outref = stypes.stringToCharP(outref) refloc = stypes.stringToCharP(refloc) abcorr = stypes.stringToCharP(abcorr) obssta = stypes.toDoubleVector(obssta) obsepc = ctypes.c_double(obsepc) obsctr = stypes.stringToCharP(obsctr) obsref = stypes.stringToCharP(obsref) state = stypes.emptyDoubleVector(6) lt = ctypes.c_double() libspice.spkcvo_c(target, et, outref, refloc, abcorr, obssta, obsepc, obsctr, obsref, state, ctypes.byref(lt)) return stypes.cVectorToPython(state), lt.value
[ "def", "spkcvo", "(", "target", ",", "et", ",", "outref", ",", "refloc", ",", "abcorr", ",", "obssta", ",", "obsepc", ",", "obsctr", ",", "obsref", ")", ":", "target", "=", "stypes", ".", "stringToCharP", "(", "target", ")", "et", "=", "ctypes", ".",...
Return the state of a specified target relative to an "observer," where the observer has constant velocity in a specified reference frame. The observer's state is provided by the calling program rather than by loaded SPK files. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkcvo_c.html :param target: Name of target ephemeris object. :type target: str :param et: Observation epoch. :type et: float :param outref: Reference frame of output state. :type outref: str :param refloc: Output reference frame evaluation locus. :type refloc: str :param abcorr: Aberration correction. :type abcorr: str :param obssta: Observer state relative to center of motion. :type obssta: 6-Element Array of floats :param obsepc: Epoch of observer state. :type obsepc: float :param obsctr: Center of motion of observer. :type obsctr: str :param obsref: Frame of observer state. :type obsref: str :return: State of target with respect to observer, One way light time between target and observer. :rtype: tuple
[ "Return", "the", "state", "of", "a", "specified", "target", "relative", "to", "an", "observer", "where", "the", "observer", "has", "constant", "velocity", "in", "a", "specified", "reference", "frame", ".", "The", "observer", "s", "state", "is", "provided", "...
python
train