repo
stringlengths
7
55
path
stringlengths
4
223
url
stringlengths
87
315
code
stringlengths
75
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
avg_line_len
float64
7.91
980
CyberReboot/vent
vent/extras/rmq_es_connector/rmq_es_connector.py
https://github.com/CyberReboot/vent/blob/9956a09146b11a89a0eabab3bc7ce8906d124885/vent/extras/rmq_es_connector/rmq_es_connector.py#L104-L108
def consume(self): # pragma: no cover """ start consuming rabbitmq messages """ print(' [*] Waiting for logs. To exit press CTRL+C') self.channel.basic_consume(self.queue_name, self.callback) self.channel.start_consuming()
[ "def", "consume", "(", "self", ")", ":", "# pragma: no cover", "print", "(", "' [*] Waiting for logs. To exit press CTRL+C'", ")", "self", ".", "channel", ".", "basic_consume", "(", "self", ".", "queue_name", ",", "self", ".", "callback", ")", "self", ".", "chan...
start consuming rabbitmq messages
[ "start", "consuming", "rabbitmq", "messages" ]
python
train
50.2
common-workflow-language/schema_salad
schema_salad/schema.py
https://github.com/common-workflow-language/schema_salad/blob/608ba207b9058fe0a9c3db161058ab3782eef015/schema_salad/schema.py#L381-L426
def replace_type(items, spec, loader, found, find_embeds=True, deepen=True): # type: (Any, Dict[Text, Any], Loader, Set[Text], bool, bool) -> Any """ Go through and replace types in the 'spec' mapping""" if isinstance(items, MutableMapping): # recursively check these fields for types to replace if items.get("type") in ("record", "enum") and items.get("name"): if items["name"] in found: return items["name"] found.add(items["name"]) if not deepen: return items items = copy.copy(items) if not items.get("name"): items["name"] = get_anon_name(items) for name in ("type", "items", "fields"): if name in items: items[name] = replace_type( items[name], spec, loader, found, find_embeds=find_embeds, deepen=find_embeds) if isinstance(items[name], MutableSequence): items[name] = flatten(items[name]) return items if isinstance(items, MutableSequence): # recursively transform list return [replace_type(i, spec, loader, found, find_embeds=find_embeds, deepen=deepen) for i in items] if isinstance(items, string_types): # found a string which is a symbol corresponding to a type. replace_with = None if items in loader.vocab: # If it's a vocabulary term, first expand it to its fully qualified # URI items = loader.vocab[items] if items in spec: # Look up in specialization map replace_with = spec[items] if replace_with: return replace_type(replace_with, spec, loader, found, find_embeds=find_embeds) found.add(items) return items
[ "def", "replace_type", "(", "items", ",", "spec", ",", "loader", ",", "found", ",", "find_embeds", "=", "True", ",", "deepen", "=", "True", ")", ":", "# type: (Any, Dict[Text, Any], Loader, Set[Text], bool, bool) -> Any", "if", "isinstance", "(", "items", ",", "Mu...
Go through and replace types in the 'spec' mapping
[ "Go", "through", "and", "replace", "types", "in", "the", "spec", "mapping" ]
python
train
39.130435
dourvaris/nano-python
src/nano/rpc.py
https://github.com/dourvaris/nano-python/blob/f26b8bc895b997067780f925049a70e82c0c2479/src/nano/rpc.py#L2238-L2266
def wallet_balance_total(self, wallet): """ Returns the sum of all accounts balances in **wallet** :param wallet: Wallet to return sum of balances for :type wallet: str :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.wallet_balance_total( ... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F" ... ) { "balance": 10000, "pending": 10000 } """ wallet = self._process_value(wallet, 'wallet') payload = {"wallet": wallet} resp = self.call('wallet_balance_total', payload) for k, v in resp.items(): resp[k] = int(v) return resp
[ "def", "wallet_balance_total", "(", "self", ",", "wallet", ")", ":", "wallet", "=", "self", ".", "_process_value", "(", "wallet", ",", "'wallet'", ")", "payload", "=", "{", "\"wallet\"", ":", "wallet", "}", "resp", "=", "self", ".", "call", "(", "'wallet...
Returns the sum of all accounts balances in **wallet** :param wallet: Wallet to return sum of balances for :type wallet: str :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.wallet_balance_total( ... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F" ... ) { "balance": 10000, "pending": 10000 }
[ "Returns", "the", "sum", "of", "all", "accounts", "balances", "in", "**", "wallet", "**" ]
python
train
23.862069
roboogle/gtkmvc3
gtkmvco/examples/custom_widget/htmltextview.py
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/examples/custom_widget/htmltextview.py#L42-L48
def _parse_css_color(color): '''_parse_css_color(css_color) -> gtk.gdk.Color''' if color.startswith("rgb(") and color.endswith(')'): r, g, b = [int(c)*257 for c in color[4:-1].split(',')] return gtk.gdk.Color(r, g, b) else: return gtk.gdk.color_parse(color)
[ "def", "_parse_css_color", "(", "color", ")", ":", "if", "color", ".", "startswith", "(", "\"rgb(\"", ")", "and", "color", ".", "endswith", "(", "')'", ")", ":", "r", ",", "g", ",", "b", "=", "[", "int", "(", "c", ")", "*", "257", "for", "c", "...
_parse_css_color(css_color) -> gtk.gdk.Color
[ "_parse_css_color", "(", "css_color", ")", "-", ">", "gtk", ".", "gdk", ".", "Color" ]
python
train
41
pyviz/holoviews
holoviews/core/spaces.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/spaces.py#L181-L225
def _dynamic_mul(self, dimensions, other, keys): """ Implements dynamic version of overlaying operation overlaying DynamicMaps and HoloMaps where the key dimensions of one is a strict superset of the other. """ # If either is a HoloMap compute Dimension values if not isinstance(self, DynamicMap) or not isinstance(other, DynamicMap): keys = sorted((d, v) for k in keys for d, v in k) grouped = dict([(g, [v for _, v in group]) for g, group in groupby(keys, lambda x: x[0])]) dimensions = [d(values=grouped[d.name]) for d in dimensions] map_obj = None # Combine streams map_obj = self if isinstance(self, DynamicMap) else other if isinstance(self, DynamicMap) and isinstance(other, DynamicMap): self_streams = util.dimensioned_streams(self) other_streams = util.dimensioned_streams(other) streams = list(util.unique_iterator(self_streams+other_streams)) else: streams = map_obj.streams def dynamic_mul(*key, **kwargs): key_map = {d.name: k for d, k in zip(dimensions, key)} layers = [] try: self_el = self.select(HoloMap, **key_map) if self.kdims else self[()] layers.append(self_el) except KeyError: pass try: other_el = other.select(HoloMap, **key_map) if other.kdims else other[()] layers.append(other_el) except KeyError: pass return Overlay(layers) callback = Callable(dynamic_mul, inputs=[self, other]) callback._is_overlay = True if map_obj: return map_obj.clone(callback=callback, shared_data=False, kdims=dimensions, streams=streams) else: return DynamicMap(callback=callback, kdims=dimensions, streams=streams)
[ "def", "_dynamic_mul", "(", "self", ",", "dimensions", ",", "other", ",", "keys", ")", ":", "# If either is a HoloMap compute Dimension values", "if", "not", "isinstance", "(", "self", ",", "DynamicMap", ")", "or", "not", "isinstance", "(", "other", ",", "Dynami...
Implements dynamic version of overlaying operation overlaying DynamicMaps and HoloMaps where the key dimensions of one is a strict superset of the other.
[ "Implements", "dynamic", "version", "of", "overlaying", "operation", "overlaying", "DynamicMaps", "and", "HoloMaps", "where", "the", "key", "dimensions", "of", "one", "is", "a", "strict", "superset", "of", "the", "other", "." ]
python
train
44.422222
econ-ark/HARK
HARK/FashionVictim/FashionVictimModel.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/FashionVictim/FashionVictimModel.py#L239-L263
def simOnePrd(self): ''' Simulate one period of the fashion victom model for this type. Each agent receives an idiosyncratic preference shock and chooses whether to change styles (using the optimal decision rule). Parameters ---------- none Returns ------- none ''' pNow = self.pNow sPrev = self.sNow J2Pprob = self.switchFuncJock(pNow) P2Jprob = self.switchFuncPunk(pNow) Shks = self.RNG.rand(self.pop_size) J2P = np.logical_and(sPrev == 0,Shks < J2Pprob) P2J = np.logical_and(sPrev == 1,Shks < P2Jprob) sNow = copy(sPrev) sNow[J2P] = 1 sNow[P2J] = 0 self.sNow = sNow
[ "def", "simOnePrd", "(", "self", ")", ":", "pNow", "=", "self", ".", "pNow", "sPrev", "=", "self", ".", "sNow", "J2Pprob", "=", "self", ".", "switchFuncJock", "(", "pNow", ")", "P2Jprob", "=", "self", ".", "switchFuncPunk", "(", "pNow", ")", "Shks", ...
Simulate one period of the fashion victom model for this type. Each agent receives an idiosyncratic preference shock and chooses whether to change styles (using the optimal decision rule). Parameters ---------- none Returns ------- none
[ "Simulate", "one", "period", "of", "the", "fashion", "victom", "model", "for", "this", "type", ".", "Each", "agent", "receives", "an", "idiosyncratic", "preference", "shock", "and", "chooses", "whether", "to", "change", "styles", "(", "using", "the", "optimal"...
python
train
29.68
esterhui/pypu
pypu/service_facebook.py
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_facebook.py#L113-L132
def _update_config(self,directory,filename): """Manages FB config files""" basefilename=os.path.splitext(filename)[0] ext=os.path.splitext(filename)[1].lower() #if filename==LOCATION_FILE: #return self._update_config_location(directory) #FIXME #elif filename==TAG_FILE: #return self._update_config_tags(directory) if filename==SET_FILE: print("%s - Moving photos to album"%(filename)) return self._upload_media(directory,movealbum_request=True) elif filename==MEGAPIXEL_FILE: print("%s - Resizing photos"%(filename)) return self._upload_media(directory,resize_request=True) elif ext in self.FB_META_EXTENSIONS: print("%s - Changing photo title"%(basefilename)) return self._upload_media(directory,basefilename,changetitle_request=True) return False
[ "def", "_update_config", "(", "self", ",", "directory", ",", "filename", ")", ":", "basefilename", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "0", "]", "ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "["...
Manages FB config files
[ "Manages", "FB", "config", "files" ]
python
train
45.3
openwisp/django-freeradius
django_freeradius/base/models.py
https://github.com/openwisp/django-freeradius/blob/a9dd0710327eb33b49dd01097fc3b76048894963/django_freeradius/base/models.py#L524-L531
def set_default(self): """ ensures there's only 1 default group (logic overridable via custom models) """ queryset = self.get_default_queryset() if queryset.exists(): queryset.update(default=False)
[ "def", "set_default", "(", "self", ")", ":", "queryset", "=", "self", ".", "get_default_queryset", "(", ")", "if", "queryset", ".", "exists", "(", ")", ":", "queryset", ".", "update", "(", "default", "=", "False", ")" ]
ensures there's only 1 default group (logic overridable via custom models)
[ "ensures", "there", "s", "only", "1", "default", "group", "(", "logic", "overridable", "via", "custom", "models", ")" ]
python
train
31.25
azogue/dataweb
dataweb/requestweb/__init__.py
https://github.com/azogue/dataweb/blob/085035855df7cef0fe7725bbe9a706832344d946/dataweb/requestweb/__init__.py#L99-L227
def get_data_en_intervalo(d0=None, df=None, date_fmt=DATE_FMT, usar_multithread=USAR_MULTITHREAD, max_threads_requests=MAX_THREADS_REQUESTS, timeout=TIMEOUT, num_retries=NUM_RETRIES, func_procesa_data_dia=None, func_url_data_dia=None, max_act_exec=None, verbose=True, data_extra_request=None): """ Obtiene los datos en bruto de la red realizando múltiples requests al tiempo Procesa los datos en bruto obtenidos de la red convirtiendo a Pandas DataFrame """ def _date(dia_string): if dia_string is None: return dt.date.today() elif type(dia_string) is pd.Timestamp: return dia_string.to_datetime().date() elif type(dia_string) is not dt.date: return dt.datetime.strptime(dia_string, date_fmt).date() else: return dia_string def _procesa_merge_datos_dias(lista_m, dict_data_merge): def _merge_datos_dias(key_tarea_merge, dict_merge_dias): dict_merge_dias[key_tarea_merge] = merge_data(dict_merge_dias[key_tarea_merge]) if num_dias > 1 and usar_multithread: lista_grupos = list() grupos_dias = [lista_m[i:i + DIAS_MERGE_MAX] for i in np.arange(0, num_dias, DIAS_MERGE_MAX)] for grupo in grupos_dias: lista_dfs = list() for key_g in grupo: lista_dfs.append(dict_data_merge[key_g]) lista_grupos.append(lista_dfs) keys_grupos = np.arange(len(lista_grupos)) dict_merge = dict(zip(keys_grupos, lista_grupos)) procesa_tareas_paralelo(keys_grupos, dict_merge, _merge_datos_dias, '\nMERGE DATAFRAMES DE DATOS WEB DIARIOS (%lu GRUPOS)', usar_multithread, MAX_THREADS_MERGE, verbose=verbose) dict_merge_final = {0: [dict_merge[k] for k in dict_merge.keys()]} _merge_datos_dias(0, dict_merge_final) return dict_merge_final[0] else: return merge_data(list(dict_data_merge.values())) def _hay_errores_en_datos_obtenidos(dict_data_obtenida): keys = list(sorted(dict_data_obtenida.keys())) data_es_none = [dict_data_obtenida[k] is None for k in keys] error = False if any(data_es_none): df_err = pd.DataFrame({'key': keys, 'is_bad': data_es_none}) df_err['date'] = df_err['key'].apply(lambda x: pd.Timestamp(x)) 
df_err['delta'] = (df_err['date'] - df_err['date'].shift(1)).fillna(3600 * 24) df_g = df_err[~df_err['is_bad']].copy() df_g['delta_g'] = (df_g['date'] - df_g['date'].shift(1)).fillna(3600 * 24) # print(df_err) # print(df_err['delta'].describe()) # print(df_g['delta_g'].describe()) if df_g['delta_g'].max() < pd.Timedelta(2, 'D'): bad_days = df_err[df_err['is_bad']]['key'].tolist() if verbose: print('HAY TAREAS NO REALIZADAS ({}):\n{}'.format(len(bad_days), bad_days)) logging.error('HAY TAREAS NO REALIZADAS ({}):\n{}'.format(len(bad_days), bad_days)) error = False else: if verbose: print('NO HAY NINGUNA TAREA REALIZADA!') logging.error('NO HAY NINGUNA TAREA REALIZADA!') bad_days = df_err['key'].tolist() error = True for k in bad_days: dict_data_obtenida.pop(k) return error def _obtiene_request(url, key, headers=None, p_req=None, json_r=False, **kwargs_r): if type(url) is list: results = [request_data_url(u, headers, num_retries, timeout, p_req, json_r, **kwargs_r) for u in url] dict_data[key] = list(zip(*results)) else: stat_response = request_data_url(url, headers, num_retries, timeout, p_req, json_r, **kwargs_r) dict_data[key] = stat_response def _obtiene_data_dia(key, dict_data_responses): url = func_url_data_dia(key) extra = dict_data_responses[key] if type(dict_data_responses[key]) is dict else {} headers = extra.pop('headers', None) json_req = extra.pop('json_req', False) params_request = extra.pop('params_request', None) try: count_process, ok = 0, -1 while count_process < num_retries and ok != 0: _obtiene_request(url, key, headers, params_request, json_req, **extra) data_import, ok = func_procesa_data_dia(key, dict_data_responses[key][1]) if ok == 0: dict_data_responses[key] = data_import elif ok == -2: # Código de salida temprana: count_process = num_retries count_process += 1 if ok != 0: dict_data_responses[key] = None except Exception as e: if verbose: print('PROCESANDO DATA!???? 
(Exception: {}; KEY: {}; URL: {})'.format(e, key, url)) logging.error('PROCESANDO DATA!???? (Exception: {}; KEY: {}; URL: {})'.format(e, key, url)) dict_data_responses[key] = None tic_ini = time.time() lista_dias = [dia.strftime(date_fmt) for dia in pd.date_range(_date(d0), _date(df))] if max_act_exec: # BORRAR. Es para limitar el nº de días adquiridos de golpe. lista_dias = lista_dias[:max_act_exec] num_dias = len(lista_dias) if data_extra_request is None: dict_data = dict(zip(lista_dias, np.zeros(num_dias))) else: dict_data = dict(zip(lista_dias, [data_extra_request.copy() for _ in range(num_dias)])) # IMPORTA DATOS Y LOS PROCESA procesa_tareas_paralelo(lista_dias, dict_data, _obtiene_data_dia, '\nPROCESADO DE DATOS WEB DE %lu DÍAS', usar_multithread, max_threads_requests, verbose=verbose) hay_errores = _hay_errores_en_datos_obtenidos(dict_data) # MERGE DATOS # print(len(lista_dias), len(dict_data.keys())) if not hay_errores and num_dias > 0: # data_merge = _procesa_merge_datos_dias(lista_dias, dict_data) data_merge = _procesa_merge_datos_dias(list(sorted(dict_data.keys())), dict_data) str_resumen_import = '\n%lu días importados [Proceso Total %.2f seg, %.4f seg/día]' \ % (num_dias, time.time() - tic_ini, (time.time() - tic_ini) / float(num_dias)) return data_merge, hay_errores, str_resumen_import else: return None, hay_errores, 'ERROR IMPORTANDO!!'
[ "def", "get_data_en_intervalo", "(", "d0", "=", "None", ",", "df", "=", "None", ",", "date_fmt", "=", "DATE_FMT", ",", "usar_multithread", "=", "USAR_MULTITHREAD", ",", "max_threads_requests", "=", "MAX_THREADS_REQUESTS", ",", "timeout", "=", "TIMEOUT", ",", "nu...
Obtiene los datos en bruto de la red realizando múltiples requests al tiempo Procesa los datos en bruto obtenidos de la red convirtiendo a Pandas DataFrame
[ "Obtiene", "los", "datos", "en", "bruto", "de", "la", "red", "realizando", "múltiples", "requests", "al", "tiempo", "Procesa", "los", "datos", "en", "bruto", "obtenidos", "de", "la", "red", "convirtiendo", "a", "Pandas", "DataFrame" ]
python
train
54.674419
mcieslik-mctp/papy
src/papy/graph.py
https://github.com/mcieslik-mctp/papy/blob/708e50827b5db46bbea081982cb74b9b0e464064/src/papy/graph.py#L451-L467
def outgoing_edges(self, node): """ Returns a ``tuple`` of outgoing edges for a **node object**. Arguments: - node(``object``) **node object** present in the graph to be queried for outgoing edges. """ #TODO: pls make outgoig_edges less insane edges = self.edges() out_edges = [] for out_node, in_node in edges: if node is out_node: out_edges.append((out_node, in_node)) return tuple(out_edges)
[ "def", "outgoing_edges", "(", "self", ",", "node", ")", ":", "#TODO: pls make outgoig_edges less insane", "edges", "=", "self", ".", "edges", "(", ")", "out_edges", "=", "[", "]", "for", "out_node", ",", "in_node", "in", "edges", ":", "if", "node", "is", "...
Returns a ``tuple`` of outgoing edges for a **node object**. Arguments: - node(``object``) **node object** present in the graph to be queried for outgoing edges.
[ "Returns", "a", "tuple", "of", "outgoing", "edges", "for", "a", "**", "node", "object", "**", ".", "Arguments", ":", "-", "node", "(", "object", ")", "**", "node", "object", "**", "present", "in", "the", "graph", "to", "be", "queried", "for", "outgoing...
python
train
31.176471
atl/py-smartdc
smartdc/machine.py
https://github.com/atl/py-smartdc/blob/cc5cd5910e19004cc46e376ce035affe28fc798e/smartdc/machine.py#L462-L477
def delete_all_metadata(self): """ :: DELETE /:login/machines/:id/metadata :Returns: current metadata :rtype: empty :py:class:`dict` Deletes all the metadata stored for this machine. Also explicitly requests and returns the machine metadata so that the local copy stays synchronized. """ j, r = self.datacenter.request('DELETE', self.path + '/metadata') r.raise_for_status() return self.get_metadata()
[ "def", "delete_all_metadata", "(", "self", ")", ":", "j", ",", "r", "=", "self", ".", "datacenter", ".", "request", "(", "'DELETE'", ",", "self", ".", "path", "+", "'/metadata'", ")", "r", ".", "raise_for_status", "(", ")", "return", "self", ".", "get_...
:: DELETE /:login/machines/:id/metadata :Returns: current metadata :rtype: empty :py:class:`dict` Deletes all the metadata stored for this machine. Also explicitly requests and returns the machine metadata so that the local copy stays synchronized.
[ "::", "DELETE", "/", ":", "login", "/", "machines", "/", ":", "id", "/", "metadata", ":", "Returns", ":", "current", "metadata", ":", "rtype", ":", "empty", ":", "py", ":", "class", ":", "dict", "Deletes", "all", "the", "metadata", "stored", "for", "...
python
train
32.1875
iotile/coretools
iotileemulate/iotile/emulate/virtual/peripheral_tile.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotileemulate/iotile/emulate/virtual/peripheral_tile.py#L95-L107
def _handle_reset(self): """Reset this tile. This process needs to trigger the peripheral tile to reregister itself with the controller and get new configuration variables. It also needs to clear app_running. """ self._registered.clear() self._start_received.clear() self._hosted_app_running.clear() super(EmulatedPeripheralTile, self)._handle_reset()
[ "def", "_handle_reset", "(", "self", ")", ":", "self", ".", "_registered", ".", "clear", "(", ")", "self", ".", "_start_received", ".", "clear", "(", ")", "self", ".", "_hosted_app_running", ".", "clear", "(", ")", "super", "(", "EmulatedPeripheralTile", "...
Reset this tile. This process needs to trigger the peripheral tile to reregister itself with the controller and get new configuration variables. It also needs to clear app_running.
[ "Reset", "this", "tile", "." ]
python
train
31.923077
emlazzarin/acrylic
acrylic/datatable.py
https://github.com/emlazzarin/acrylic/blob/08c6702d73b9660ead1024653f4fa016f6340e46/acrylic/datatable.py#L815-L820
def wheregreater(self, fieldname, value): """ Returns a new DataTable with rows only where the value at `fieldname` > `value`. """ return self.mask([elem > value for elem in self[fieldname]])
[ "def", "wheregreater", "(", "self", ",", "fieldname", ",", "value", ")", ":", "return", "self", ".", "mask", "(", "[", "elem", ">", "value", "for", "elem", "in", "self", "[", "fieldname", "]", "]", ")" ]
Returns a new DataTable with rows only where the value at `fieldname` > `value`.
[ "Returns", "a", "new", "DataTable", "with", "rows", "only", "where", "the", "value", "at", "fieldname", ">", "value", "." ]
python
train
37.666667
saltstack/salt
salt/proxy/ssh_sample.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/ssh_sample.py#L150-L162
def package_install(name, **kwargs): ''' Install a "package" on the ssh server ''' cmd = 'pkg_install ' + name if kwargs.get('version', False): cmd += ' ' + kwargs['version'] # Send the command to execute out, err = DETAILS['server'].sendline(cmd) # "scrape" the output and return the right fields as a dict return parse(out)
[ "def", "package_install", "(", "name", ",", "*", "*", "kwargs", ")", ":", "cmd", "=", "'pkg_install '", "+", "name", "if", "kwargs", ".", "get", "(", "'version'", ",", "False", ")", ":", "cmd", "+=", "' '", "+", "kwargs", "[", "'version'", "]", "# Se...
Install a "package" on the ssh server
[ "Install", "a", "package", "on", "the", "ssh", "server" ]
python
train
27.615385
Garee/pytodoist
pytodoist/todoist.py
https://github.com/Garee/pytodoist/blob/3359cbff485ebdbbb4ffbd58d71e21a817874dd7/pytodoist/todoist.py#L377-L386
def _sync_notes(self, notes_json): """"Populate the user's notes from a JSON encoded list.""" for note_json in notes_json: note_id = note_json['id'] task_id = note_json['item_id'] if task_id not in self.tasks: # ignore orphan notes continue task = self.tasks[task_id] self.notes[note_id] = Note(note_json, task)
[ "def", "_sync_notes", "(", "self", ",", "notes_json", ")", ":", "for", "note_json", "in", "notes_json", ":", "note_id", "=", "note_json", "[", "'id'", "]", "task_id", "=", "note_json", "[", "'item_id'", "]", "if", "task_id", "not", "in", "self", ".", "ta...
Populate the user's notes from a JSON encoded list.
[ "Populate", "the", "user", "s", "notes", "from", "a", "JSON", "encoded", "list", "." ]
python
train
41
xtrementl/focus
focus/task.py
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/task.py#L205-L248
def create(self, task_name, clone_task=None): """ Creates a new task directory. `task_name` Task name. `clone_task` Existing task name to use as a template for new task. Returns boolean. * Raises ``Value`` if task name is invalid, ``TaskExists`` if task already exists, or ``TaskNotFound`` if task for `clone_from` doesn't exist. """ if not task_name or task_name.startswith('-'): raise ValueError('Invalid task name') try: task_dir = self._get_task_dir(task_name) if self.exists(task_dir): raise errors.TaskExists(task_name) task_cfg = self.get_config_path(task_name) if clone_task: if not self.exists(clone_task): raise errors.TaskNotFound(clone_task) # copy task directory shutil.copytree(self._get_task_dir(clone_task), task_dir) else: os.mkdir(task_dir) # write default task configuration shutil.copy(self._default_task_config, task_cfg) return True except OSError: shutil.rmtree(task_dir, ignore_errors=True) return False
[ "def", "create", "(", "self", ",", "task_name", ",", "clone_task", "=", "None", ")", ":", "if", "not", "task_name", "or", "task_name", ".", "startswith", "(", "'-'", ")", ":", "raise", "ValueError", "(", "'Invalid task name'", ")", "try", ":", "task_dir", ...
Creates a new task directory. `task_name` Task name. `clone_task` Existing task name to use as a template for new task. Returns boolean. * Raises ``Value`` if task name is invalid, ``TaskExists`` if task already exists, or ``TaskNotFound`` if task for `clone_from` doesn't exist.
[ "Creates", "a", "new", "task", "directory", "." ]
python
train
29.295455
Kozea/pygal
pygal/svg.py
https://github.com/Kozea/pygal/blob/5e25c98a59a0642eecd9fcc5dbfeeb2190fbb5e7/pygal/svg.py#L87-L131
def add_styles(self): """Add the css to the svg""" colors = self.graph.style.get_colors(self.id, self.graph._order) strokes = self.get_strokes() all_css = [] auto_css = ['file://base.css'] if self.graph.style._google_fonts: auto_css.append( '//fonts.googleapis.com/css?family=%s' % quote_plus('|'.join(self.graph.style._google_fonts)) ) for css in auto_css + list(self.graph.css): css_text = None if css.startswith('inline:'): css_text = css[len('inline:'):] elif css.startswith('file://'): css = css[len('file://'):] if not os.path.exists(css): css = os.path.join(os.path.dirname(__file__), 'css', css) with io.open(css, encoding='utf-8') as f: css_text = template( f.read(), style=self.graph.style, colors=colors, strokes=strokes, id=self.id ) if css_text is not None: if not self.graph.pretty_print: css_text = minify_css(css_text) all_css.append(css_text) else: if css.startswith('//') and self.graph.force_uri_protocol: css = '%s:%s' % (self.graph.force_uri_protocol, css) self.processing_instructions.append( etree.PI(u('xml-stylesheet'), u('href="%s"' % css)) ) self.node( self.defs, 'style', type='text/css' ).text = '\n'.join(all_css)
[ "def", "add_styles", "(", "self", ")", ":", "colors", "=", "self", ".", "graph", ".", "style", ".", "get_colors", "(", "self", ".", "id", ",", "self", ".", "graph", ".", "_order", ")", "strokes", "=", "self", ".", "get_strokes", "(", ")", "all_css", ...
Add the css to the svg
[ "Add", "the", "css", "to", "the", "svg" ]
python
train
37.511111
theislab/scanpy
scanpy/neighbors/__init__.py
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/neighbors/__init__.py#L765-L818
def compute_eigen(self, n_comps=15, sym=None, sort='decrease'): """Compute eigen decomposition of transition matrix. Parameters ---------- n_comps : `int` Number of eigenvalues/vectors to be computed, set `n_comps = 0` if you need all eigenvectors. sym : `bool` Instead of computing the eigendecomposition of the assymetric transition matrix, computed the eigendecomposition of the symmetric Ktilde matrix. matrix : sparse matrix, np.ndarray, optional (default: `.connectivities`) Matrix to diagonalize. Merely for testing and comparison purposes. Returns ------- Writes the following attributes. eigen_values : numpy.ndarray Eigenvalues of transition matrix. eigen_basis : numpy.ndarray Matrix of eigenvectors (stored in columns). `.eigen_basis` is projection of data matrix on right eigenvectors, that is, the projection on the diffusion components. these are simply the components of the right eigenvectors and can directly be used for plotting. """ np.set_printoptions(precision=10) if self._transitions_sym is None: raise ValueError('Run `.compute_transitions` first.') matrix = self._transitions_sym # compute the spectrum if n_comps == 0: evals, evecs = scipy.linalg.eigh(matrix) else: n_comps = min(matrix.shape[0]-1, n_comps) # ncv = max(2 * n_comps + 1, int(np.sqrt(matrix.shape[0]))) ncv = None which = 'LM' if sort == 'decrease' else 'SM' # it pays off to increase the stability with a bit more precision matrix = matrix.astype(np.float64) evals, evecs = scipy.sparse.linalg.eigsh(matrix, k=n_comps, which=which, ncv=ncv) evals, evecs = evals.astype(np.float32), evecs.astype(np.float32) if sort == 'decrease': evals = evals[::-1] evecs = evecs[:, ::-1] logg.info(' eigenvalues of transition matrix\n' ' {}'.format(str(evals).replace('\n', '\n '))) if self._number_connected_components > len(evals)/2: logg.warn('Transition matrix has many disconnected components!') self._eigen_values = evals self._eigen_basis = evecs
[ "def", "compute_eigen", "(", "self", ",", "n_comps", "=", "15", ",", "sym", "=", "None", ",", "sort", "=", "'decrease'", ")", ":", "np", ".", "set_printoptions", "(", "precision", "=", "10", ")", "if", "self", ".", "_transitions_sym", "is", "None", ":"...
Compute eigen decomposition of transition matrix. Parameters ---------- n_comps : `int` Number of eigenvalues/vectors to be computed, set `n_comps = 0` if you need all eigenvectors. sym : `bool` Instead of computing the eigendecomposition of the assymetric transition matrix, computed the eigendecomposition of the symmetric Ktilde matrix. matrix : sparse matrix, np.ndarray, optional (default: `.connectivities`) Matrix to diagonalize. Merely for testing and comparison purposes. Returns ------- Writes the following attributes. eigen_values : numpy.ndarray Eigenvalues of transition matrix. eigen_basis : numpy.ndarray Matrix of eigenvectors (stored in columns). `.eigen_basis` is projection of data matrix on right eigenvectors, that is, the projection on the diffusion components. these are simply the components of the right eigenvectors and can directly be used for plotting.
[ "Compute", "eigen", "decomposition", "of", "transition", "matrix", "." ]
python
train
45.351852
estnltk/estnltk
estnltk/wordnet/eurown.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/eurown.py#L1739-L1748
def addUsage_Label(self,usage_label): '''Appends one Usage_Label to usage_labels ''' if isinstance(usage_label, Usage_Label): self.usage_labels.append(usage_label) else: raise (Usage_LabelError, 'usage_label Type should be Usage_Label, not %s' % type( usage_label) )
[ "def", "addUsage_Label", "(", "self", ",", "usage_label", ")", ":", "if", "isinstance", "(", "usage_label", ",", "Usage_Label", ")", ":", "self", ".", "usage_labels", ".", "append", "(", "usage_label", ")", "else", ":", "raise", "(", "Usage_LabelError", ",",...
Appends one Usage_Label to usage_labels
[ "Appends", "one", "Usage_Label", "to", "usage_labels" ]
python
train
37.1
benjamin-hodgson/asynqp
src/asynqp/__init__.py
https://github.com/benjamin-hodgson/asynqp/blob/ea8630d1803d10d4fd64b1a0e50f3097710b34d1/src/asynqp/__init__.py#L22-L86
def connect(host='localhost', port=5672, username='guest', password='guest', virtual_host='/', on_connection_close=None, *, loop=None, sock=None, **kwargs): """ Connect to an AMQP server on the given host and port. Log in to the given virtual host using the supplied credentials. This function is a :ref:`coroutine <coroutine>`. :param str host: the host server to connect to. :param int port: the port which the AMQP server is listening on. :param str username: the username to authenticate with. :param str password: the password to authenticate with. :param str virtual_host: the AMQP virtual host to connect to. :param func on_connection_close: function called after connection lost. :keyword BaseEventLoop loop: An instance of :class:`~asyncio.BaseEventLoop` to use. (Defaults to :func:`asyncio.get_event_loop()`) :keyword socket sock: A :func:`~socket.socket` instance to use for the connection. This is passed on to :meth:`loop.create_connection() <asyncio.BaseEventLoop.create_connection>`. If ``sock`` is supplied then ``host`` and ``port`` will be ignored. Further keyword arguments are passed on to :meth:`loop.create_connection() <asyncio.BaseEventLoop.create_connection>`. This function will set TCP_NODELAY on TCP and TCP6 sockets either on supplied ``sock`` or created one. :return: the :class:`Connection` object. """ from .protocol import AMQP from .routing import Dispatcher from .connection import open_connection loop = asyncio.get_event_loop() if loop is None else loop if sock is None: kwargs['host'] = host kwargs['port'] = port else: kwargs['sock'] = sock dispatcher = Dispatcher() def protocol_factory(): return AMQP(dispatcher, loop, close_callback=on_connection_close) transport, protocol = yield from loop.create_connection(protocol_factory, **kwargs) # RPC-like applications require TCP_NODELAY in order to acheive # minimal response time. Actually, this library send data in one # big chunk and so this will not affect TCP-performance. 
sk = transport.get_extra_info('socket') # 1. Unfortunatelly we cannot check socket type (sk.type == socket.SOCK_STREAM). https://bugs.python.org/issue21327 # 2. Proto remains zero, if not specified at creation of socket if (sk.family in (socket.AF_INET, socket.AF_INET6)) and (sk.proto in (0, socket.IPPROTO_TCP)): sk.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) connection_info = { 'username': username, 'password': password, 'virtual_host': virtual_host } connection = yield from open_connection( loop, transport, protocol, dispatcher, connection_info) return connection
[ "def", "connect", "(", "host", "=", "'localhost'", ",", "port", "=", "5672", ",", "username", "=", "'guest'", ",", "password", "=", "'guest'", ",", "virtual_host", "=", "'/'", ",", "on_connection_close", "=", "None", ",", "*", ",", "loop", "=", "None", ...
Connect to an AMQP server on the given host and port. Log in to the given virtual host using the supplied credentials. This function is a :ref:`coroutine <coroutine>`. :param str host: the host server to connect to. :param int port: the port which the AMQP server is listening on. :param str username: the username to authenticate with. :param str password: the password to authenticate with. :param str virtual_host: the AMQP virtual host to connect to. :param func on_connection_close: function called after connection lost. :keyword BaseEventLoop loop: An instance of :class:`~asyncio.BaseEventLoop` to use. (Defaults to :func:`asyncio.get_event_loop()`) :keyword socket sock: A :func:`~socket.socket` instance to use for the connection. This is passed on to :meth:`loop.create_connection() <asyncio.BaseEventLoop.create_connection>`. If ``sock`` is supplied then ``host`` and ``port`` will be ignored. Further keyword arguments are passed on to :meth:`loop.create_connection() <asyncio.BaseEventLoop.create_connection>`. This function will set TCP_NODELAY on TCP and TCP6 sockets either on supplied ``sock`` or created one. :return: the :class:`Connection` object.
[ "Connect", "to", "an", "AMQP", "server", "on", "the", "given", "host", "and", "port", "." ]
python
train
42.984615
hatemile/hatemile-for-python
hatemile/implementation/css.py
https://github.com/hatemile/hatemile-for-python/blob/1e914f9aa09f6f8d78282af131311546ecba9fb8/hatemile/implementation/css.py#L568-L587
def _create_aural_content_element(self, content, data_property_value): """ Create a element to show the content, only to aural displays. :param content: The text content of element. :type content: str :param data_property_value: The value of custom attribute used to identify the fix. :type data_property_value: str :return: The element to show the content. :rtype: hatemile.util.html.htmldomelement.HTMLDOMElement """ content_element = self._create_content_element( content, data_property_value ) content_element.set_attribute('unselectable', 'on') content_element.set_attribute('class', 'screen-reader-only') return content_element
[ "def", "_create_aural_content_element", "(", "self", ",", "content", ",", "data_property_value", ")", ":", "content_element", "=", "self", ".", "_create_content_element", "(", "content", ",", "data_property_value", ")", "content_element", ".", "set_attribute", "(", "'...
Create a element to show the content, only to aural displays. :param content: The text content of element. :type content: str :param data_property_value: The value of custom attribute used to identify the fix. :type data_property_value: str :return: The element to show the content. :rtype: hatemile.util.html.htmldomelement.HTMLDOMElement
[ "Create", "a", "element", "to", "show", "the", "content", "only", "to", "aural", "displays", "." ]
python
train
39.4
gwastro/pycbc-glue
pycbc_glue/ligolw/utils/ligolw_add.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/ligolw/utils/ligolw_add.py#L111-L125
def merge_ligolws(elem): """ Merge all LIGO_LW elements that are immediate children of elem by appending their children to the first. """ ligolws = [child for child in elem.childNodes if child.tagName == ligolw.LIGO_LW.tagName] if ligolws: dest = ligolws.pop(0) for src in ligolws: # copy children; LIGO_LW elements have no attributes map(dest.appendChild, src.childNodes) # unlink from parent if src.parentNode is not None: src.parentNode.removeChild(src) return elem
[ "def", "merge_ligolws", "(", "elem", ")", ":", "ligolws", "=", "[", "child", "for", "child", "in", "elem", ".", "childNodes", "if", "child", ".", "tagName", "==", "ligolw", ".", "LIGO_LW", ".", "tagName", "]", "if", "ligolws", ":", "dest", "=", "ligolw...
Merge all LIGO_LW elements that are immediate children of elem by appending their children to the first.
[ "Merge", "all", "LIGO_LW", "elements", "that", "are", "immediate", "children", "of", "elem", "by", "appending", "their", "children", "to", "the", "first", "." ]
python
train
32.133333
pingali/dgit
dgitcore/datasets/history.py
https://github.com/pingali/dgit/blob/ecde01f40b98f0719dbcfb54452270ed2f86686d/dgitcore/datasets/history.py#L196-L278
def get_diffs(history): """ Look at files and compute the diffs intelligently """ # First get all possible representations mgr = plugins_get_mgr() keys = mgr.search('representation')['representation'] representations = [mgr.get_by_key('representation', k) for k in keys] for i in range(len(history)): if i+1 > len(history) - 1: continue prev = history[i] curr = history[i+1] #print(prev['subject'], "==>", curr['subject']) #print(curr['changes']) for c in curr['changes']: path = c['path'] # Skip the metadata file if c['path'].endswith('datapackage.json'): continue # Find a handler for this kind of file... handler = None for r in representations: if r.can_process(path): handler = r break if handler is None: continue # print(path, "being handled by", handler) v1_hex = prev['commit'] v2_hex = curr['commit'] temp1 = tempfile.mkdtemp(prefix="dgit-diff-") try: for h in [v1_hex, v2_hex]: filename = '{}/{}/checkout.tar'.format(temp1, h) try: os.makedirs(os.path.dirname(filename)) except: pass extractcmd = ['git', 'archive', '-o', filename, h, path] output = run(extractcmd) if 'fatal' in output: raise Exception("File not present in commit") with cd(os.path.dirname(filename)): cmd = ['tar', 'xvf', 'checkout.tar'] output = run(cmd) if 'fatal' in output: print("Cleaning up - fatal 1", temp1) shutil.rmtree(temp1) continue # Check to make sure that path1 = os.path.join(temp1, v1_hex, path) path2 = os.path.join(temp1, v2_hex, path) if not os.path.exists(path1) or not os.path.exists(path2): # print("One of the two output files is missing") shutil.rmtree(temp1) continue #print(path1, path2) # Now call the handler diff = handler.get_diff(path1, path2) # print("Inserting diff", diff) c['diff'] = diff except Exception as e: #traceback.print_exc() #print("Cleaning up - Exception ", temp1) shutil.rmtree(temp1)
[ "def", "get_diffs", "(", "history", ")", ":", "# First get all possible representations", "mgr", "=", "plugins_get_mgr", "(", ")", "keys", "=", "mgr", ".", "search", "(", "'representation'", ")", "[", "'representation'", "]", "representations", "=", "[", "mgr", ...
Look at files and compute the diffs intelligently
[ "Look", "at", "files", "and", "compute", "the", "diffs", "intelligently" ]
python
valid
33.578313
renatopp/liac-arff
arff.py
https://github.com/renatopp/liac-arff/blob/6771f4cdd13d0eca74d3ebbaa6290297dd0a381d/arff.py#L484-L512
def encode_data(self, data, attributes): '''(INTERNAL) Encodes a line of data. Data instances follow the csv format, i.e, attribute values are delimited by commas. After converted from csv. :param data: a list of values. :param attributes: a list of attributes. Used to check if data is valid. :return: a string with the encoded data line. ''' current_row = 0 for inst in data: if len(inst) != len(attributes): raise BadObject( 'Instance %d has %d attributes, expected %d' % (current_row, len(inst), len(attributes)) ) new_data = [] for value in inst: if value is None or value == u'' or value != value: s = '?' else: s = encode_string(unicode(value)) new_data.append(s) current_row += 1 yield u','.join(new_data)
[ "def", "encode_data", "(", "self", ",", "data", ",", "attributes", ")", ":", "current_row", "=", "0", "for", "inst", "in", "data", ":", "if", "len", "(", "inst", ")", "!=", "len", "(", "attributes", ")", ":", "raise", "BadObject", "(", "'Instance %d ha...
(INTERNAL) Encodes a line of data. Data instances follow the csv format, i.e, attribute values are delimited by commas. After converted from csv. :param data: a list of values. :param attributes: a list of attributes. Used to check if data is valid. :return: a string with the encoded data line.
[ "(", "INTERNAL", ")", "Encodes", "a", "line", "of", "data", "." ]
python
train
33.896552
DallasMorningNews/django-datafreezer
datafreezer/views.py
https://github.com/DallasMorningNews/django-datafreezer/blob/982dcf2015c80a280f1a093e32977cb71d4ea7aa/datafreezer/views.py#L388-L414
def home(request): """Renders Datafreezer homepage. Includes recent uploads.""" recent_uploads = Dataset.objects.order_by('-date_uploaded')[:11] email_list = [upload.uploaded_by.strip() for upload in recent_uploads] # print all_staff emails_names = grab_names_from_emails(email_list) # print emails_names for upload in recent_uploads: for item in emails_names: if upload.uploaded_by == item: upload.fullName = emails_names[item] for upload in recent_uploads: if not hasattr(upload, 'fullName'): upload.fullName = upload.uploaded_by return render( request, 'datafreezer/home.html', { 'recent_uploads': recent_uploads, 'heading': 'Most Recent Uploads' } )
[ "def", "home", "(", "request", ")", ":", "recent_uploads", "=", "Dataset", ".", "objects", ".", "order_by", "(", "'-date_uploaded'", ")", "[", ":", "11", "]", "email_list", "=", "[", "upload", ".", "uploaded_by", ".", "strip", "(", ")", "for", "upload", ...
Renders Datafreezer homepage. Includes recent uploads.
[ "Renders", "Datafreezer", "homepage", ".", "Includes", "recent", "uploads", "." ]
python
train
29.037037
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_genobstacles.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_genobstacles.py#L93-L98
def move(self, bearing, distance): '''move position by bearing and distance''' lat = self.pkt['I105']['Lat']['val'] lon = self.pkt['I105']['Lon']['val'] (lat, lon) = mp_util.gps_newpos(lat, lon, bearing, distance) self.setpos(lat, lon)
[ "def", "move", "(", "self", ",", "bearing", ",", "distance", ")", ":", "lat", "=", "self", ".", "pkt", "[", "'I105'", "]", "[", "'Lat'", "]", "[", "'val'", "]", "lon", "=", "self", ".", "pkt", "[", "'I105'", "]", "[", "'Lon'", "]", "[", "'val'"...
move position by bearing and distance
[ "move", "position", "by", "bearing", "and", "distance" ]
python
train
45
CalebBell/fluids
fluids/units.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/units.py#L51-L58
def func_args(func): '''Basic function which returns a tuple of arguments of a function or method. ''' try: return tuple(inspect.signature(func).parameters) except: return tuple(inspect.getargspec(func).args)
[ "def", "func_args", "(", "func", ")", ":", "try", ":", "return", "tuple", "(", "inspect", ".", "signature", "(", "func", ")", ".", "parameters", ")", "except", ":", "return", "tuple", "(", "inspect", ".", "getargspec", "(", "func", ")", ".", "args", ...
Basic function which returns a tuple of arguments of a function or method.
[ "Basic", "function", "which", "returns", "a", "tuple", "of", "arguments", "of", "a", "function", "or", "method", "." ]
python
train
29.625
junzis/pyModeS
pyModeS/decoder/bds/bds05.py
https://github.com/junzis/pyModeS/blob/8cd5655a04b08171a9ad5f1ffd232b7e0178ea53/pyModeS/decoder/bds/bds05.py#L132-L161
def altitude(msg): """Decode aircraft altitude Args: msg (string): 28 bytes hexadecimal message string Returns: int: altitude in feet """ tc = common.typecode(msg) if tc<9 or tc==19 or tc>22: raise RuntimeError("%s: Not a airborn position message" % msg) mb = common.hex2bin(msg)[32:] if tc < 19: # barometric altitude q = mb[15] if q: n = common.bin2int(mb[8:15]+mb[16:20]) alt = n * 25 - 1000 else: alt = None else: # GNSS altitude, meters -> feet alt = common.bin2int(mb[8:20]) * 3.28084 return alt
[ "def", "altitude", "(", "msg", ")", ":", "tc", "=", "common", ".", "typecode", "(", "msg", ")", "if", "tc", "<", "9", "or", "tc", "==", "19", "or", "tc", ">", "22", ":", "raise", "RuntimeError", "(", "\"%s: Not a airborn position message\"", "%", "msg"...
Decode aircraft altitude Args: msg (string): 28 bytes hexadecimal message string Returns: int: altitude in feet
[ "Decode", "aircraft", "altitude" ]
python
train
20.9
django-admin-tools/django-admin-tools
admin_tools/dashboard/modules.py
https://github.com/django-admin-tools/django-admin-tools/blob/ba6f46f51ebd84fcf84f2f79ec9487f45452d79b/admin_tools/dashboard/modules.py#L251-L275
def is_empty(self): """ A group of modules is considered empty if it has no children or if all its children are empty. >>> from admin_tools.dashboard.modules import DashboardModule, LinkList >>> mod = Group() >>> mod.is_empty() True >>> mod.children.append(DashboardModule()) >>> mod.is_empty() True >>> mod.children.append(LinkList('links', children=[ ... {'title': 'example1', 'url': 'http://example.com'}, ... {'title': 'example2', 'url': 'http://example.com'}, ... ])) >>> mod.is_empty() False """ if super(Group, self).is_empty(): return True for child in self.children: if not child.is_empty(): return False return True
[ "def", "is_empty", "(", "self", ")", ":", "if", "super", "(", "Group", ",", "self", ")", ".", "is_empty", "(", ")", ":", "return", "True", "for", "child", "in", "self", ".", "children", ":", "if", "not", "child", ".", "is_empty", "(", ")", ":", "...
A group of modules is considered empty if it has no children or if all its children are empty. >>> from admin_tools.dashboard.modules import DashboardModule, LinkList >>> mod = Group() >>> mod.is_empty() True >>> mod.children.append(DashboardModule()) >>> mod.is_empty() True >>> mod.children.append(LinkList('links', children=[ ... {'title': 'example1', 'url': 'http://example.com'}, ... {'title': 'example2', 'url': 'http://example.com'}, ... ])) >>> mod.is_empty() False
[ "A", "group", "of", "modules", "is", "considered", "empty", "if", "it", "has", "no", "children", "or", "if", "all", "its", "children", "are", "empty", "." ]
python
train
32.32
materialsproject/pymatgen
pymatgen/analysis/defects/corrections.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/defects/corrections.py#L249-L283
def plot(self, axis, title=None, saved=False): """ Plots the planar average electrostatic potential against the Long range and short range models from Freysoldt """ x = self.metadata['pot_plot_data'][axis]['x'] v_R = self.metadata['pot_plot_data'][axis]['Vr'] dft_diff = self.metadata['pot_plot_data'][axis]['dft_diff'] final_shift = self.metadata['pot_plot_data'][axis]['final_shift'] check = self.metadata['pot_plot_data'][axis]['check'] plt.figure() plt.clf() plt.plot(x, v_R, c="green", zorder=1, label="long range from model") plt.plot(x, dft_diff, c="red", label="DFT locpot diff") plt.plot(x, final_shift, c="blue", label="short range (aligned)") tmpx = [x[i] for i in range(check[0], check[1])] plt.fill_between(tmpx, -100, 100, facecolor="red", alpha=0.15, label="sampling region") plt.xlim(round(x[0]), round(x[-1])) ymin = min(min(v_R), min(dft_diff), min(final_shift)) ymax = max(max(v_R), max(dft_diff), max(final_shift)) plt.ylim(-0.2 + ymin, 0.2 + ymax) plt.xlabel("distance along axis ($\AA$)", fontsize=15) plt.ylabel("Potential (V)", fontsize=15) plt.legend(loc=9) plt.axhline(y=0, linewidth=0.2, color="black") plt.title(str(title) + " defect potential", fontsize=18) plt.xlim(0, max(x)) if saved: plt.savefig(str(title) + "FreyplnravgPlot.pdf") else: return plt
[ "def", "plot", "(", "self", ",", "axis", ",", "title", "=", "None", ",", "saved", "=", "False", ")", ":", "x", "=", "self", ".", "metadata", "[", "'pot_plot_data'", "]", "[", "axis", "]", "[", "'x'", "]", "v_R", "=", "self", ".", "metadata", "[",...
Plots the planar average electrostatic potential against the Long range and short range models from Freysoldt
[ "Plots", "the", "planar", "average", "electrostatic", "potential", "against", "the", "Long", "range", "and", "short", "range", "models", "from", "Freysoldt" ]
python
train
42.628571
Nic30/hwt
hwt/hdl/frameTmplUtils.py
https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/hdl/frameTmplUtils.py#L222-L247
def groupByWordIndex(self, transaction: 'TransTmpl', offset: int): """ Group transaction parts splited on words to words :param transaction: TransTmpl instance which parts should be grupped into words :return: generator of tuples (wordIndex, list of transaction parts in this word) """ actualW = None partsInWord = [] wordWidth = self.wordWidth for item in self.splitOnWords(transaction, offset): _actualW = item.startOfPart // wordWidth if actualW is None: actualW = _actualW partsInWord.append(item) elif _actualW > actualW: yield (actualW, partsInWord) actualW = _actualW partsInWord = [item, ] else: partsInWord.append(item) if partsInWord: yield (actualW, partsInWord)
[ "def", "groupByWordIndex", "(", "self", ",", "transaction", ":", "'TransTmpl'", ",", "offset", ":", "int", ")", ":", "actualW", "=", "None", "partsInWord", "=", "[", "]", "wordWidth", "=", "self", ".", "wordWidth", "for", "item", "in", "self", ".", "spli...
Group transaction parts splited on words to words :param transaction: TransTmpl instance which parts should be grupped into words :return: generator of tuples (wordIndex, list of transaction parts in this word)
[ "Group", "transaction", "parts", "splited", "on", "words", "to", "words" ]
python
test
35
Asana/python-asana
asana/client.py
https://github.com/Asana/python-asana/blob/6deb7a34495db23f44858e53b6bb2c9eccff7872/asana/client.py#L187-L208
def _parse_api_options(self, options, query_string=False): """Select API options out of the provided options object. Selects API string options out of the provided options object and formats for either request body (default) or query string. """ api_options = self._select_options(options, self.API_OPTIONS) if query_string: # Prefix all options with "opt_" query_api_options = {} for key in api_options: # Transform list/tuples into comma separated list if isinstance(api_options[key], (list, tuple)): query_api_options[ 'opt_' + key] = ','.join(api_options[key]) else: query_api_options[ 'opt_' + key] = api_options[key] return query_api_options else: return api_options
[ "def", "_parse_api_options", "(", "self", ",", "options", ",", "query_string", "=", "False", ")", ":", "api_options", "=", "self", ".", "_select_options", "(", "options", ",", "self", ".", "API_OPTIONS", ")", "if", "query_string", ":", "# Prefix all options with...
Select API options out of the provided options object. Selects API string options out of the provided options object and formats for either request body (default) or query string.
[ "Select", "API", "options", "out", "of", "the", "provided", "options", "object", "." ]
python
train
41.136364
shoyer/h5netcdf
h5netcdf/_chainmap.py
https://github.com/shoyer/h5netcdf/blob/3ae35cd58297281a1dc69c46fb0b315a0007ac2b/h5netcdf/_chainmap.py#L131-L136
def pop(self, key, *args): 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' try: return self.maps[0].pop(key, *args) except KeyError: raise KeyError('Key not found in the first mapping: {!r}'.format(key))
[ "def", "pop", "(", "self", ",", "key", ",", "*", "args", ")", ":", "try", ":", "return", "self", ".", "maps", "[", "0", "]", ".", "pop", "(", "key", ",", "*", "args", ")", "except", "KeyError", ":", "raise", "KeyError", "(", "'Key not found in the ...
Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].
[ "Remove", "*", "key", "*", "from", "maps", "[", "0", "]", "and", "return", "its", "value", ".", "Raise", "KeyError", "if", "*", "key", "*", "not", "in", "maps", "[", "0", "]", "." ]
python
train
48
bram85/topydo
topydo/ui/cli/CLI.py
https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/ui/cli/CLI.py#L45-L64
def run(self): """ Main entry function. """ args = self._process_flags() self.todofile = TodoFile.TodoFile(config().todotxt()) self.todolist = TodoList.TodoList(self.todofile.read()) try: (subcommand, args) = get_subcommand(args) except ConfigError as ce: error('Error: ' + str(ce) + '. Check your aliases configuration') sys.exit(1) if subcommand is None: CLIApplicationBase._usage() if self._execute(subcommand, args) == False: sys.exit(1) else: self._post_execute()
[ "def", "run", "(", "self", ")", ":", "args", "=", "self", ".", "_process_flags", "(", ")", "self", ".", "todofile", "=", "TodoFile", ".", "TodoFile", "(", "config", "(", ")", ".", "todotxt", "(", ")", ")", "self", ".", "todolist", "=", "TodoList", ...
Main entry function.
[ "Main", "entry", "function", "." ]
python
train
29.85
opereto/pyopereto
pyopereto/client.py
https://github.com/opereto/pyopereto/blob/16ca987738a7e1b82b52b0b099794a74ed557223/pyopereto/client.py#L802-L819
def modify_agent_properties(self, agent_id, key_value_map={}): ''' modify_agent_properties(self, agent_id, key_value_map={}) Modify properties of an agent. If properties do not exists, they will be created :Parameters: * *agent_id* (`string`) -- Identifier of an existing agent * *key_value_map* (`object`) -- Key value map of properties to change * *value* (`string`) -- New Value of the property to change :Example: .. code-block:: python opereto_client.modify_agent_properties('my_agent_id', {"mykey": "myvalue", "mykey2": "myvalue2"}) ''' return self._call_rest_api('post', '/agents/'+agent_id+'/properties', data=key_value_map, error='Failed to modify agent [%s] properties'%agent_id)
[ "def", "modify_agent_properties", "(", "self", ",", "agent_id", ",", "key_value_map", "=", "{", "}", ")", ":", "return", "self", ".", "_call_rest_api", "(", "'post'", ",", "'/agents/'", "+", "agent_id", "+", "'/properties'", ",", "data", "=", "key_value_map", ...
modify_agent_properties(self, agent_id, key_value_map={}) Modify properties of an agent. If properties do not exists, they will be created :Parameters: * *agent_id* (`string`) -- Identifier of an existing agent * *key_value_map* (`object`) -- Key value map of properties to change * *value* (`string`) -- New Value of the property to change :Example: .. code-block:: python opereto_client.modify_agent_properties('my_agent_id', {"mykey": "myvalue", "mykey2": "myvalue2"})
[ "modify_agent_properties", "(", "self", "agent_id", "key_value_map", "=", "{}", ")" ]
python
train
43.111111
tanghaibao/jcvi
jcvi/annotation/pasa.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/pasa.py#L336-L507
def consolidate(args): """ %prog consolidate gffile1 gffile2 ... > consolidated.out Given 2 or more gff files generated by pasa annotation comparison, iterate through each locus (shared locus name or overlapping CDS) and identify same/different isoforms (shared splicing structure) across the input datasets. If `slop` is enabled, consolidation will collapse any variation in terminal UTR lengths, keeping the longest as representative. """ from jcvi.formats.base import longest_unique_prefix from jcvi.formats.gff import make_index, match_subfeats from jcvi.utils.cbook import AutoVivification from jcvi.utils.grouper import Grouper from itertools import combinations, product supported_modes = ["name", "coords"] p = OptionParser(consolidate.__doc__) p.add_option("--slop", default=False, action="store_true", help="allow minor variation in terminal 5'/3' UTR" + \ " start/stop position [default: %default]") p.add_option("--inferUTR", default=False, action="store_true", help="infer presence of UTRs from exon coordinates") p.add_option("--mode", default="name", choices=supported_modes, help="method used to determine overlapping loci") p.add_option("--summary", default=False, action="store_true", help="Generate summary table of consolidation process") p.add_option("--clusters", default=False, action="store_true", help="Generate table of cluster members after consolidation") p.set_outfile() opts, args = p.parse_args(args) slop = opts.slop inferUTR = opts.inferUTR mode = opts.mode if len(args) < 2: sys.exit(not p.print_help()) gffdbx = {} for gffile in args: dbn = longest_unique_prefix(gffile, args) gffdbx[dbn] = make_index(gffile) loci = Grouper() for dbn in gffdbx: odbns = [odbn for odbn in gffdbx if dbn != odbn] for gene in gffdbx[dbn].features_of_type('gene', order_by=('seqid', 'start')): if mode == "name": loci.join(gene.id, (gene.id, dbn)) else: if (gene.id, dbn) not in loci: loci.join((gene.id, dbn)) gene_cds = list(gffdbx[dbn].children(gene, \ featuretype='CDS', 
order_by=('start'))) gene_cds_start, gene_cds_stop = gene_cds[0].start, \ gene_cds[-1].stop for odbn in odbns: for ogene_cds in gffdbx[odbn].region(seqid=gene.seqid, \ start=gene_cds_start, end=gene_cds_stop, \ strand=gene.strand, featuretype='CDS'): for ogene in gffdbx[odbn].parents(ogene_cds, featuretype='gene'): loci.join((gene.id, dbn), (ogene.id, odbn)) gfeats = {} mrna = AutoVivification() for i, locus in enumerate(loci): gene = "gene_{0:0{pad}}".format(i, pad=6) \ if mode == "coords" else None for elem in locus: if type(elem) == tuple: _gene, dbn = elem if gene is None: gene = _gene g = gffdbx[dbn][_gene] if gene not in gfeats: gfeats[gene] = g gfeats[gene].attributes['ID'] = [gene] else: if g.start < gfeats[gene].start: gfeats[gene].start = g.start if g.stop > gfeats[gene].stop: gfeats[gene].stop = g.stop c = list(gffdbx[dbn].children(_gene, featuretype='mRNA', order_by='start')) if len(c) > 0: mrna[gene][dbn] = c fw = must_open(opts.outfile, "w") print("##gff-version 3", file=fw) seen = {} if opts.summary: summaryfile = "{0}.summary.txt".format(opts.outfile.rsplit(".")[0]) sfw = must_open(summaryfile, "w") summary = ["id"] summary.extend(gffdbx.keys()) print("\t".join(str(x) for x in summary), file=sfw) if opts.clusters: clustersfile = "{0}.clusters.txt".format(opts.outfile.rsplit(".")[0]) cfw = must_open(clustersfile, "w") clusters = ["id", "dbns", "members", "trlens"] print("\t".join(str(x) for x in clusters), file=cfw) for gene in mrna: g = Grouper() dbns = list(combinations(mrna[gene], 2)) if len(dbns) > 0: for dbn1, dbn2 in dbns: dbx1, dbx2 = gffdbx[dbn1], gffdbx[dbn2] for mrna1, mrna2 in product(mrna[gene][dbn1], mrna[gene][dbn2]): mrna1s, mrna2s = mrna1.stop - mrna1.start + 1, \ mrna2.stop - mrna2.start + 1 g.join((dbn1, mrna1.id, mrna1s)) g.join((dbn2, mrna2.id, mrna2s)) if match_subfeats(mrna1, mrna2, dbx1, dbx2, featuretype='CDS'): res = [] ftypes = ['exon'] if inferUTR else ['five_prime_UTR', 'three_prime_UTR'] for ftype in ftypes: 
res.append(match_subfeats(mrna1, mrna2, dbx1, dbx2, featuretype=ftype, slop=slop)) if all(r == True for r in res): g.join((dbn1, mrna1.id, mrna1s), (dbn2, mrna2.id, mrna2s)) else: for dbn1 in mrna[gene]: for mrna1 in mrna[gene][dbn1]: g.join((dbn1, mrna1.id, mrna1.stop - mrna1.start + 1)) print(gfeats[gene], file=fw) for group in g: group.sort(key=lambda x: x[2], reverse=True) dbs, mrnas = [el[0] for el in group], [el[1] for el in group] d, m = dbs[0], mrnas[0] dbid, _mrnaid = "|".join(str(x) for x in set(dbs)), [] for x in mrnas: if x not in _mrnaid: _mrnaid.append(x) mrnaid = "{0}|{1}".format(dbid, "-".join(_mrnaid)) if mrnaid not in seen: seen[mrnaid] = 0 else: seen[mrnaid] += 1 mrnaid = "{0}-{1}".format(mrnaid, seen[mrnaid]) _mrna = gffdbx[d][m] _mrna.attributes['ID'] = [mrnaid] _mrna.attributes['Parent'] = [gene] children = gffdbx[d].children(m, order_by='start') print(_mrna, file=fw) for child in children: child.attributes['ID'] = ["{0}|{1}".format(dbid, child.id)] child.attributes['Parent'] = [mrnaid] print(child, file=fw) if opts.summary: summary = [mrnaid] summary.extend(['Y' if db in set(dbs) else 'N' for db in gffdbx]) print("\t".join(str(x) for x in summary), file=sfw) if opts.clusters: clusters = [mrnaid] clusters.append(",".join(str(el[0]) for el in group)) clusters.append(",".join(str(el[1]) for el in group)) clusters.append(",".join(str(el[2]) for el in group)) print("\t".join(str(x) for x in clusters), file=cfw) fw.close() if opts.summary: sfw.close() if opts.clusters: cfw.close()
[ "def", "consolidate", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "base", "import", "longest_unique_prefix", "from", "jcvi", ".", "formats", ".", "gff", "import", "make_index", ",", "match_subfeats", "from", "jcvi", ".", "utils", ".", "cbook"...
%prog consolidate gffile1 gffile2 ... > consolidated.out Given 2 or more gff files generated by pasa annotation comparison, iterate through each locus (shared locus name or overlapping CDS) and identify same/different isoforms (shared splicing structure) across the input datasets. If `slop` is enabled, consolidation will collapse any variation in terminal UTR lengths, keeping the longest as representative.
[ "%prog", "consolidate", "gffile1", "gffile2", "...", ">", "consolidated", ".", "out" ]
python
train
41.726744
rdussurget/py-altimetry
altimetry/tools/interp_tools.py
https://github.com/rdussurget/py-altimetry/blob/57ce7f2d63c6bbc4993821af0bbe46929e3a2d98/altimetry/tools/interp_tools.py#L88-L114
def interp1d(x,Z,xout,spline=False,kind='linear',fill_value=np.NaN,**kwargs): """ INTERP1D : Interpolate values from a 1D vector at given positions @param x: 1st dimension vector of size NX @author: Renaud DUSSURGET, LER/PAC, Ifremer La Seyne """ linear = not spline nx=len(x) if linear : try : f = scipy.interpolate.interp1d(x, Z, kind=kind,bounds_error=False,fill_value=fill_value,**kwargs) Zout = f(xout) except RuntimeError : Zout = np.repeat(np.NaN,nx) else : tck = scipy.interpolate.splrep(x,Z,s=0) try : Zout = scipy.interpolate.splev(xout,tck,der=0,**kwargs) except RuntimeError : Zout = np.repeat(np.NaN,nx) return Zout
[ "def", "interp1d", "(", "x", ",", "Z", ",", "xout", ",", "spline", "=", "False", ",", "kind", "=", "'linear'", ",", "fill_value", "=", "np", ".", "NaN", ",", "*", "*", "kwargs", ")", ":", "linear", "=", "not", "spline", "nx", "=", "len", "(", "...
INTERP1D : Interpolate values from a 1D vector at given positions @param x: 1st dimension vector of size NX @author: Renaud DUSSURGET, LER/PAC, Ifremer La Seyne
[ "INTERP1D", ":", "Interpolate", "values", "from", "a", "1D", "vector", "at", "given", "positions" ]
python
train
28.62963
seomoz/shovel
shovel/args.py
https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/args.py#L69-L81
def explain(self, *args, **kwargs): '''Return a string that describes how these args are interpreted''' args = self.get(*args, **kwargs) results = ['%s = %s' % (name, value) for name, value in args.required] results.extend(['%s = %s (overridden)' % ( name, value) for name, value in args.overridden]) results.extend(['%s = %s (default)' % ( name, value) for name, value in args.defaulted]) if self._varargs: results.append('%s = %s' % (self._varargs, args.varargs)) if self._kwargs: results.append('%s = %s' % (self._kwargs, args.kwargs)) return '\n\t'.join(results)
[ "def", "explain", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "args", "=", "self", ".", "get", "(", "*", "args", ",", "*", "*", "kwargs", ")", "results", "=", "[", "'%s = %s'", "%", "(", "name", ",", "value", ")", "for", ...
Return a string that describes how these args are interpreted
[ "Return", "a", "string", "that", "describes", "how", "these", "args", "are", "interpreted" ]
python
train
51.230769
wtsi-hgi/python-git-subrepo
gitsubrepo/subrepo.py
https://github.com/wtsi-hgi/python-git-subrepo/blob/bb2eb2bd9a7e51b862298ddb4168cc5b8633dad0/gitsubrepo/subrepo.py#L144-L159
def pull(directory: str) -> Commit: """ Pulls the subrepo that has been cloned into the given directory. :param directory: the directory containing the subrepo :return: the commit the subrepo is on """ if not os.path.exists(directory): raise ValueError(f"No subrepo found in \"{directory}\"") try: result = run([GIT_COMMAND, _GIT_SUBREPO_COMMAND, _GIT_SUBREPO_PULL_COMMAND, _GIT_SUBREPO_VERBOSE_FLAG, get_directory_relative_to_git_root(directory)], execution_directory=get_git_root_directory(directory)) except RunException as e: if "Can't pull subrepo. Working tree has changes" in e.stderr: raise UnstagedChangeException() from e return status(directory)[2]
[ "def", "pull", "(", "directory", ":", "str", ")", "->", "Commit", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "directory", ")", ":", "raise", "ValueError", "(", "f\"No subrepo found in \\\"{directory}\\\"\"", ")", "try", ":", "result", "=", "r...
Pulls the subrepo that has been cloned into the given directory. :param directory: the directory containing the subrepo :return: the commit the subrepo is on
[ "Pulls", "the", "subrepo", "that", "has", "been", "cloned", "into", "the", "given", "directory", ".", ":", "param", "directory", ":", "the", "directory", "containing", "the", "subrepo", ":", "return", ":", "the", "commit", "the", "subrepo", "is", "on" ]
python
train
47.4375
frictionlessdata/tableschema-sql-py
tableschema_sql/storage.py
https://github.com/frictionlessdata/tableschema-sql-py/blob/81ca4b564f6dac5fe3adc6553b353826190df6f8/tableschema_sql/storage.py#L144-L160
def describe(self, bucket, descriptor=None): """https://github.com/frictionlessdata/tableschema-sql-py#storage """ # Set descriptor if descriptor is not None: self.__descriptors[bucket] = descriptor # Get descriptor else: descriptor = self.__descriptors.get(bucket) if descriptor is None: table = self.__get_table(bucket) descriptor = self.__mapper.restore_descriptor( table.name, table.columns, table.constraints, self.__autoincrement) return descriptor
[ "def", "describe", "(", "self", ",", "bucket", ",", "descriptor", "=", "None", ")", ":", "# Set descriptor", "if", "descriptor", "is", "not", "None", ":", "self", ".", "__descriptors", "[", "bucket", "]", "=", "descriptor", "# Get descriptor", "else", ":", ...
https://github.com/frictionlessdata/tableschema-sql-py#storage
[ "https", ":", "//", "github", ".", "com", "/", "frictionlessdata", "/", "tableschema", "-", "sql", "-", "py#storage" ]
python
train
34.411765
gem/oq-engine
openquake/hazardlib/shakemap.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/shakemap.py#L278-L317
def to_gmfs(shakemap, spatialcorr, crosscorr, site_effects, trunclevel, num_gmfs, seed, imts=None): """ :returns: (IMT-strings, array of GMFs of shape (R, N, E, M) """ N = len(shakemap) # number of sites std = shakemap['std'] if imts is None or len(imts) == 0: imts = std.dtype.names else: imts = [imt for imt in imts if imt in std.dtype.names] val = {imt: numpy.log(shakemap['val'][imt]) - std[imt] ** 2 / 2. for imt in imts} imts_ = [imt.from_string(name) for name in imts] M = len(imts_) cross_corr = cross_correlation_matrix(imts_, crosscorr) mu = numpy.array([numpy.ones(num_gmfs) * val[str(imt)][j] for imt in imts_ for j in range(N)]) dmatrix = geo.geodetic.distance_matrix( shakemap['lon'], shakemap['lat']) spatial_corr = spatial_correlation_array(dmatrix, imts_, spatialcorr) stddev = [std[str(imt)] for imt in imts_] for im, std in zip(imts_, stddev): if std.sum() == 0: raise ValueError('Cannot decompose the spatial covariance ' 'because stddev==0 for IMT=%s' % im) spatial_cov = spatial_covariance_array(stddev, spatial_corr) L = cholesky(spatial_cov, cross_corr) # shape (M * N, M * N) if trunclevel: Z = truncnorm.rvs(-trunclevel, trunclevel, loc=0, scale=1, size=(M * N, num_gmfs), random_state=seed) else: Z = norm.rvs(loc=0, scale=1, size=(M * N, num_gmfs), random_state=seed) # Z has shape (M * N, E) gmfs = numpy.exp(numpy.dot(L, Z) + mu) / PCTG if site_effects: gmfs = amplify_gmfs(imts_, shakemap['vs30'], gmfs) if gmfs.max() > MAX_GMV: logging.warning('There suspiciously large GMVs of %.2fg', gmfs.max()) return imts, gmfs.reshape((M, N, num_gmfs)).transpose(1, 2, 0)
[ "def", "to_gmfs", "(", "shakemap", ",", "spatialcorr", ",", "crosscorr", ",", "site_effects", ",", "trunclevel", ",", "num_gmfs", ",", "seed", ",", "imts", "=", "None", ")", ":", "N", "=", "len", "(", "shakemap", ")", "# number of sites", "std", "=", "sh...
:returns: (IMT-strings, array of GMFs of shape (R, N, E, M)
[ ":", "returns", ":", "(", "IMT", "-", "strings", "array", "of", "GMFs", "of", "shape", "(", "R", "N", "E", "M", ")" ]
python
train
45.775
cozy/python_cozy_management
cozy_management/monitor.py
https://github.com/cozy/python_cozy_management/blob/820cea58458ae3e067fa8cc2da38edbda4681dac/cozy_management/monitor.py#L36-L69
def status(app_name=None, only_cozy=False, as_boolean=False): '''Get apps status :param app_name: If pass app name return this app status :return: dict with all apps status or str with one app status ''' apps = {} # Get all apps status & slip them apps_status = subprocess.Popen('cozy-monitor status', shell=True, stdout=subprocess.PIPE).stdout.read() apps_status = apps_status.split('\n') # Parse result to store them in apps dictionary for app_status in apps_status: if app_status: app_status = ANSI_ESCAPE.sub('', app_status).split(': ') if len(app_status) == 2: current_status = app_status[1] if as_boolean: if app_status[1] == 'up': current_status = True else: current_status = False if only_cozy and app_status[0] not in SYSTEM_APPS: apps[app_status[0]] = current_status else: apps[app_status[0]] = current_status # Return app status if get as param or return all apps status if app_name: return apps.get(app_name, None) else: return apps
[ "def", "status", "(", "app_name", "=", "None", ",", "only_cozy", "=", "False", ",", "as_boolean", "=", "False", ")", ":", "apps", "=", "{", "}", "# Get all apps status & slip them", "apps_status", "=", "subprocess", ".", "Popen", "(", "'cozy-monitor status'", ...
Get apps status :param app_name: If pass app name return this app status :return: dict with all apps status or str with one app status
[ "Get", "apps", "status" ]
python
train
37.529412
saltstack/salt
salt/grains/junos.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/grains/junos.py#L30-L41
def _remove_complex_types(dictionary): ''' Linode-python is now returning some complex types that are not serializable by msgpack. Kill those. ''' for k, v in six.iteritems(dictionary): if isinstance(v, dict): dictionary[k] = _remove_complex_types(v) elif hasattr(v, 'to_eng_string'): dictionary[k] = v.to_eng_string() return dictionary
[ "def", "_remove_complex_types", "(", "dictionary", ")", ":", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "dictionary", ")", ":", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "dictionary", "[", "k", "]", "=", "_remove_complex_types",...
Linode-python is now returning some complex types that are not serializable by msgpack. Kill those.
[ "Linode", "-", "python", "is", "now", "returning", "some", "complex", "types", "that", "are", "not", "serializable", "by", "msgpack", ".", "Kill", "those", "." ]
python
train
32.583333
bitesofcode/projexui
projexui/widgets/xchartwidget/xchartwidgetitem.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xchartwidget/xchartwidgetitem.py#L640-L647
def setKeyColor( self, key, color ): """ Sets the color used when rendering pie charts. :param key | <str> color | <QColor> """ self._keyColors[nativestring(key)] = QColor(color)
[ "def", "setKeyColor", "(", "self", ",", "key", ",", "color", ")", ":", "self", ".", "_keyColors", "[", "nativestring", "(", "key", ")", "]", "=", "QColor", "(", "color", ")" ]
Sets the color used when rendering pie charts. :param key | <str> color | <QColor>
[ "Sets", "the", "color", "used", "when", "rendering", "pie", "charts", ".", ":", "param", "key", "|", "<str", ">", "color", "|", "<QColor", ">" ]
python
train
31.75
ARMmbed/mbed-connector-api-python
mbed_connector_api/mbed_connector_api.py
https://github.com/ARMmbed/mbed-connector-api-python/blob/a5024a01dc67cc192c8bf7a70b251fcf0a3f279b/mbed_connector_api/mbed_connector_api.py#L373-L390
def deleteAllSubscriptions(self): ''' Delete all subscriptions on the domain (all endpoints, all resources) :return: successful ``.status_code`` / ``.is_done``. Check the ``.error`` :rtype: asyncResult ''' result = asyncResult() data = self._deleteURL("/subscriptions/") if data.status_code == 204: #immediate success result.error = False result.is_done = True else: result.error = response_codes("unsubscribe",data.status_code) result.is_done = True result.raw_data = data.content result.status_code = data.status_code return result
[ "def", "deleteAllSubscriptions", "(", "self", ")", ":", "result", "=", "asyncResult", "(", ")", "data", "=", "self", ".", "_deleteURL", "(", "\"/subscriptions/\"", ")", "if", "data", ".", "status_code", "==", "204", ":", "#immediate success", "result", ".", ...
Delete all subscriptions on the domain (all endpoints, all resources) :return: successful ``.status_code`` / ``.is_done``. Check the ``.error`` :rtype: asyncResult
[ "Delete", "all", "subscriptions", "on", "the", "domain", "(", "all", "endpoints", "all", "resources", ")", ":", "return", ":", "successful", ".", "status_code", "/", ".", "is_done", ".", "Check", "the", ".", "error", ":", "rtype", ":", "asyncResult" ]
python
train
30.833333
Toilal/rebulk
rebulk/chain.py
https://github.com/Toilal/rebulk/blob/7511a4671f2fd9493e3df1e5177b7656789069e8/rebulk/chain.py#L139-L156
def string(self, *pattern, **kwargs): """ Add string pattern :param pattern: :type pattern: :param kwargs: :type kwargs: :return: :rtype: """ set_defaults(self._kwargs, kwargs) set_defaults(self._functional_defaults, kwargs) set_defaults(self._defaults, kwargs) pattern = self.rebulk.build_string(*pattern, **kwargs) part = ChainPart(self, pattern) self.parts.append(part) return part
[ "def", "string", "(", "self", ",", "*", "pattern", ",", "*", "*", "kwargs", ")", ":", "set_defaults", "(", "self", ".", "_kwargs", ",", "kwargs", ")", "set_defaults", "(", "self", ".", "_functional_defaults", ",", "kwargs", ")", "set_defaults", "(", "sel...
Add string pattern :param pattern: :type pattern: :param kwargs: :type kwargs: :return: :rtype:
[ "Add", "string", "pattern" ]
python
train
27.555556
google/dotty
efilter/parsers/dottysql/parser.py
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/dottysql/parser.py#L666-L695
def application(self, func): """Parse the function application subgrammar. Function application can, conceptually, be thought of as a mixfix operator, similar to the way array subscripting works. However, it is not clear at this point whether we want to allow it to work as such, because doing so would permit queries to, at runtime, select methods out of an arbitrary object and then call them. While there is a function whitelist and preventing this sort of thing in the syntax isn't a security feature, it still seems like the syntax should make it clear what the intended use of application is. If we later decide to extend DottySQL to allow function application over an arbitrary LHS expression then that syntax would be a strict superset of the current syntax and backwards compatible. """ start = self.tokens.matched.start if self.tokens.accept(common_grammar.rparen): # That was easy. return ast.Apply(func, start=start, end=self.tokens.matched.end, source=self.original) arguments = [self.expression()] while self.tokens.accept(common_grammar.comma): arguments.append(self.expression()) self.tokens.expect(common_grammar.rparen) return ast.Apply(func, *arguments, start=start, end=self.tokens.matched.end, source=self.original)
[ "def", "application", "(", "self", ",", "func", ")", ":", "start", "=", "self", ".", "tokens", ".", "matched", ".", "start", "if", "self", ".", "tokens", ".", "accept", "(", "common_grammar", ".", "rparen", ")", ":", "# That was easy.", "return", "ast", ...
Parse the function application subgrammar. Function application can, conceptually, be thought of as a mixfix operator, similar to the way array subscripting works. However, it is not clear at this point whether we want to allow it to work as such, because doing so would permit queries to, at runtime, select methods out of an arbitrary object and then call them. While there is a function whitelist and preventing this sort of thing in the syntax isn't a security feature, it still seems like the syntax should make it clear what the intended use of application is. If we later decide to extend DottySQL to allow function application over an arbitrary LHS expression then that syntax would be a strict superset of the current syntax and backwards compatible.
[ "Parse", "the", "function", "application", "subgrammar", "." ]
python
train
48.466667
opencobra/memote
memote/support/consistency_helpers.py
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/consistency_helpers.py#L100-L144
def rank(matrix, atol=1e-13, rtol=0): """ Estimate the rank, i.e., the dimension of the column space, of a matrix. The algorithm used by this function is based on the singular value decomposition of `stoichiometry_matrix`. Parameters ---------- matrix : ndarray The matrix should be at most 2-D. A 1-D array with length k will be treated as a 2-D with shape (1, k) atol : float The absolute tolerance for a zero singular value. Singular values smaller than ``atol`` are considered to be zero. rtol : float The relative tolerance for a zero singular value. Singular values less than the relative tolerance times the largest singular value are considered to be zero. Notes ----- If both `atol` and `rtol` are positive, the combined tolerance is the maximum of the two; that is:: tol = max(atol, rtol * smax) Singular values smaller than ``tol`` are considered to be zero. Returns ------- int The estimated rank of the matrix. See Also -------- numpy.linalg.matrix_rank matrix_rank is basically the same as this function, but it does not provide the option of the absolute tolerance. """ matrix = np.atleast_2d(matrix) sigma = svd(matrix, compute_uv=False) tol = max(atol, rtol * sigma[0]) return int((sigma >= tol).sum())
[ "def", "rank", "(", "matrix", ",", "atol", "=", "1e-13", ",", "rtol", "=", "0", ")", ":", "matrix", "=", "np", ".", "atleast_2d", "(", "matrix", ")", "sigma", "=", "svd", "(", "matrix", ",", "compute_uv", "=", "False", ")", "tol", "=", "max", "("...
Estimate the rank, i.e., the dimension of the column space, of a matrix. The algorithm used by this function is based on the singular value decomposition of `stoichiometry_matrix`. Parameters ---------- matrix : ndarray The matrix should be at most 2-D. A 1-D array with length k will be treated as a 2-D with shape (1, k) atol : float The absolute tolerance for a zero singular value. Singular values smaller than ``atol`` are considered to be zero. rtol : float The relative tolerance for a zero singular value. Singular values less than the relative tolerance times the largest singular value are considered to be zero. Notes ----- If both `atol` and `rtol` are positive, the combined tolerance is the maximum of the two; that is:: tol = max(atol, rtol * smax) Singular values smaller than ``tol`` are considered to be zero. Returns ------- int The estimated rank of the matrix. See Also -------- numpy.linalg.matrix_rank matrix_rank is basically the same as this function, but it does not provide the option of the absolute tolerance.
[ "Estimate", "the", "rank", "i", ".", "e", ".", "the", "dimension", "of", "the", "column", "space", "of", "a", "matrix", "." ]
python
train
30.533333
spyder-ide/spyder
spyder/plugins/editor/extensions/closequotes.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/extensions/closequotes.py#L14-L38
def unmatched_quotes_in_line(text): """Return whether a string has open quotes. This simply counts whether the number of quote characters of either type in the string is odd. Take from the IPython project (in IPython/core/completer.py in v0.13) Spyder team: Add some changes to deal with escaped quotes - Copyright (C) 2008-2011 IPython Development Team - Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu> - Copyright (C) 2001 Python Software Foundation, www.python.org Distributed under the terms of the BSD License. """ # We check " first, then ', so complex cases with nested quotes will # get the " to take precedence. text = text.replace("\\'", "") text = text.replace('\\"', '') if text.count('"') % 2: return '"' elif text.count("'") % 2: return "'" else: return ''
[ "def", "unmatched_quotes_in_line", "(", "text", ")", ":", "# We check \" first, then ', so complex cases with nested quotes will", "# get the \" to take precedence.", "text", "=", "text", ".", "replace", "(", "\"\\\\'\"", ",", "\"\"", ")", "text", "=", "text", ".", "repla...
Return whether a string has open quotes. This simply counts whether the number of quote characters of either type in the string is odd. Take from the IPython project (in IPython/core/completer.py in v0.13) Spyder team: Add some changes to deal with escaped quotes - Copyright (C) 2008-2011 IPython Development Team - Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu> - Copyright (C) 2001 Python Software Foundation, www.python.org Distributed under the terms of the BSD License.
[ "Return", "whether", "a", "string", "has", "open", "quotes", "." ]
python
train
34.24
Kortemme-Lab/klab
klab/cloning/cloning.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/cloning/cloning.py#L87-L134
def sanitize_codon_list(codon_list, forbidden_seqs=()): """ Make silent mutations to the given codon lists to remove any undesirable sequences that are present within it. Undesirable sequences include restriction sites, which may be optionally specified as a second argument, and homopolymers above a pre-defined length. The return value is the number of corrections made to the codon list. """ # Unit test missing for: # Homopolymer fixing for codon in codon_list: if len(codon) != 3: raise ValueError("Codons must have exactly 3 bases: '{}'".format(codon)) # Compile a collection of all the sequences we don't want to appear in the # gene. This includes the given restriction sites and their reverse # complements, plus any homopolymers above a pre-defined length. bad_seqs = set() bad_seqs.union( restriction_sites.get(seq, seq) for seq in forbidden_seqs) bad_seqs.union( dna.reverse_complement(seq) for seq in bad_seqs) bad_seqs.union( base * (gen9.homopolymer_max_lengths[base] + 1) for base in dna.dna_bases) bad_seqs = [ dna.dna_to_re(bs) for bs in bad_seqs] # Remove every bad sequence from the gene by making silent mutations to the # codon list. num_corrections = 0 for bad_seq in bad_seqs: while remove_bad_sequence(codon_list, bad_seq, bad_seqs): num_corrections += 1 return num_corrections
[ "def", "sanitize_codon_list", "(", "codon_list", ",", "forbidden_seqs", "=", "(", ")", ")", ":", "# Unit test missing for:", "# Homopolymer fixing", "for", "codon", "in", "codon_list", ":", "if", "len", "(", "codon", ")", "!=", "3", ":", "raise", "ValueError",...
Make silent mutations to the given codon lists to remove any undesirable sequences that are present within it. Undesirable sequences include restriction sites, which may be optionally specified as a second argument, and homopolymers above a pre-defined length. The return value is the number of corrections made to the codon list.
[ "Make", "silent", "mutations", "to", "the", "given", "codon", "lists", "to", "remove", "any", "undesirable", "sequences", "that", "are", "present", "within", "it", ".", "Undesirable", "sequences", "include", "restriction", "sites", "which", "may", "be", "optiona...
python
train
31.5625
ElementAI/greensim
greensim/__init__.py
https://github.com/ElementAI/greensim/blob/f160e8b57d69f6ef469f2e991cc07b7721e08a91/greensim/__init__.py#L480-L500
def _bind_and_call_constructor(self, t: type, *args) -> None: """ Accesses the __init__ method of a type directly and calls it with *args This allows the constructors of both superclasses to be called, as described in get_binding.md This could be done using two calls to super() with a hack based on how Python searches __mro__: ``` super().__init__(run, parent) # calls greenlet.greenlet.__init__ super(greenlet.greenlet, self).__init__() # calls TaggedObject.__init__ ``` Python will always find greenlet.greenlet first since it is specified first, but will ignore it if it is the first argument to super, which is meant to indicate the subclass and thus is not meant to be called on See: https://docs.python.org/3.7/library/functions.html#super This is indirect, confusing, and not in following with the purpose of super(), so the direct method was used """ t.__init__.__get__(self)(*args)
[ "def", "_bind_and_call_constructor", "(", "self", ",", "t", ":", "type", ",", "*", "args", ")", "->", "None", ":", "t", ".", "__init__", ".", "__get__", "(", "self", ")", "(", "*", "args", ")" ]
Accesses the __init__ method of a type directly and calls it with *args This allows the constructors of both superclasses to be called, as described in get_binding.md This could be done using two calls to super() with a hack based on how Python searches __mro__: ``` super().__init__(run, parent) # calls greenlet.greenlet.__init__ super(greenlet.greenlet, self).__init__() # calls TaggedObject.__init__ ``` Python will always find greenlet.greenlet first since it is specified first, but will ignore it if it is the first argument to super, which is meant to indicate the subclass and thus is not meant to be called on See: https://docs.python.org/3.7/library/functions.html#super This is indirect, confusing, and not in following with the purpose of super(), so the direct method was used
[ "Accesses", "the", "__init__", "method", "of", "a", "type", "directly", "and", "calls", "it", "with", "*", "args" ]
python
train
47.142857
heroku/heroku.py
heroku/structures.py
https://github.com/heroku/heroku.py/blob/cadc0a074896cf29c65a457c5c5bdb2069470af0/heroku/structures.py#L108-L116
def clear(self): """Removes all SSH keys from a user's system.""" r = self._h._http_resource( method='DELETE', resource=('user', 'keys'), ) return r.ok
[ "def", "clear", "(", "self", ")", ":", "r", "=", "self", ".", "_h", ".", "_http_resource", "(", "method", "=", "'DELETE'", ",", "resource", "=", "(", "'user'", ",", "'keys'", ")", ",", ")", "return", "r", ".", "ok" ]
Removes all SSH keys from a user's system.
[ "Removes", "all", "SSH", "keys", "from", "a", "user", "s", "system", "." ]
python
train
22.333333
galaxy-genome-annotation/python-apollo
arrow/commands/annotations/set_symbol.py
https://github.com/galaxy-genome-annotation/python-apollo/blob/2bc9991302abe4402ec2885dcaac35915475b387/arrow/commands/annotations/set_symbol.py#L22-L29
def cli(ctx, feature_id, symbol, organism="", sequence=""): """Set a feature's description Output: A standard apollo feature dictionary ({"features": [{...}]}) """ return ctx.gi.annotations.set_symbol(feature_id, symbol, organism=organism, sequence=sequence)
[ "def", "cli", "(", "ctx", ",", "feature_id", ",", "symbol", ",", "organism", "=", "\"\"", ",", "sequence", "=", "\"\"", ")", ":", "return", "ctx", ".", "gi", ".", "annotations", ".", "set_symbol", "(", "feature_id", ",", "symbol", ",", "organism", "=",...
Set a feature's description Output: A standard apollo feature dictionary ({"features": [{...}]})
[ "Set", "a", "feature", "s", "description" ]
python
train
33.625
mlavin/django-all-access
allaccess/clients.py
https://github.com/mlavin/django-all-access/blob/4b15b6c9dedf8080a7c477e0af1142c609ec5598/allaccess/clients.py#L99-L109
def get_redirect_args(self, request, callback): "Get request parameters for redirect url." callback = force_text(request.build_absolute_uri(callback)) raw_token = self.get_request_token(request, callback) token, secret = self.parse_raw_token(raw_token) if token is not None and secret is not None: request.session[self.session_key] = raw_token return { 'oauth_token': token, 'oauth_callback': callback, }
[ "def", "get_redirect_args", "(", "self", ",", "request", ",", "callback", ")", ":", "callback", "=", "force_text", "(", "request", ".", "build_absolute_uri", "(", "callback", ")", ")", "raw_token", "=", "self", ".", "get_request_token", "(", "request", ",", ...
Get request parameters for redirect url.
[ "Get", "request", "parameters", "for", "redirect", "url", "." ]
python
train
44.181818
PredixDev/predixpy
predix/security/acs.py
https://github.com/PredixDev/predixpy/blob/a0cb34cf40f716229351bb6d90d6ecace958c81f/predix/security/acs.py#L241-L268
def add_subject(self, subject_id, attributes, parents=[], issuer='default'): """ Will add the given subject with a given identifier and attribute dictionary. example/ add_subject('/user/j12y', {'username': 'j12y'}) """ # MAINT: consider test to avoid adding duplicate subject id assert isinstance(attributes, (dict)), "attributes expected to be dict" attrs = [] for key in attributes.keys(): attrs.append({ 'issuer': issuer, 'name': key, 'value': attributes[key] }) body = { "subjectIdentifier": subject_id, "parents": parents, "attributes": attrs, } return self._put_subject(subject_id, body)
[ "def", "add_subject", "(", "self", ",", "subject_id", ",", "attributes", ",", "parents", "=", "[", "]", ",", "issuer", "=", "'default'", ")", ":", "# MAINT: consider test to avoid adding duplicate subject id", "assert", "isinstance", "(", "attributes", ",", "(", "...
Will add the given subject with a given identifier and attribute dictionary. example/ add_subject('/user/j12y', {'username': 'j12y'})
[ "Will", "add", "the", "given", "subject", "with", "a", "given", "identifier", "and", "attribute", "dictionary", "." ]
python
train
28.964286
pypa/pipenv
pipenv/vendor/vistir/compat.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/vistir/compat.py#L220-L236
def _get_path(path): """ Fetch the string value from a path-like object Returns **None** if there is no string value. """ if isinstance(path, (six.string_types, bytes)): return path path_type = type(path) try: path_repr = path_type.__fspath__(path) except AttributeError: return if isinstance(path_repr, (six.string_types, bytes)): return path_repr return
[ "def", "_get_path", "(", "path", ")", ":", "if", "isinstance", "(", "path", ",", "(", "six", ".", "string_types", ",", "bytes", ")", ")", ":", "return", "path", "path_type", "=", "type", "(", "path", ")", "try", ":", "path_repr", "=", "path_type", "....
Fetch the string value from a path-like object Returns **None** if there is no string value.
[ "Fetch", "the", "string", "value", "from", "a", "path", "-", "like", "object" ]
python
train
24.294118
javipalanca/spade
spade/presence.py
https://github.com/javipalanca/spade/blob/59942bd1a1edae4c807d06cabb178d5630cbf61b/spade/presence.py#L94-L103
def set_available(self, show=None): """ Sets the agent availability to True. Args: show (aioxmpp.PresenceShow, optional): the show state of the presence (Default value = None) """ show = self.state.show if show is None else show self.set_presence(PresenceState(available=True, show=show))
[ "def", "set_available", "(", "self", ",", "show", "=", "None", ")", ":", "show", "=", "self", ".", "state", ".", "show", "if", "show", "is", "None", "else", "show", "self", ".", "set_presence", "(", "PresenceState", "(", "available", "=", "True", ",", ...
Sets the agent availability to True. Args: show (aioxmpp.PresenceShow, optional): the show state of the presence (Default value = None)
[ "Sets", "the", "agent", "availability", "to", "True", "." ]
python
train
33.9
alvations/pywsd
pywsd/semeval.py
https://github.com/alvations/pywsd/blob/4c12394c8adbcfed71dd912bdbef2e36370821bf/pywsd/semeval.py#L55-L76
def get_answers(self): """ Returns a {(key,value), ...} dictionary of {(instance_id,Answer),...)} >>> coarse_wsd = SemEval2007_Coarse_WSD() >>> inst2ans = coarse_wsd.get_answers() >>> for inst in inst2ans: ... print inst, inst2ans[inst ... break """ inst2ans = {} with io.open(self.test_ans, 'r') as fin: for line in fin: line, _, lemma = line.strip().rpartition(' !! ') lemma, pos = lemma[6:].split('#') textid, _, line = line.partition(' ') instid, _, line = line.partition(' ') sensekey = line.split() # What to do if there is no synset to convert to... # synsetkey = [semcor_to_synset(i) for i in sensekey] inst2ans[instid] = Answer(sensekey, lemma, pos) return inst2ans
[ "def", "get_answers", "(", "self", ")", ":", "inst2ans", "=", "{", "}", "with", "io", ".", "open", "(", "self", ".", "test_ans", ",", "'r'", ")", "as", "fin", ":", "for", "line", "in", "fin", ":", "line", ",", "_", ",", "lemma", "=", "line", "....
Returns a {(key,value), ...} dictionary of {(instance_id,Answer),...)} >>> coarse_wsd = SemEval2007_Coarse_WSD() >>> inst2ans = coarse_wsd.get_answers() >>> for inst in inst2ans: ... print inst, inst2ans[inst ... break
[ "Returns", "a", "{", "(", "key", "value", ")", "...", "}", "dictionary", "of", "{", "(", "instance_id", "Answer", ")", "...", ")", "}", ">>>", "coarse_wsd", "=", "SemEval2007_Coarse_WSD", "()", ">>>", "inst2ans", "=", "coarse_wsd", ".", "get_answers", "()"...
python
train
40.363636
pymupdf/PyMuPDF
fitz/fitz.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L586-L588
def isInfinite(self): """Check if rectangle is infinite.""" return self.x0 > self.x1 or self.y0 > self.y1
[ "def", "isInfinite", "(", "self", ")", ":", "return", "self", ".", "x0", ">", "self", ".", "x1", "or", "self", ".", "y0", ">", "self", ".", "y1" ]
Check if rectangle is infinite.
[ "Check", "if", "rectangle", "is", "infinite", "." ]
python
train
39.666667
jashandeep-sohi/python-blowfish
blowfish.py
https://github.com/jashandeep-sohi/python-blowfish/blob/5ce7f6d54dcef7efd715b26f9a9ffee0d543047e/blowfish.py#L826-L900
def decrypt_cbc_cts(self, data, init_vector): """ Return an iterator that decrypts `data` using the Cipher-Block Chaining with Ciphertext Stealing (CBC-CTS) mode of operation. CBC-CTS mode can only operate on `data` that is greater than 8 bytes in length. Each iteration, except the last, always returns a block-sized :obj:`bytes` object (i.e. 8 bytes). The last iteration may return a :obj:`bytes` object with a length less than the block-size, if `data` is not a multiple of the block-size in length. `data` should be a :obj:`bytes`-like object that is greater than 8 bytes in length. If it is not, a :exc:`ValueError` exception is raised. `init_vector` is the initialization vector and should be a :obj:`bytes`-like object with exactly 8 bytes. If it is not, a :exc:`ValueError` exception is raised. """ data_len = len(data) if data_len <= 8: raise ValueError("data is not greater than 8 bytes in length") S1, S2, S3, S4 = self.S P = self.P u4_1_pack = self._u4_1_pack u1_4_unpack = self._u1_4_unpack u4_2_pack = self._u4_2_pack u4_2_unpack = self._u4_2_unpack decrypt = self._decrypt try: prev_cipher_L, prev_cipher_R = u4_2_unpack(init_vector) except struct_error: raise ValueError("initialization vector is not 8 bytes in length") extra_bytes = data_len % 8 last_block_stop_i = data_len - extra_bytes last_block_start_i = last_block_stop_i - 8 for cipher_L, cipher_R in self._u4_2_iter_unpack( data[0:last_block_start_i] ): L, R = decrypt( cipher_L, cipher_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack ) yield u4_2_pack(L ^ prev_cipher_L, R ^ prev_cipher_R) prev_cipher_L = cipher_L prev_cipher_R = cipher_R cipher_L, cipher_R = u4_2_unpack(data[last_block_start_i:last_block_stop_i]) L, R = decrypt( cipher_L, cipher_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack ) C_L, C_R = u4_2_unpack(data[last_block_stop_i:] + bytes(8 - extra_bytes)) Xn = u4_2_pack(L ^ C_L, R ^ C_R) E_L, E_R = u4_2_unpack(data[last_block_stop_i:] + Xn[extra_bytes:]) L, R = decrypt( E_L, E_R, P, S1, S2, S3, S4, 
u4_1_pack, u1_4_unpack ) yield u4_2_pack(L ^ prev_cipher_L, R ^ prev_cipher_R) yield Xn[:extra_bytes]
[ "def", "decrypt_cbc_cts", "(", "self", ",", "data", ",", "init_vector", ")", ":", "data_len", "=", "len", "(", "data", ")", "if", "data_len", "<=", "8", ":", "raise", "ValueError", "(", "\"data is not greater than 8 bytes in length\"", ")", "S1", ",", "S2", ...
Return an iterator that decrypts `data` using the Cipher-Block Chaining with Ciphertext Stealing (CBC-CTS) mode of operation. CBC-CTS mode can only operate on `data` that is greater than 8 bytes in length. Each iteration, except the last, always returns a block-sized :obj:`bytes` object (i.e. 8 bytes). The last iteration may return a :obj:`bytes` object with a length less than the block-size, if `data` is not a multiple of the block-size in length. `data` should be a :obj:`bytes`-like object that is greater than 8 bytes in length. If it is not, a :exc:`ValueError` exception is raised. `init_vector` is the initialization vector and should be a :obj:`bytes`-like object with exactly 8 bytes. If it is not, a :exc:`ValueError` exception is raised.
[ "Return", "an", "iterator", "that", "decrypts", "data", "using", "the", "Cipher", "-", "Block", "Chaining", "with", "Ciphertext", "Stealing", "(", "CBC", "-", "CTS", ")", "mode", "of", "operation", ".", "CBC", "-", "CTS", "mode", "can", "only", "operate", ...
python
train
31.64
pantsbuild/pants
src/python/pants/pantsd/process_manager.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/pantsd/process_manager.py#L337-L340
def write_pid(self, pid=None): """Write the current processes PID to the pidfile location""" pid = pid or os.getpid() self.write_metadata_by_name(self._name, 'pid', str(pid))
[ "def", "write_pid", "(", "self", ",", "pid", "=", "None", ")", ":", "pid", "=", "pid", "or", "os", ".", "getpid", "(", ")", "self", ".", "write_metadata_by_name", "(", "self", ".", "_name", ",", "'pid'", ",", "str", "(", "pid", ")", ")" ]
Write the current processes PID to the pidfile location
[ "Write", "the", "current", "processes", "PID", "to", "the", "pidfile", "location" ]
python
train
45.75
cheind/tf-matplotlib
tfmpl/create.py
https://github.com/cheind/tf-matplotlib/blob/c6904d3d2d306d9a479c24fbcb1f674a57dafd0e/tfmpl/create.py#L9-L23
def create_figure(*fig_args, **fig_kwargs): '''Create a single figure. Args and Kwargs are passed to `matplotlib.figure.Figure`. This routine is provided in order to avoid usage of pyplot which is stateful and not thread safe. As drawing routines in tf-matplotlib are called from py-funcs in their respective thread, avoid usage of pyplot where possible. ''' fig = Figure(*fig_args, **fig_kwargs) # Attach canvas FigureCanvas(fig) return fig
[ "def", "create_figure", "(", "*", "fig_args", ",", "*", "*", "fig_kwargs", ")", ":", "fig", "=", "Figure", "(", "*", "fig_args", ",", "*", "*", "fig_kwargs", ")", "# Attach canvas", "FigureCanvas", "(", "fig", ")", "return", "fig" ]
Create a single figure. Args and Kwargs are passed to `matplotlib.figure.Figure`. This routine is provided in order to avoid usage of pyplot which is stateful and not thread safe. As drawing routines in tf-matplotlib are called from py-funcs in their respective thread, avoid usage of pyplot where possible.
[ "Create", "a", "single", "figure", "." ]
python
train
31.866667
pybel/pybel-tools
src/pybel_tools/summary/edge_summary.py
https://github.com/pybel/pybel-tools/blob/3491adea0ac4ee60f57275ef72f9b73da6dbfe0c/src/pybel_tools/summary/edge_summary.py#L145-L152
def get_contradictory_pairs(graph: BELGraph) -> Iterable[Tuple[BaseEntity, BaseEntity]]: """Iterates over contradictory node pairs in the graph based on their causal relationships :return: An iterator over (source, target) node pairs that have contradictory causal edges """ for u, v in graph.edges(): if pair_has_contradiction(graph, u, v): yield u, v
[ "def", "get_contradictory_pairs", "(", "graph", ":", "BELGraph", ")", "->", "Iterable", "[", "Tuple", "[", "BaseEntity", ",", "BaseEntity", "]", "]", ":", "for", "u", ",", "v", "in", "graph", ".", "edges", "(", ")", ":", "if", "pair_has_contradiction", "...
Iterates over contradictory node pairs in the graph based on their causal relationships :return: An iterator over (source, target) node pairs that have contradictory causal edges
[ "Iterates", "over", "contradictory", "node", "pairs", "in", "the", "graph", "based", "on", "their", "causal", "relationships", ":", "return", ":", "An", "iterator", "over", "(", "source", "target", ")", "node", "pairs", "that", "have", "contradictory", "causal...
python
valid
48.25
Esri/ArcREST
src/arcresthelper/common.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcresthelper/common.py#L309-L343
def online_time_to_string(value, timeFormat, utcOffset=0): """Converts AGOL timestamp to formatted string. Args: value (float): A UTC timestamp as reported by AGOL (time in ms since Unix epoch * 1000) timeFormat (str): Date/Time format string as parsed by :py:func:`datetime.strftime`. utcOffset (int): Hours difference from UTC and desired output. Default is 0 (remain in UTC). Returns: str: A string representation of the timestamp. Examples: >>> arcresthelper.common.online_time_to_string(1457167261000.0, "%Y-%m-%d %H:%M:%S") '2016-03-05 00:41:01' >>> arcresthelper.common.online_time_to_string(731392515000.0, '%m/%d/%Y %H:%M:%S', -8) # PST is UTC-8:00 '03/05/1993 12:35:15' See Also: :py:func:`local_time_to_online` for converting a :py:class:`datetime.datetime` object to AGOL timestamp """ try: return datetime.datetime.fromtimestamp(value/1000 + utcOffset*3600).strftime(timeFormat) except: line, filename, synerror = trace() raise ArcRestHelperError({ "function": "online_time_to_string", "line": line, "filename": filename, "synerror": synerror, } ) finally: pass
[ "def", "online_time_to_string", "(", "value", ",", "timeFormat", ",", "utcOffset", "=", "0", ")", ":", "try", ":", "return", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "value", "/", "1000", "+", "utcOffset", "*", "3600", ")", ".", "strftime", ...
Converts AGOL timestamp to formatted string. Args: value (float): A UTC timestamp as reported by AGOL (time in ms since Unix epoch * 1000) timeFormat (str): Date/Time format string as parsed by :py:func:`datetime.strftime`. utcOffset (int): Hours difference from UTC and desired output. Default is 0 (remain in UTC). Returns: str: A string representation of the timestamp. Examples: >>> arcresthelper.common.online_time_to_string(1457167261000.0, "%Y-%m-%d %H:%M:%S") '2016-03-05 00:41:01' >>> arcresthelper.common.online_time_to_string(731392515000.0, '%m/%d/%Y %H:%M:%S', -8) # PST is UTC-8:00 '03/05/1993 12:35:15' See Also: :py:func:`local_time_to_online` for converting a :py:class:`datetime.datetime` object to AGOL timestamp
[ "Converts", "AGOL", "timestamp", "to", "formatted", "string", "." ]
python
train
38.228571
dcramer/django-ratings
djangoratings/__init__.py
https://github.com/dcramer/django-ratings/blob/4d00dedc920a4e32d650dc12d5f480c51fc6216c/djangoratings/__init__.py#L16-L26
def get_revision(): """ :returns: Revision number of this branch/checkout, if available. None if no revision number can be determined. """ package_dir = os.path.dirname(__file__) checkout_dir = os.path.normpath(os.path.join(package_dir, '..')) path = os.path.join(checkout_dir, '.git') if os.path.exists(path): return _get_git_revision(path) return None
[ "def", "get_revision", "(", ")", ":", "package_dir", "=", "os", ".", "path", ".", "dirname", "(", "__file__", ")", "checkout_dir", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "package_dir", ",", "'..'", ")", ")"...
:returns: Revision number of this branch/checkout, if available. None if no revision number can be determined.
[ ":", "returns", ":", "Revision", "number", "of", "this", "branch", "/", "checkout", "if", "available", ".", "None", "if", "no", "revision", "number", "can", "be", "determined", "." ]
python
train
35.545455
mitsei/dlkit
dlkit/json_/grading/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/grading/objects.py#L659-L677
def set_lowest_numeric_score(self, score): """Sets the lowest numeric score. arg: score (decimal): the lowest numeric score raise: InvalidArgument - ``score`` is invalid raise: NoAccess - ``score`` cannot be modified *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.grading.GradeSystemForm.set_lowest_numeric_score if self.get_lowest_numeric_score_metadata().is_read_only(): raise errors.NoAccess() try: score = float(score) except ValueError: raise errors.InvalidArgument() if not self._is_valid_decimal(score, self.get_lowest_numeric_score_metadata()): raise errors.InvalidArgument() self._my_map['lowestNumericScore'] = score
[ "def", "set_lowest_numeric_score", "(", "self", ",", "score", ")", ":", "# Implemented from template for osid.grading.GradeSystemForm.set_lowest_numeric_score", "if", "self", ".", "get_lowest_numeric_score_metadata", "(", ")", ".", "is_read_only", "(", ")", ":", "raise", "e...
Sets the lowest numeric score. arg: score (decimal): the lowest numeric score raise: InvalidArgument - ``score`` is invalid raise: NoAccess - ``score`` cannot be modified *compliance: mandatory -- This method must be implemented.*
[ "Sets", "the", "lowest", "numeric", "score", "." ]
python
train
42.789474
ltworf/typedload
typedload/dataloader.py
https://github.com/ltworf/typedload/blob/7fd130612963bfcec3242698463ef863ca4af927/typedload/dataloader.py#L436-L464
def _enumload(l: Loader, value, type_) -> Enum: """ This loads something into an Enum. It tries with basic types first. If that fails, it tries to look for type annotations inside the Enum, and tries to use those to load the value into something that is compatible with the Enum. Of course if that fails too, a ValueError is raised. """ try: # Try naïve conversion return type_(value) except: pass # Try with the typing hints for _, t in get_type_hints(type_).items(): try: return type_(l.load(value, t)) except: pass raise TypedloadValueError( 'Value could not be loaded into %s' % type_, value=value, type_=type_ )
[ "def", "_enumload", "(", "l", ":", "Loader", ",", "value", ",", "type_", ")", "->", "Enum", ":", "try", ":", "# Try naïve conversion", "return", "type_", "(", "value", ")", "except", ":", "pass", "# Try with the typing hints", "for", "_", ",", "t", "in", ...
This loads something into an Enum. It tries with basic types first. If that fails, it tries to look for type annotations inside the Enum, and tries to use those to load the value into something that is compatible with the Enum. Of course if that fails too, a ValueError is raised.
[ "This", "loads", "something", "into", "an", "Enum", "." ]
python
train
25.37931
Robpol86/colorclass
colorclass/core.py
https://github.com/Robpol86/colorclass/blob/692e2d6f5ad470b6221c8cb9641970dc5563a572/colorclass/core.py#L10-L23
def apply_text(incoming, func): """Call `func` on text portions of incoming color string. :param iter incoming: Incoming string/ColorStr/string-like object to iterate. :param func: Function to call with string portion as first and only parameter. :return: Modified string, same class type as incoming string. """ split = RE_SPLIT.split(incoming) for i, item in enumerate(split): if not item or RE_SPLIT.match(item): continue split[i] = func(item) return incoming.__class__().join(split)
[ "def", "apply_text", "(", "incoming", ",", "func", ")", ":", "split", "=", "RE_SPLIT", ".", "split", "(", "incoming", ")", "for", "i", ",", "item", "in", "enumerate", "(", "split", ")", ":", "if", "not", "item", "or", "RE_SPLIT", ".", "match", "(", ...
Call `func` on text portions of incoming color string. :param iter incoming: Incoming string/ColorStr/string-like object to iterate. :param func: Function to call with string portion as first and only parameter. :return: Modified string, same class type as incoming string.
[ "Call", "func", "on", "text", "portions", "of", "incoming", "color", "string", "." ]
python
train
38.214286
Qiskit/qiskit-api-py
IBMQuantumExperience/IBMQuantumExperience.py
https://github.com/Qiskit/qiskit-api-py/blob/2ab240110fb7e653254e44c4833f3643e8ae7f0f/IBMQuantumExperience/IBMQuantumExperience.py#L771-L800
def get_jobs(self, limit=10, skip=0, backend=None, only_completed=False, filter=None, hub=None, group=None, project=None, access_token=None, user_id=None): """ Get the information about the user jobs """ if access_token: self.req.credential.set_token(access_token) if user_id: self.req.credential.set_user_id(user_id) if not self.check_credentials(): return {"error": "Not credentials valid"} url = get_job_url(self.config, hub, group, project) url_filter = '&filter=' query = { "order": "creationDate DESC", "limit": limit, "skip": skip, "where" : {} } if filter is not None: query['where'] = filter else: if backend is not None: query['where']['backend.name'] = backend if only_completed: query['where']['status'] = 'COMPLETED' url_filter = url_filter + json.dumps(query) jobs = self.req.get(url, url_filter) return jobs
[ "def", "get_jobs", "(", "self", ",", "limit", "=", "10", ",", "skip", "=", "0", ",", "backend", "=", "None", ",", "only_completed", "=", "False", ",", "filter", "=", "None", ",", "hub", "=", "None", ",", "group", "=", "None", ",", "project", "=", ...
Get the information about the user jobs
[ "Get", "the", "information", "about", "the", "user", "jobs" ]
python
train
34.9
denisenkom/pytds
src/pytds/tds.py
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds.py#L889-L906
def querying_context(self, packet_type): """ Context manager for querying. Sets state to TDS_QUERYING, and reverts it to TDS_IDLE if exception happens inside managed block, and to TDS_PENDING if managed block succeeds and flushes buffer. """ if self.set_state(tds_base.TDS_QUERYING) != tds_base.TDS_QUERYING: raise tds_base.Error("Couldn't switch to state") self._writer.begin_packet(packet_type) try: yield except: if self.state != tds_base.TDS_DEAD: self.set_state(tds_base.TDS_IDLE) raise else: self.set_state(tds_base.TDS_PENDING) self._writer.flush()
[ "def", "querying_context", "(", "self", ",", "packet_type", ")", ":", "if", "self", ".", "set_state", "(", "tds_base", ".", "TDS_QUERYING", ")", "!=", "tds_base", ".", "TDS_QUERYING", ":", "raise", "tds_base", ".", "Error", "(", "\"Couldn't switch to state\"", ...
Context manager for querying. Sets state to TDS_QUERYING, and reverts it to TDS_IDLE if exception happens inside managed block, and to TDS_PENDING if managed block succeeds and flushes buffer.
[ "Context", "manager", "for", "querying", "." ]
python
train
38.833333
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_gimbal.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_gimbal.py#L58-L74
def cmd_gimbal_mode(self, args): '''control gimbal mode''' if len(args) != 1: print("usage: gimbal mode <GPS|MAVLink>") return if args[0].upper() == 'GPS': mode = mavutil.mavlink.MAV_MOUNT_MODE_GPS_POINT elif args[0].upper() == 'MAVLINK': mode = mavutil.mavlink.MAV_MOUNT_MODE_MAVLINK_TARGETING elif args[0].upper() == 'RC': mode = mavutil.mavlink.MAV_MOUNT_MODE_RC_TARGETING else: print("Unsupported mode %s" % args[0]) self.master.mav.mount_configure_send(self.target_system, self.target_component, mode, 1, 1, 1)
[ "def", "cmd_gimbal_mode", "(", "self", ",", "args", ")", ":", "if", "len", "(", "args", ")", "!=", "1", ":", "print", "(", "\"usage: gimbal mode <GPS|MAVLink>\"", ")", "return", "if", "args", "[", "0", "]", ".", "upper", "(", ")", "==", "'GPS'", ":", ...
control gimbal mode
[ "control", "gimbal", "mode" ]
python
train
44.823529
Jaymon/prom
prom/interface/postgres.py
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/interface/postgres.py#L292-L314
def _set_index(self, schema, name, fields, **index_options): """ NOTE -- we set the index name using <table_name>_<name> format since indexes have to have a globally unique name in postgres http://www.postgresql.org/docs/9.1/static/sql-createindex.html """ index_fields = [] for field_name in fields: field = schema.fields[field_name] if issubclass(field.type, basestring): if field.options.get('ignore_case', False): field_name = 'UPPER({})'.format(self._normalize_name(field_name)) index_fields.append(field_name) query_str = 'CREATE {}INDEX {} ON {} USING BTREE ({})'.format( 'UNIQUE ' if index_options.get('unique', False) else '', self._normalize_name("{}_{}".format(schema, name)), self._normalize_table_name(schema), ', '.join(index_fields) ) return self.query(query_str, ignore_result=True, **index_options)
[ "def", "_set_index", "(", "self", ",", "schema", ",", "name", ",", "fields", ",", "*", "*", "index_options", ")", ":", "index_fields", "=", "[", "]", "for", "field_name", "in", "fields", ":", "field", "=", "schema", ".", "fields", "[", "field_name", "]...
NOTE -- we set the index name using <table_name>_<name> format since indexes have to have a globally unique name in postgres http://www.postgresql.org/docs/9.1/static/sql-createindex.html
[ "NOTE", "--", "we", "set", "the", "index", "name", "using", "<table_name", ">", "_<name", ">", "format", "since", "indexes", "have", "to", "have", "a", "globally", "unique", "name", "in", "postgres" ]
python
train
43.304348
dokterbob/django-multilingual-model
multilingual_model/models.py
https://github.com/dokterbob/django-multilingual-model/blob/2479b2c3d6f7b697e95aa1e082c8bc8699f1f638/multilingual_model/models.py#L44-L90
def _get_translation(self, field, code): """ Gets the translation of a specific field for a specific language code. This raises ObjectDoesNotExist if the lookup was unsuccesful. As of today, this stuff is cached. As the cache is rather aggressive it might cause rather strange effects. However, we would see the same effects when an ordinary object is changed which is already in memory: the old state would remain. """ if not code in self._translation_cache: translations = self.translations.select_related() logger.debug( u'Matched with field %s for language %s. Attempting lookup.', field, code ) try: translation_obj = translations.get(language_code=code) except ObjectDoesNotExist: translation_obj = None self._translation_cache[code] = translation_obj logger.debug(u'Translation not found in cache.') else: logger.debug(u'Translation found in cache.') # Get the translation from the cache translation_obj = self._translation_cache.get(code) # If this is none, it means that a translation does not exist # It is important to cache this one as well if not translation_obj: raise ObjectDoesNotExist field_value = getattr(translation_obj, field) logger.debug( u'Found translation object %s, returning value %s.', translation_obj, field_value ) return field_value
[ "def", "_get_translation", "(", "self", ",", "field", ",", "code", ")", ":", "if", "not", "code", "in", "self", ".", "_translation_cache", ":", "translations", "=", "self", ".", "translations", ".", "select_related", "(", ")", "logger", ".", "debug", "(", ...
Gets the translation of a specific field for a specific language code. This raises ObjectDoesNotExist if the lookup was unsuccesful. As of today, this stuff is cached. As the cache is rather aggressive it might cause rather strange effects. However, we would see the same effects when an ordinary object is changed which is already in memory: the old state would remain.
[ "Gets", "the", "translation", "of", "a", "specific", "field", "for", "a", "specific", "language", "code", "." ]
python
train
33.744681
flatangle/flatlib
flatlib/predictives/returns.py
https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/predictives/returns.py#L18-L26
def _computeChart(chart, date): """ Internal function to return a new chart for a specific date using properties from old chart. """ pos = chart.pos hsys = chart.hsys IDs = [obj.id for obj in chart.objects] return Chart(date, pos, IDs=IDs, hsys=hsys)
[ "def", "_computeChart", "(", "chart", ",", "date", ")", ":", "pos", "=", "chart", ".", "pos", "hsys", "=", "chart", ".", "hsys", "IDs", "=", "[", "obj", ".", "id", "for", "obj", "in", "chart", ".", "objects", "]", "return", "Chart", "(", "date", ...
Internal function to return a new chart for a specific date using properties from old chart.
[ "Internal", "function", "to", "return", "a", "new", "chart", "for", "a", "specific", "date", "using", "properties", "from", "old", "chart", "." ]
python
train
30.555556
tango-controls/pytango
tango/databaseds/database.py
https://github.com/tango-controls/pytango/blob/9cf78c517c9cdc1081ff6d080a9646a740cc1d36/tango/databaseds/database.py#L1096-L1104
def DbGetClassForDevice(self, argin): """ Get Tango class for the specified device. :param argin: Device name :type: tango.DevString :return: Device Tango class :rtype: tango.DevString """ self._log.debug("In DbGetClassForDevice()") return self.db.get_class_for_device(argin)
[ "def", "DbGetClassForDevice", "(", "self", ",", "argin", ")", ":", "self", ".", "_log", ".", "debug", "(", "\"In DbGetClassForDevice()\"", ")", "return", "self", ".", "db", ".", "get_class_for_device", "(", "argin", ")" ]
Get Tango class for the specified device. :param argin: Device name :type: tango.DevString :return: Device Tango class :rtype: tango.DevString
[ "Get", "Tango", "class", "for", "the", "specified", "device", "." ]
python
train
36
numenta/htmresearch
htmresearch/frameworks/sp_paper/sp_metrics.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/sp_paper/sp_metrics.py#L621-L673
def inspectSpatialPoolerStats(sp, inputVectors, saveFigPrefix=None): """ Inspect the statistics of a spatial pooler given a set of input vectors @param sp: an spatial pooler instance @param inputVectors: a set of input vectors """ numInputVector, inputSize = inputVectors.shape numColumns = np.prod(sp.getColumnDimensions()) outputColumns = np.zeros((numInputVector, numColumns), dtype=uintType) inputOverlap = np.zeros((numInputVector, numColumns), dtype=uintType) connectedCounts = np.zeros((numColumns, ), dtype=uintType) sp.getConnectedCounts(connectedCounts) winnerInputOverlap = np.zeros(numInputVector) for i in range(numInputVector): sp.compute(inputVectors[i][:], False, outputColumns[i][:]) inputOverlap[i][:] = sp.getOverlaps() activeColumns = np.where(outputColumns[i][:] > 0)[0] if len(activeColumns) > 0: winnerInputOverlap[i] = np.mean( inputOverlap[i][np.where(outputColumns[i][:] > 0)[0]]) avgInputOverlap = np.mean(inputOverlap, 0) entropy = calculateEntropy(outputColumns) activationProb = np.mean(outputColumns.astype(realDType), 0) dutyCycleDist, binEdge = np.histogram(activationProb, bins=10, range=[-0.005, 0.095]) dutyCycleDist = dutyCycleDist.astype('float32') / np.sum(dutyCycleDist) binCenter = (binEdge[1:] + binEdge[:-1])/2 fig, axs = plt.subplots(2, 2) axs[0, 0].hist(connectedCounts) axs[0, 0].set_xlabel('# Connected Synapse') axs[0, 1].hist(winnerInputOverlap) axs[0, 1].set_xlabel('# winner input overlap') axs[1, 0].bar(binEdge[:-1]+0.001, dutyCycleDist, width=.008) axs[1, 0].set_xlim([-0.005, .1]) axs[1, 0].set_xlabel('Activation Frequency') axs[1, 0].set_title('Entropy: {}'.format(entropy)) axs[1, 1].plot(connectedCounts, activationProb, '.') axs[1, 1].set_xlabel('connection #') axs[1, 1].set_ylabel('activation freq') plt.tight_layout() if saveFigPrefix is not None: plt.savefig('figures/{}_network_stats.pdf'.format(saveFigPrefix)) return fig
[ "def", "inspectSpatialPoolerStats", "(", "sp", ",", "inputVectors", ",", "saveFigPrefix", "=", "None", ")", ":", "numInputVector", ",", "inputSize", "=", "inputVectors", ".", "shape", "numColumns", "=", "np", ".", "prod", "(", "sp", ".", "getColumnDimensions", ...
Inspect the statistics of a spatial pooler given a set of input vectors @param sp: an spatial pooler instance @param inputVectors: a set of input vectors
[ "Inspect", "the", "statistics", "of", "a", "spatial", "pooler", "given", "a", "set", "of", "input", "vectors" ]
python
train
37.490566
twilio/twilio-python
twilio/rest/serverless/v1/service/environment/variable.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/serverless/v1/service/environment/variable.py#L281-L305
def update(self, key=values.unset, value=values.unset): """ Update the VariableInstance :param unicode key: The key :param unicode value: The value :returns: Updated VariableInstance :rtype: twilio.rest.serverless.v1.service.environment.variable.VariableInstance """ data = values.of({'Key': key, 'Value': value, }) payload = self._version.update( 'POST', self._uri, data=data, ) return VariableInstance( self._version, payload, service_sid=self._solution['service_sid'], environment_sid=self._solution['environment_sid'], sid=self._solution['sid'], )
[ "def", "update", "(", "self", ",", "key", "=", "values", ".", "unset", ",", "value", "=", "values", ".", "unset", ")", ":", "data", "=", "values", ".", "of", "(", "{", "'Key'", ":", "key", ",", "'Value'", ":", "value", ",", "}", ")", "payload", ...
Update the VariableInstance :param unicode key: The key :param unicode value: The value :returns: Updated VariableInstance :rtype: twilio.rest.serverless.v1.service.environment.variable.VariableInstance
[ "Update", "the", "VariableInstance" ]
python
train
28.92
Robin8Put/pmes
balance/handler.py
https://github.com/Robin8Put/pmes/blob/338bec94162098f05b75bad035417317e1252fd2/balance/handler.py#L103-L178
async def freeze(self, *args, **kwargs): """ Freeze users balance Accepts: - uid [integer] (users id from main server) - coinid [string] (blockchain type in uppercase) - amount [integer] (amount for freezing) Returns: - uid [integer] (users id from main server) - coinid [string] (blockchain type in uppercase) - amount_active [integer] (activae users amount) - amount_frozen [integer] (frozen users amount) """ # Get data from request uid = kwargs.get("uid", 0) coinid = kwargs.get("coinid") amount = kwargs.get("amount") address = kwargs.get("address") try: coinid = coinid.replace("TEST", "") except: pass try: uid = int(uid) except: return await self.error_400("User id must be integer. ") try: amount = int(amount) except: return await self.error_400("Amount must be integer. ") try: assert amount > 0 except: return await self.error_400("Amount must be positive integer. ") # Check if required fields exists if not uid and address: uid = await self.get_uid_by_address(address=address, coinid=coinid) if isinstance(uid, dict): return uid # Connect to appropriate database database = self.client[self.collection] collection = database[coinid] # Check if balance exists balance = await collection.find_one({"uid":uid}) if not balance: return await self.error_404( "Freeze. Balance with uid:%s and type:%s not found." % (uid, coinid)) # Check if amount is enough difference = int(balance["amount_active"]) - int(amount) if difference < 0: return await self.error_403("Freeze. 
Insufficient amount in the account") # Decrement active amount and increment frozen amount amount_frozen = int(balance["amount_frozen"]) + int(amount) await collection.find_one_and_update({"uid":uid}, {"$set":{"amount_active":str(difference), "amount_frozen":str(amount_frozen)}}) # Return updated balance with excluded mongo _id field result = await collection.find_one({"uid":uid}) result["amount_frozen"] = int(result["amount_frozen"]) result["amount_active"] = int(result["amount_active"]) del result["_id"] return result
[ "async", "def", "freeze", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Get data from request", "uid", "=", "kwargs", ".", "get", "(", "\"uid\"", ",", "0", ")", "coinid", "=", "kwargs", ".", "get", "(", "\"coinid\"", ")", "amou...
Freeze users balance Accepts: - uid [integer] (users id from main server) - coinid [string] (blockchain type in uppercase) - amount [integer] (amount for freezing) Returns: - uid [integer] (users id from main server) - coinid [string] (blockchain type in uppercase) - amount_active [integer] (activae users amount) - amount_frozen [integer] (frozen users amount)
[ "Freeze", "users", "balance" ]
python
train
27.657895
uw-it-aca/uw-restclients
restclients/trumba/calendar.py
https://github.com/uw-it-aca/uw-restclients/blob/e12dcd32bf5296b6ebdf71798031594afb7852cb/restclients/trumba/calendar.py#L268-L292
def _check_err(data): """ :param data: response json data object (must be not None). Check possible error code returned in the response body raise the coresponding exceptions """ if data['d'] is None: raise NoDataReturned() if data['d']['Messages'] is None: return msg = data['d']['Messages'] if len(msg) == 0 or msg[0]['Code'] is None: raise UnknownError() code = int(msg[0]['Code']) if code == 3006: raise CalendarNotExist() elif code == 3007: raise CalendarOwnByDiffAccount() else: logger.warn( "Unexpected Error Code: %s %s" % ( code, msg[0]['Description'])) raise UnexpectedError()
[ "def", "_check_err", "(", "data", ")", ":", "if", "data", "[", "'d'", "]", "is", "None", ":", "raise", "NoDataReturned", "(", ")", "if", "data", "[", "'d'", "]", "[", "'Messages'", "]", "is", "None", ":", "return", "msg", "=", "data", "[", "'d'", ...
:param data: response json data object (must be not None). Check possible error code returned in the response body raise the coresponding exceptions
[ ":", "param", "data", ":", "response", "json", "data", "object", "(", "must", "be", "not", "None", ")", ".", "Check", "possible", "error", "code", "returned", "in", "the", "response", "body", "raise", "the", "coresponding", "exceptions" ]
python
train
28
ethereum/py-evm
eth/tools/fixtures/loading.py
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/tools/fixtures/loading.py#L54-L60
def load_json_fixture(fixture_path: str) -> Dict[str, Any]: """ Loads a fixture file, caching the most recent files it loaded. """ with open(fixture_path) as fixture_file: file_fixtures = json.load(fixture_file) return file_fixtures
[ "def", "load_json_fixture", "(", "fixture_path", ":", "str", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "with", "open", "(", "fixture_path", ")", "as", "fixture_file", ":", "file_fixtures", "=", "json", ".", "load", "(", "fixture_file", ")", "r...
Loads a fixture file, caching the most recent files it loaded.
[ "Loads", "a", "fixture", "file", "caching", "the", "most", "recent", "files", "it", "loaded", "." ]
python
train
36.285714
arviz-devs/arviz
arviz/stats/stats.py
https://github.com/arviz-devs/arviz/blob/d04d8da07f029fd2931f48d2f7f324cf393e5277/arviz/stats/stats.py#L331-L384
def _logsumexp(ary, *, b=None, b_inv=None, axis=None, keepdims=False, out=None, copy=True): """Stable logsumexp when b >= 0 and b is scalar. b_inv overwrites b unless b_inv is None. """ # check dimensions for result arrays ary = np.asarray(ary) if ary.dtype.kind == "i": ary = ary.astype(np.float64) dtype = ary.dtype.type shape = ary.shape shape_len = len(shape) if isinstance(axis, Sequence): axis = tuple(axis_i if axis_i >= 0 else shape_len + axis_i for axis_i in axis) agroup = axis else: axis = axis if (axis is None) or (axis >= 0) else shape_len + axis agroup = (axis,) shape_max = ( tuple(1 for _ in shape) if axis is None else tuple(1 if i in agroup else d for i, d in enumerate(shape)) ) # create result arrays if out is None: if not keepdims: out_shape = ( tuple() if axis is None else tuple(d for i, d in enumerate(shape) if i not in agroup) ) else: out_shape = shape_max out = np.empty(out_shape, dtype=dtype) if b_inv == 0: return np.full_like(out, np.inf, dtype=dtype) if out.shape else np.inf if b_inv is None and b == 0: return np.full_like(out, -np.inf) if out.shape else -np.inf ary_max = np.empty(shape_max, dtype=dtype) # calculations ary.max(axis=axis, keepdims=True, out=ary_max) if copy: ary = ary.copy() ary -= ary_max np.exp(ary, out=ary) ary.sum(axis=axis, keepdims=keepdims, out=out) np.log(out, out=out) if b_inv is not None: ary_max -= np.log(b_inv) elif b: ary_max += np.log(b) out += ary_max.squeeze() if not keepdims else ary_max # transform to scalar if possible return out if out.shape else dtype(out)
[ "def", "_logsumexp", "(", "ary", ",", "*", ",", "b", "=", "None", ",", "b_inv", "=", "None", ",", "axis", "=", "None", ",", "keepdims", "=", "False", ",", "out", "=", "None", ",", "copy", "=", "True", ")", ":", "# check dimensions for result arrays", ...
Stable logsumexp when b >= 0 and b is scalar. b_inv overwrites b unless b_inv is None.
[ "Stable", "logsumexp", "when", "b", ">", "=", "0", "and", "b", "is", "scalar", "." ]
python
train
33.648148
lablup/backend.ai-client-py
src/ai/backend/client/cli/admin/sessions.py
https://github.com/lablup/backend.ai-client-py/blob/a063d774fea6f4350b89498c40d3c837ec3029a7/src/ai/backend/client/cli/admin/sessions.py#L24-L133
def sessions(status, access_key, id_only, all): ''' List and manage compute sessions. ''' fields = [ ('Session ID', 'sess_id'), ] with Session() as session: if is_admin(session): fields.append(('Owner', 'access_key')) if not id_only: fields.extend([ ('Image', 'image'), ('Tag', 'tag'), ('Created At', 'created_at',), ('Terminated At', 'terminated_at'), ('Status', 'status'), ('Occupied Resource', 'occupied_slots'), ('Used Memory (MiB)', 'mem_cur_bytes'), ('Max Used Memory (MiB)', 'mem_max_bytes'), ('CPU Using (%)', 'cpu_using'), ]) if is_legacy_server(): del fields[2] def execute_paginated_query(limit, offset): q = ''' query($limit:Int!, $offset:Int!, $ak:String, $status:String) { compute_session_list( limit:$limit, offset:$offset, access_key:$ak, status:$status) { items { $fields } total_count } }''' q = textwrap.dedent(q).strip() q = q.replace('$fields', ' '.join(item[1] for item in fields)) v = { 'limit': limit, 'offset': offset, 'status': status if status != 'ALL' else None, 'ak': access_key, } try: resp = session.Admin.query(q, v) except Exception as e: print_error(e) sys.exit(1) return resp['compute_session_list'] def round_mem(items): for item in items: if 'mem_cur_bytes' in item: item['mem_cur_bytes'] = round(item['mem_cur_bytes'] / 2 ** 20, 1) if 'mem_max_bytes' in item: item['mem_max_bytes'] = round(item['mem_max_bytes'] / 2 ** 20, 1) return items def _generate_paginated_results(interval): offset = 0 is_first = True total_count = -1 while True: limit = (interval if is_first else min(interval, total_count - offset)) try: result = execute_paginated_query(limit, offset) except Exception as e: print_error(e) sys.exit(1) offset += interval total_count = result['total_count'] items = result['items'] items = round_mem(items) if id_only: yield '\n'.join([item['sess_id'] for item in items]) + '\n' else: table = tabulate([item.values() for item in items], headers=(item[0] for item in fields)) if not is_first: table_rows = table.split('\n') table = 
'\n'.join(table_rows[2:]) yield table + '\n' if is_first: is_first = False if not offset < total_count: break with Session() as session: paginating_interval = 10 if all: click.echo_via_pager(_generate_paginated_results(paginating_interval)) else: result = execute_paginated_query(paginating_interval, offset=0) total_count = result['total_count'] if total_count == 0: print('There are no compute sessions currently {0}.' .format(status.lower())) return items = result['items'] items = round_mem(items) if id_only: for item in items: print(item['sess_id']) else: print(tabulate([item.values() for item in items], headers=(item[0] for item in fields))) if total_count > paginating_interval: print("More sessions can be displayed by using --all option.")
[ "def", "sessions", "(", "status", ",", "access_key", ",", "id_only", ",", "all", ")", ":", "fields", "=", "[", "(", "'Session ID'", ",", "'sess_id'", ")", ",", "]", "with", "Session", "(", ")", "as", "session", ":", "if", "is_admin", "(", "session", ...
List and manage compute sessions.
[ "List", "and", "manage", "compute", "sessions", "." ]
python
train
34.790909
ampl/amplpy
amplpy/utils.py
https://github.com/ampl/amplpy/blob/39df6954049a11a8f666aed26853259b4687099a/amplpy/utils.py#L11-L45
def register_magics(store_name='_ampl_cells', ampl_object=None): """ Register jupyter notebook magics ``%%ampl`` and ``%%ampl_eval``. Args: store_name: Name of the store where ``%%ampl cells`` will be stored. ampl_object: Object used to evaluate ``%%ampl_eval`` cells. """ from IPython.core.magic import ( Magics, magics_class, cell_magic, line_magic ) @magics_class class StoreAMPL(Magics): def __init__(self, shell=None, **kwargs): Magics.__init__(self, shell=shell, **kwargs) self._store = [] shell.user_ns[store_name] = self._store @cell_magic def ampl(self, line, cell): """Store the cell in the store""" self._store.append(cell) @cell_magic def ampl_eval(self, line, cell): """Evaluate the cell""" ampl_object.eval(cell) @line_magic def get_ampl(self, line): """Retrieve the store""" return self._store get_ipython().register_magics(StoreAMPL)
[ "def", "register_magics", "(", "store_name", "=", "'_ampl_cells'", ",", "ampl_object", "=", "None", ")", ":", "from", "IPython", ".", "core", ".", "magic", "import", "(", "Magics", ",", "magics_class", ",", "cell_magic", ",", "line_magic", ")", "@", "magics_...
Register jupyter notebook magics ``%%ampl`` and ``%%ampl_eval``. Args: store_name: Name of the store where ``%%ampl cells`` will be stored. ampl_object: Object used to evaluate ``%%ampl_eval`` cells.
[ "Register", "jupyter", "notebook", "magics", "%%ampl", "and", "%%ampl_eval", "." ]
python
train
29.914286
pyGrowler/Growler
growler/http/response.py
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/http/response.py#L208-L226
def send_text(self, txt, status=200): """ Sends plaintext response to client. Automatically sets the content-type header to text/plain. If txt is not a string, it will be formatted as one. Parameters ---------- txt : str The plaintext string to be sent back to the client status : int, optional The HTTP status code, defaults to 200 (OK) """ self.headers.setdefault('content-type', 'text/plain') if not isinstance(txt, bytes): txt = str(txt).encode() self.message = txt self.status_code = status self.end()
[ "def", "send_text", "(", "self", ",", "txt", ",", "status", "=", "200", ")", ":", "self", ".", "headers", ".", "setdefault", "(", "'content-type'", ",", "'text/plain'", ")", "if", "not", "isinstance", "(", "txt", ",", "bytes", ")", ":", "txt", "=", "...
Sends plaintext response to client. Automatically sets the content-type header to text/plain. If txt is not a string, it will be formatted as one. Parameters ---------- txt : str The plaintext string to be sent back to the client status : int, optional The HTTP status code, defaults to 200 (OK)
[ "Sends", "plaintext", "response", "to", "client", ".", "Automatically", "sets", "the", "content", "-", "type", "header", "to", "text", "/", "plain", ".", "If", "txt", "is", "not", "a", "string", "it", "will", "be", "formatted", "as", "one", "." ]
python
train
33.526316
abau171/highfive
highfive/jobs.py
https://github.com/abau171/highfive/blob/07b3829331072035ab100d1d66deca3e8f3f372a/highfive/jobs.py#L466-L479
def close(self): """ Closes the job manager. No more jobs will be assigned, no more job sets will be added, and any queued or active job sets will be cancelled. """ if self._closed: return self._closed = True if self._active_js is not None: self._active_js.cancel() for js in self._js_queue: js.cancel()
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "_closed", ":", "return", "self", ".", "_closed", "=", "True", "if", "self", ".", "_active_js", "is", "not", "None", ":", "self", ".", "_active_js", ".", "cancel", "(", ")", "for", "js", "in...
Closes the job manager. No more jobs will be assigned, no more job sets will be added, and any queued or active job sets will be cancelled.
[ "Closes", "the", "job", "manager", ".", "No", "more", "jobs", "will", "be", "assigned", "no", "more", "job", "sets", "will", "be", "added", "and", "any", "queued", "or", "active", "job", "sets", "will", "be", "cancelled", "." ]
python
test
28
RacingTadpole/django-singleton-admin
django_singleton_admin/admin.py
https://github.com/RacingTadpole/django-singleton-admin/blob/0a81454be11fdcbaf95ca5018667a8dff3f45bf7/django_singleton_admin/admin.py#L44-L55
def add_view(self, *args, **kwargs): """ Redirect to the change view if the singleton instance exists. """ try: singleton = self.model.objects.get() except (self.model.DoesNotExist, self.model.MultipleObjectsReturned): kwargs.setdefault("extra_context", {}) kwargs["extra_context"]["singleton"] = True response = super(SingletonAdmin, self).add_view(*args, **kwargs) return self.handle_save(args[0], response) return redirect(admin_url(self.model, "change", singleton.id))
[ "def", "add_view", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "singleton", "=", "self", ".", "model", ".", "objects", ".", "get", "(", ")", "except", "(", "self", ".", "model", ".", "DoesNotExist", ",", "self", ...
Redirect to the change view if the singleton instance exists.
[ "Redirect", "to", "the", "change", "view", "if", "the", "singleton", "instance", "exists", "." ]
python
train
47.416667
linuxsoftware/ls.joyous
ls/joyous/models/events.py
https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/models/events.py#L951-L958
def next_date(self): """ Date when this event is next scheduled to occur in the local time zone (Does not include postponements, but does exclude cancellations) """ nextDt = self.__localAfter(timezone.localtime(), dt.time.min) if nextDt is not None: return nextDt.date()
[ "def", "next_date", "(", "self", ")", ":", "nextDt", "=", "self", ".", "__localAfter", "(", "timezone", ".", "localtime", "(", ")", ",", "dt", ".", "time", ".", "min", ")", "if", "nextDt", "is", "not", "None", ":", "return", "nextDt", ".", "date", ...
Date when this event is next scheduled to occur in the local time zone (Does not include postponements, but does exclude cancellations)
[ "Date", "when", "this", "event", "is", "next", "scheduled", "to", "occur", "in", "the", "local", "time", "zone", "(", "Does", "not", "include", "postponements", "but", "does", "exclude", "cancellations", ")" ]
python
train
40.375
jcconnell/python-magicseaweed
magicseaweed/__init__.py
https://github.com/jcconnell/python-magicseaweed/blob/b22d5f22a134532ac6ab7fc274ee768e85f624a0/magicseaweed/__init__.py#L164-L169
def get_current(self): """Get current forecast.""" now = dt.now().timestamp() url = build_url(self.api_key, self.spot_id, self.fields, self.unit, now, now) return get_msw(url)
[ "def", "get_current", "(", "self", ")", ":", "now", "=", "dt", ".", "now", "(", ")", ".", "timestamp", "(", ")", "url", "=", "build_url", "(", "self", ".", "api_key", ",", "self", ".", "spot_id", ",", "self", ".", "fields", ",", "self", ".", "uni...
Get current forecast.
[ "Get", "current", "forecast", "." ]
python
train
37.666667
saltstack/salt
salt/fileclient.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileclient.py#L742-L792
def get_template( self, url, dest, template='jinja', makedirs=False, saltenv='base', cachedir=None, **kwargs): ''' Cache a file then process it as a template ''' if 'env' in kwargs: # "env" is not supported; Use "saltenv". kwargs.pop('env') kwargs['saltenv'] = saltenv url_data = urlparse(url) sfn = self.cache_file(url, saltenv, cachedir=cachedir) if not sfn or not os.path.exists(sfn): return '' if template in salt.utils.templates.TEMPLATE_REGISTRY: data = salt.utils.templates.TEMPLATE_REGISTRY[template]( sfn, **kwargs ) else: log.error( 'Attempted to render template with unavailable engine %s', template ) return '' if not data['result']: # Failed to render the template log.error('Failed to render template with error: %s', data['data']) return '' if not dest: # No destination passed, set the dest as an extrn_files cache dest = self._extrn_path(url, saltenv, cachedir=cachedir) # If Salt generated the dest name, create any required dirs makedirs = True destdir = os.path.dirname(dest) if not os.path.isdir(destdir): if makedirs: os.makedirs(destdir) else: salt.utils.files.safe_rm(data['data']) return '' shutil.move(data['data'], dest) return dest
[ "def", "get_template", "(", "self", ",", "url", ",", "dest", ",", "template", "=", "'jinja'", ",", "makedirs", "=", "False", ",", "saltenv", "=", "'base'", ",", "cachedir", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "'env'", "in", "kwargs"...
Cache a file then process it as a template
[ "Cache", "a", "file", "then", "process", "it", "as", "a", "template" ]
python
train
32.372549
takuti/flurs
flurs/utils/metric.py
https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/utils/metric.py#L135-L156
def mpr(truth, recommend): """Mean Percentile Rank (MPR). Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. Returns: float: MPR. """ if len(recommend) == 0 and len(truth) == 0: return 0. # best elif len(truth) == 0 or len(truth) == 0: return 100. # worst accum = 0. n_recommend = recommend.size for t in truth: r = np.where(recommend == t)[0][0] / float(n_recommend) accum += r return accum * 100. / truth.size
[ "def", "mpr", "(", "truth", ",", "recommend", ")", ":", "if", "len", "(", "recommend", ")", "==", "0", "and", "len", "(", "truth", ")", "==", "0", ":", "return", "0.", "# best", "elif", "len", "(", "truth", ")", "==", "0", "or", "len", "(", "tr...
Mean Percentile Rank (MPR). Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. Returns: float: MPR.
[ "Mean", "Percentile", "Rank", "(", "MPR", ")", "." ]
python
train
25.318182
industrial-optimization-group/DESDEO
desdeo/utils/tui.py
https://github.com/industrial-optimization-group/DESDEO/blob/c7aebe8adb20942d200b9a411d4cdec21f5f4bff/desdeo/utils/tui.py#L146-L166
def _prompt_wrapper(message, default=None, validator=None): """ Handle references piped from file """ class MockDocument: def __init__(self, text): self.text = text if HAS_INPUT: ret = prompt(message, default=default, validator=validator) else: ret = sys.stdin.readline().strip() print(message, ret) if validator: validator.validate(MockDocument(ret)) if "q" in ret: if not HAS_OUTPUT: print("User exit") sys.exit("User exit") return ret
[ "def", "_prompt_wrapper", "(", "message", ",", "default", "=", "None", ",", "validator", "=", "None", ")", ":", "class", "MockDocument", ":", "def", "__init__", "(", "self", ",", "text", ")", ":", "self", ".", "text", "=", "text", "if", "HAS_INPUT", ":...
Handle references piped from file
[ "Handle", "references", "piped", "from", "file" ]
python
train
25.714286
deepmipt/DeepPavlov
deeppavlov/core/commands/train.py
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/commands/train.py#L61-L66
def get_iterator_from_config(config: dict, data: dict): """Create iterator (from config) for specified data.""" iterator_config = config['dataset_iterator'] iterator: Union[DataLearningIterator, DataFittingIterator] = from_params(iterator_config, data=data) return iterator
[ "def", "get_iterator_from_config", "(", "config", ":", "dict", ",", "data", ":", "dict", ")", ":", "iterator_config", "=", "config", "[", "'dataset_iterator'", "]", "iterator", ":", "Union", "[", "DataLearningIterator", ",", "DataFittingIterator", "]", "=", "fro...
Create iterator (from config) for specified data.
[ "Create", "iterator", "(", "from", "config", ")", "for", "specified", "data", "." ]
python
test
60.166667
kurtbrose/pyjks
jks/rfc7292.py
https://github.com/kurtbrose/pyjks/blob/1cbe7f060e2ad076b6462f3273f11d635771ea3d/jks/rfc7292.py#L69-L82
def _adjust(a, a_offset, b): """ a = bytearray a_offset = int b = bytearray """ x = (b[-1] & 0xFF) + (a[a_offset + len(b) - 1] & 0xFF) + 1 a[a_offset + len(b) - 1] = ctypes.c_ubyte(x).value x >>= 8 for i in range(len(b)-2, -1, -1): x += (b[i] & 0xFF) + (a[a_offset + i] & 0xFF) a[a_offset + i] = ctypes.c_ubyte(x).value x >>= 8
[ "def", "_adjust", "(", "a", ",", "a_offset", ",", "b", ")", ":", "x", "=", "(", "b", "[", "-", "1", "]", "&", "0xFF", ")", "+", "(", "a", "[", "a_offset", "+", "len", "(", "b", ")", "-", "1", "]", "&", "0xFF", ")", "+", "1", "a", "[", ...
a = bytearray a_offset = int b = bytearray
[ "a", "=", "bytearray", "a_offset", "=", "int", "b", "=", "bytearray" ]
python
train
26.785714
gpoulter/fablib
fablib.py
https://github.com/gpoulter/fablib/blob/5d14c4d998f79dd1aa3207063c3d06e30e3e2bf9/fablib.py#L67-L79
def cron(name, timespec, user, command, environ=None, disable=False): """Create entry in /etc/cron.d""" path = '/etc/cron.d/{}'.format(name) if disable: sudo('rm ' + path) return entry = '{}\t{}\t{}\n'.format(timespec, user, command) if environ: envstr = '\n'.join('{}={}'.format(k, v) for k, v in environ.iteritems()) entry = '{}\n{}'.format(envstr, entry) chput(StringIO(entry), path, use_sudo=True, mode=0o644, user='root', group='root')
[ "def", "cron", "(", "name", ",", "timespec", ",", "user", ",", "command", ",", "environ", "=", "None", ",", "disable", "=", "False", ")", ":", "path", "=", "'/etc/cron.d/{}'", ".", "format", "(", "name", ")", "if", "disable", ":", "sudo", "(", "'rm '...
Create entry in /etc/cron.d
[ "Create", "entry", "in", "/", "etc", "/", "cron", ".", "d" ]
python
train
40.076923
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_1/gallery/gallery_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/gallery/gallery_client.py#L1103-L1119
def delete_publisher_asset(self, publisher_name, asset_type=None): """DeletePublisherAsset. [Preview API] Delete publisher asset like logo :param str publisher_name: Internal name of the publisher :param str asset_type: Type of asset. Default value is 'logo'. """ route_values = {} if publisher_name is not None: route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str') query_parameters = {} if asset_type is not None: query_parameters['assetType'] = self._serialize.query('asset_type', asset_type, 'str') self._send(http_method='DELETE', location_id='21143299-34f9-4c62-8ca8-53da691192f9', version='5.1-preview.1', route_values=route_values, query_parameters=query_parameters)
[ "def", "delete_publisher_asset", "(", "self", ",", "publisher_name", ",", "asset_type", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "publisher_name", "is", "not", "None", ":", "route_values", "[", "'publisherName'", "]", "=", "self", ".", "_s...
DeletePublisherAsset. [Preview API] Delete publisher asset like logo :param str publisher_name: Internal name of the publisher :param str asset_type: Type of asset. Default value is 'logo'.
[ "DeletePublisherAsset", ".", "[", "Preview", "API", "]", "Delete", "publisher", "asset", "like", "logo", ":", "param", "str", "publisher_name", ":", "Internal", "name", "of", "the", "publisher", ":", "param", "str", "asset_type", ":", "Type", "of", "asset", ...
python
train
51.588235
wbond/asn1crypto
dev/deps.py
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/dev/deps.py#L126-L157
def _archive_single_dir(archive): """ Check if all members of the archive are in a single top-level directory :param archive: An archive from _open_archive() :return: None if not a single top level directory in archive, otherwise a unicode string of the top level directory name """ common_root = None for info in _list_archive_members(archive): fn = _info_name(info) if fn in set(['.', '/']): continue sep = None if '/' in fn: sep = '/' elif '\\' in fn: sep = '\\' if sep is None: root_dir = fn else: root_dir, _ = fn.split(sep, 1) if common_root is None: common_root = root_dir else: if common_root != root_dir: return None return common_root
[ "def", "_archive_single_dir", "(", "archive", ")", ":", "common_root", "=", "None", "for", "info", "in", "_list_archive_members", "(", "archive", ")", ":", "fn", "=", "_info_name", "(", "info", ")", "if", "fn", "in", "set", "(", "[", "'.'", ",", "'/'", ...
Check if all members of the archive are in a single top-level directory :param archive: An archive from _open_archive() :return: None if not a single top level directory in archive, otherwise a unicode string of the top level directory name
[ "Check", "if", "all", "members", "of", "the", "archive", "are", "in", "a", "single", "top", "-", "level", "directory" ]
python
train
26.375
edx/edx-enterprise
integrated_channels/xapi/management/commands/send_course_completions.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/integrated_channels/xapi/management/commands/send_course_completions.py#L122-L147
def send_xapi_statements(self, lrs_configuration, days): """ Send xAPI analytics data of the enterprise learners to the given LRS. Arguments: lrs_configuration (XAPILRSConfiguration): Configuration object containing LRS configurations of the LRS where to send xAPI learner analytics. days (int): Include course enrollment of this number of days. """ persistent_course_grades = self.get_course_completions(lrs_configuration.enterprise_customer, days) users = self.prefetch_users(persistent_course_grades) course_overviews = self.prefetch_courses(persistent_course_grades) for persistent_course_grade in persistent_course_grades: try: user = users.get(persistent_course_grade.user_id) course_overview = course_overviews.get(persistent_course_grade.course_id) course_grade = CourseGradeFactory().read(user, course_key=persistent_course_grade.course_id) send_course_completion_statement(lrs_configuration, user, course_overview, course_grade) except ClientError: LOGGER.exception( 'Client error while sending course completion to xAPI for' ' enterprise customer {enterprise_customer}.'.format( enterprise_customer=lrs_configuration.enterprise_customer.name ) )
[ "def", "send_xapi_statements", "(", "self", ",", "lrs_configuration", ",", "days", ")", ":", "persistent_course_grades", "=", "self", ".", "get_course_completions", "(", "lrs_configuration", ".", "enterprise_customer", ",", "days", ")", "users", "=", "self", ".", ...
Send xAPI analytics data of the enterprise learners to the given LRS. Arguments: lrs_configuration (XAPILRSConfiguration): Configuration object containing LRS configurations of the LRS where to send xAPI learner analytics. days (int): Include course enrollment of this number of days.
[ "Send", "xAPI", "analytics", "data", "of", "the", "enterprise", "learners", "to", "the", "given", "LRS", "." ]
python
valid
55.461538
PmagPy/PmagPy
pmagpy/ipmag.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L4676-L5466
def specimens_results_magic(infile='pmag_specimens.txt', measfile='magic_measurements.txt', sampfile='er_samples.txt', sitefile='er_sites.txt', agefile='er_ages.txt', specout='er_specimens.txt', sampout='pmag_samples.txt', siteout='pmag_sites.txt', resout='pmag_results.txt', critout='pmag_criteria.txt', instout='magic_instruments.txt', plotsites=False, fmt='svg', dir_path='.', cors=[], priorities=['DA-AC-ARM', 'DA-AC-TRM'], coord='g', user='', vgps_level='site', do_site_intensity=True, DefaultAge=["none"], avg_directions_by_sample=False, avg_intensities_by_sample=False, avg_all_components=False, avg_by_polarity=False, skip_directions=False, skip_intensities=False, use_sample_latitude=False, use_paleolatitude=False, use_criteria='default'): """ Writes magic_instruments, er_specimens, pmag_samples, pmag_sites, pmag_criteria, and pmag_results. The data used to write this is obtained by reading a pmag_speciemns, a magic_measurements, a er_samples, a er_sites, a er_ages. @param -> infile: path from the WD to the pmag speciemns table @param -> measfile: path from the WD to the magic measurement file @param -> sampfile: path from the WD to the er sample file @param -> sitefile: path from the WD to the er sites data file @param -> agefile: path from the WD to the er ages data file @param -> specout: path from the WD to the place to write the er specimens data file @param -> sampout: path from the WD to the place to write the pmag samples data file @param -> siteout: path from the WD to the place to write the pmag sites data file @param -> resout: path from the WD to the place to write the pmag results data file @param -> critout: path from the WD to the place to write the pmag criteria file @param -> instout: path from th WD to the place to write the magic instruments file @param -> documentation incomplete if you know more about the purpose of the parameters in this function and it's side effects please extend and complete this string """ # initialize some variables 
plotsites = False # cannot use draw_figs from within ipmag Comps = [] # list of components version_num = pmag.get_version() args = sys.argv model_lat_file = "" Dcrit, Icrit, nocrit = 0, 0, 0 corrections = [] nocorrection = ['DA-NL', 'DA-AC', 'DA-CR'] # do some data adjustments for cor in cors: nocorrection.remove('DA-' + cor) corrections.append('DA-' + cor) for p in priorities: if not p.startswith('DA-AC-'): p = 'DA-AC-' + p # translate coord into coords if coord == 's': coords = ['-1'] if coord == 'g': coords = ['0'] if coord == 't': coords = ['100'] if coord == 'b': coords = ['0', '100'] if vgps_level == 'sample': vgps = 1 # save sample level VGPS/VADMs else: vgps = 0 # site level if do_site_intensity: nositeints = 0 else: nositeints = 1 # chagne these all to True/False instead of 1/0 if not skip_intensities: # set model lat and if use_sample_latitude and use_paleolatitude: print("you should set a paleolatitude file OR use present day lat - not both") return False elif use_sample_latitude: get_model_lat = 1 elif use_paleolatitude: get_model_lat = 2 try: model_lat_file = dir_path + '/' + args[ind + 1] get_model_lat = 2 mlat = open(model_lat_file, 'r') ModelLats = [] for line in mlat.readlines(): ModelLat = {} tmp = line.split() ModelLat["er_site_name"] = tmp[0] ModelLat["site_model_lat"] = tmp[1] ModelLat["er_sample_name"] = tmp[0] ModelLat["sample_lat"] = tmp[1] ModelLats.append(ModelLat) mlat.clos() except: print("use_paleolatitude option requires a valid paleolatitude file") else: get_model_lat = 0 # skips VADM calculation entirely if plotsites and not skip_directions: # plot by site - set up plot window EQ = {} EQ['eqarea'] = 1 # define figure 1 as equal area projection pmagplotlib.plot_init(EQ['eqarea'], 5, 5) # I don't know why this has to be here, but otherwise the first plot # never plots... 
pmagplotlib.plot_net(EQ['eqarea']) pmagplotlib.draw_figs(EQ) infile = os.path.join(dir_path, infile) measfile = os.path.join(dir_path, measfile) instout = os.path.join(dir_path, instout) sampfile = os.path.join(dir_path, sampfile) sitefile = os.path.join(dir_path, sitefile) agefile = os.path.join(dir_path, agefile) specout = os.path.join(dir_path, specout) sampout = os.path.join(dir_path, sampout) siteout = os.path.join(dir_path, siteout) resout = os.path.join(dir_path, resout) critout = os.path.join(dir_path, critout) if use_criteria == 'none': Dcrit, Icrit, nocrit = 1, 1, 1 # no selection criteria crit_data = pmag.default_criteria(nocrit) elif use_criteria == 'default': crit_data = pmag.default_criteria(nocrit) # use default criteria elif use_criteria == 'existing': crit_data, file_type = pmag.magic_read( critout) # use pmag_criteria file print("Acceptance criteria read in from ", critout) accept = {} for critrec in crit_data: for key in list(critrec.keys()): # need to migrate specimen_dang to specimen_int_dang for intensity # data using old format if 'IE-SPEC' in list(critrec.keys()) and 'specimen_dang' in list(critrec.keys()) and 'specimen_int_dang' not in list(critrec.keys()): critrec['specimen_int_dang'] = critrec['specimen_dang'] del critrec['specimen_dang'] # need to get rid of ron shaars sample_int_sigma_uT if 'sample_int_sigma_uT' in list(critrec.keys()): critrec['sample_int_sigma'] = '%10.3e' % ( eval(critrec['sample_int_sigma_uT']) * 1e-6) if key not in list(accept.keys()) and critrec[key] != '': accept[key] = critrec[key] if use_criteria == 'default': pmag.magic_write(critout, [accept], 'pmag_criteria') print("\n Pmag Criteria stored in ", critout, '\n') # now we're done slow dancing # read in site data - has the lats and lons SiteNFO, file_type = pmag.magic_read(sitefile) # read in site data - has the lats and lons SampNFO, file_type = pmag.magic_read(sampfile) # find all the sites with height info. 
height_nfo = pmag.get_dictitem(SiteNFO, 'site_height', '', 'F') if agefile: AgeNFO, file_type = pmag.magic_read( agefile) # read in the age information # read in specimen interpretations Data, file_type = pmag.magic_read(infile) # retrieve specimens with intensity data IntData = pmag.get_dictitem(Data, 'specimen_int', '', 'F') comment, orient = "", [] samples, sites = [], [] for rec in Data: # run through the data filling in missing keys and finding all components, coordinates available # fill in missing fields, collect unique sample and site names if 'er_sample_name' not in list(rec.keys()): rec['er_sample_name'] = "" elif rec['er_sample_name'] not in samples: samples.append(rec['er_sample_name']) if 'er_site_name' not in list(rec.keys()): rec['er_site_name'] = "" elif rec['er_site_name'] not in sites: sites.append(rec['er_site_name']) if 'specimen_int' not in list(rec.keys()): rec['specimen_int'] = '' if 'specimen_comp_name' not in list(rec.keys()) or rec['specimen_comp_name'] == "": rec['specimen_comp_name'] = 'A' if rec['specimen_comp_name'] not in Comps: Comps.append(rec['specimen_comp_name']) rec['specimen_tilt_correction'] = rec['specimen_tilt_correction'].strip( '\n') if "specimen_tilt_correction" not in list(rec.keys()): rec["specimen_tilt_correction"] = "-1" # assume sample coordinates if rec["specimen_tilt_correction"] not in orient: # collect available coordinate systems orient.append(rec["specimen_tilt_correction"]) if "specimen_direction_type" not in list(rec.keys()): # assume direction is line - not plane rec["specimen_direction_type"] = 'l' if "specimen_dec" not in list(rec.keys()): # if no declination, set direction type to blank rec["specimen_direction_type"] = '' if "specimen_n" not in list(rec.keys()): rec["specimen_n"] = '' # put in n if "specimen_alpha95" not in list(rec.keys()): rec["specimen_alpha95"] = '' # put in alpha95 if "magic_method_codes" not in list(rec.keys()): rec["magic_method_codes"] = '' # start parsing data into SpecDirs, 
SpecPlanes, SpecInts SpecInts, SpecDirs, SpecPlanes = [], [], [] samples.sort() # get sorted list of samples and sites sites.sort() if not skip_intensities: # don't skip intensities # retrieve specimens with intensity data IntData = pmag.get_dictitem(Data, 'specimen_int', '', 'F') if nocrit == 0: # use selection criteria for rec in IntData: # do selection criteria kill = pmag.grade(rec, accept, 'specimen_int') if len(kill) == 0: # intensity record to be included in sample, site # calculations SpecInts.append(rec) else: SpecInts = IntData[:] # take everything - no selection criteria # check for required data adjustments if len(corrections) > 0 and len(SpecInts) > 0: for cor in corrections: # only take specimens with the required corrections SpecInts = pmag.get_dictitem( SpecInts, 'magic_method_codes', cor, 'has') if len(nocorrection) > 0 and len(SpecInts) > 0: for cor in nocorrection: # exclude the corrections not specified for inclusion SpecInts = pmag.get_dictitem( SpecInts, 'magic_method_codes', cor, 'not') # take top priority specimen of its name in remaining specimens (only one # per customer) PrioritySpecInts = [] specimens = pmag.get_specs(SpecInts) # get list of uniq specimen names for spec in specimens: # all the records for this specimen ThisSpecRecs = pmag.get_dictitem( SpecInts, 'er_specimen_name', spec, 'T') if len(ThisSpecRecs) == 1: PrioritySpecInts.append(ThisSpecRecs[0]) elif len(ThisSpecRecs) > 1: # more than one prec = [] for p in priorities: # all the records for this specimen ThisSpecRecs = pmag.get_dictitem( SpecInts, 'magic_method_codes', p, 'has') if len(ThisSpecRecs) > 0: prec.append(ThisSpecRecs[0]) PrioritySpecInts.append(prec[0]) # take the best one SpecInts = PrioritySpecInts # this has the first specimen record if not skip_directions: # don't skip directions # retrieve specimens with directed lines and planes AllDirs = pmag.get_dictitem(Data, 'specimen_direction_type', '', 'F') # get all specimens with specimen_n information Ns = 
pmag.get_dictitem(AllDirs, 'specimen_n', '', 'F') if nocrit != 1: # use selection criteria for rec in Ns: # look through everything with specimen_n for "good" data kill = pmag.grade(rec, accept, 'specimen_dir') if len(kill) == 0: # nothing killed it SpecDirs.append(rec) else: # no criteria SpecDirs = AllDirs[:] # take them all # SpecDirs is now the list of all specimen directions (lines and planes) # that pass muster # list of all sample data and list of those that pass the DE-SAMP criteria PmagSamps, SampDirs = [], [] PmagSites, PmagResults = [], [] # list of all site data and selected results SampInts = [] for samp in samples: # run through the sample names if avg_directions_by_sample: # average by sample if desired # get all the directional data for this sample SampDir = pmag.get_dictitem(SpecDirs, 'er_sample_name', samp, 'T') if len(SampDir) > 0: # there are some directions for coord in coords: # step through desired coordinate systems # get all the directions for this sample CoordDir = pmag.get_dictitem( SampDir, 'specimen_tilt_correction', coord, 'T') if len(CoordDir) > 0: # there are some with this coordinate system if not avg_all_components: # look component by component for comp in Comps: # get all directions from this component CompDir = pmag.get_dictitem( CoordDir, 'specimen_comp_name', comp, 'T') if len(CompDir) > 0: # there are some # get a sample average from all specimens PmagSampRec = pmag.lnpbykey( CompDir, 'sample', 'specimen') # decorate the sample record PmagSampRec["er_location_name"] = CompDir[0]['er_location_name'] PmagSampRec["er_site_name"] = CompDir[0]['er_site_name'] PmagSampRec["er_sample_name"] = samp PmagSampRec["er_citation_names"] = "This study" PmagSampRec["er_analyst_mail_names"] = user PmagSampRec['magic_software_packages'] = version_num if CompDir[0]['specimen_flag'] == 'g': PmagSampRec['sample_flag'] = 'g' else: PmagSampRec['sample_flag'] = 'b' if nocrit != 1: PmagSampRec['pmag_criteria_codes'] = "ACCEPT" if agefile != "": 
PmagSampRec = pmag.get_age( PmagSampRec, "er_site_name", "sample_inferred_", AgeNFO, DefaultAge) site_height = pmag.get_dictitem( height_nfo, 'er_site_name', PmagSampRec['er_site_name'], 'T') if len(site_height) > 0: # add in height if available PmagSampRec["sample_height"] = site_height[0]['site_height'] PmagSampRec['sample_comp_name'] = comp PmagSampRec['sample_tilt_correction'] = coord PmagSampRec['er_specimen_names'] = pmag.get_list( CompDir, 'er_specimen_name') # get a list of the specimen names used PmagSampRec['magic_method_codes'] = pmag.get_list( CompDir, 'magic_method_codes') # get a list of the methods used if nocrit != 1: # apply selection criteria kill = pmag.grade( PmagSampRec, accept, 'sample_dir') else: kill = [] if len(kill) == 0: SampDirs.append(PmagSampRec) if vgps == 1: # if sample level VGP info desired, do that now PmagResRec = pmag.getsampVGP( PmagSampRec, SiteNFO) if PmagResRec != "": PmagResults.append(PmagResRec) # print(PmagSampRec) PmagSamps.append(PmagSampRec) if avg_all_components: # average all components together basically same as above PmagSampRec = pmag.lnpbykey( CoordDir, 'sample', 'specimen') PmagSampRec["er_location_name"] = CoordDir[0]['er_location_name'] PmagSampRec["er_site_name"] = CoordDir[0]['er_site_name'] PmagSampRec["er_sample_name"] = samp PmagSampRec["er_citation_names"] = "This study" PmagSampRec["er_analyst_mail_names"] = user PmagSampRec['magic_software_packages'] = version_num if all(i['specimen_flag'] == 'g' for i in CoordDir): PmagSampRec['sample_flag'] = 'g' else: PmagSampRec['sample_flag'] = 'b' if nocrit != 1: PmagSampRec['pmag_criteria_codes'] = "" if agefile != "": PmagSampRec = pmag.get_age( PmagSampRec, "er_site_name", "sample_inferred_", AgeNFO, DefaultAge) site_height = pmag.get_dictitem( height_nfo, 'er_site_name', site, 'T') if len(site_height) > 0: # add in height if available PmagSampRec["sample_height"] = site_height[0]['site_height'] PmagSampRec['sample_tilt_correction'] = coord 
PmagSampRec['sample_comp_name'] = pmag.get_list( CoordDir, 'specimen_comp_name') # get components used PmagSampRec['er_specimen_names'] = pmag.get_list( CoordDir, 'er_specimen_name') # get specimne names averaged PmagSampRec['magic_method_codes'] = pmag.get_list( CoordDir, 'magic_method_codes') # assemble method codes if nocrit != 1: # apply selection criteria kill = pmag.grade( PmagSampRec, accept, 'sample_dir') if len(kill) == 0: # passes the mustard SampDirs.append(PmagSampRec) if vgps == 1: PmagResRec = pmag.getsampVGP( PmagSampRec, SiteNFO) if PmagResRec != "": PmagResults.append(PmagResRec) else: # take everything SampDirs.append(PmagSampRec) if vgps == 1: PmagResRec = pmag.getsampVGP( PmagSampRec, SiteNFO) if PmagResRec != "": PmagResults.append(PmagResRec) PmagSamps.append(PmagSampRec) if avg_intensities_by_sample: # average by sample if desired # get all the intensity data for this sample SampI = pmag.get_dictitem(SpecInts, 'er_sample_name', samp, 'T') if len(SampI) > 0: # there are some # get average intensity stuff PmagSampRec = pmag.average_int(SampI, 'specimen', 'sample') # decorate sample record PmagSampRec["sample_description"] = "sample intensity" PmagSampRec["sample_direction_type"] = "" PmagSampRec['er_site_name'] = SampI[0]["er_site_name"] PmagSampRec['er_sample_name'] = samp PmagSampRec['er_location_name'] = SampI[0]["er_location_name"] PmagSampRec["er_citation_names"] = "This study" PmagSampRec["er_analyst_mail_names"] = user if agefile != "": PmagSampRec = pmag.get_age( PmagSampRec, "er_site_name", "sample_inferred_", AgeNFO, DefaultAge) site_height = pmag.get_dictitem( height_nfo, 'er_site_name', PmagSampRec['er_site_name'], 'T') if len(site_height) > 0: # add in height if available PmagSampRec["sample_height"] = site_height[0]['site_height'] PmagSampRec['er_specimen_names'] = pmag.get_list( SampI, 'er_specimen_name') PmagSampRec['magic_method_codes'] = pmag.get_list( SampI, 'magic_method_codes') if nocrit != 1: # apply criteria! 
kill = pmag.grade(PmagSampRec, accept, 'sample_int') if len(kill) == 0: PmagSampRec['pmag_criteria_codes'] = "ACCEPT" SampInts.append(PmagSampRec) PmagSamps.append(PmagSampRec) else: PmagSampRec = {} # sample rejected else: # no criteria SampInts.append(PmagSampRec) PmagSamps.append(PmagSampRec) PmagSampRec['pmag_criteria_codes'] = "" if vgps == 1 and get_model_lat != 0 and PmagSampRec != {}: if get_model_lat == 1: # use sample latitude PmagResRec = pmag.getsampVDM(PmagSampRec, SampNFO) # get rid of the model lat key del(PmagResRec['model_lat']) elif get_model_lat == 2: # use model latitude PmagResRec = pmag.getsampVDM(PmagSampRec, ModelLats) if PmagResRec != {}: PmagResRec['magic_method_codes'] = PmagResRec['magic_method_codes'] + ":IE-MLAT" if PmagResRec != {}: PmagResRec['er_specimen_names'] = PmagSampRec['er_specimen_names'] PmagResRec['er_sample_names'] = PmagSampRec['er_sample_name'] PmagResRec['pmag_criteria_codes'] = 'ACCEPT' PmagResRec['average_int_sigma_perc'] = PmagSampRec['sample_int_sigma_perc'] PmagResRec['average_int_sigma'] = PmagSampRec['sample_int_sigma'] PmagResRec['average_int_n'] = PmagSampRec['sample_int_n'] PmagResRec['vadm_n'] = PmagSampRec['sample_int_n'] PmagResRec['data_type'] = 'i' PmagResults.append(PmagResRec) if len(PmagSamps) > 0: # fill in missing keys from different types of records TmpSamps, keylist = pmag.fillkeys(PmagSamps) # save in sample output file pmag.magic_write(sampout, TmpSamps, 'pmag_samples') print(' sample averages written to ', sampout) # # create site averages from specimens or samples as specified # for site in sites: for coord in coords: if not avg_directions_by_sample: key, dirlist = 'specimen', SpecDirs # if specimen averages at site level desired if avg_directions_by_sample: key, dirlist = 'sample', SampDirs # if sample averages at site level desired # get all the sites with directions tmp = pmag.get_dictitem(dirlist, 'er_site_name', site, 'T') # use only the last coordinate if avg_all_components==False tmp1 = 
pmag.get_dictitem(tmp, key + '_tilt_correction', coord, 'T') # fish out site information (lat/lon, etc.) sd = pmag.get_dictitem(SiteNFO, 'er_site_name', site, 'T') if len(sd) > 0: sitedat = sd[0] if not avg_all_components: # do component wise averaging for comp in Comps: # get all components comp siteD = pmag.get_dictitem( tmp1, key + '_comp_name', comp, 'T') # remove bad data from means quality_siteD = [] # remove any records for which specimen_flag or sample_flag are 'b' # assume 'g' if flag is not provided for rec in siteD: spec_quality = rec.get('specimen_flag', 'g') samp_quality = rec.get('sample_flag', 'g') if (spec_quality == 'g') and (samp_quality == 'g'): quality_siteD.append(rec) siteD = quality_siteD if len(siteD) > 0: # there are some for this site and component name # get an average for this site PmagSiteRec = pmag.lnpbykey(siteD, 'site', key) # decorate the site record PmagSiteRec['site_comp_name'] = comp PmagSiteRec["er_location_name"] = siteD[0]['er_location_name'] PmagSiteRec["er_site_name"] = siteD[0]['er_site_name'] PmagSiteRec['site_tilt_correction'] = coord PmagSiteRec['site_comp_name'] = pmag.get_list( siteD, key + '_comp_name') if avg_directions_by_sample: PmagSiteRec['er_sample_names'] = pmag.get_list( siteD, 'er_sample_name') else: PmagSiteRec['er_specimen_names'] = pmag.get_list( siteD, 'er_specimen_name') # determine the demagnetization code (DC3,4 or 5) for this site AFnum = len(pmag.get_dictitem( siteD, 'magic_method_codes', 'LP-DIR-AF', 'has')) Tnum = len(pmag.get_dictitem( siteD, 'magic_method_codes', 'LP-DIR-T', 'has')) DC = 3 if AFnum > 0: DC += 1 if Tnum > 0: DC += 1 PmagSiteRec['magic_method_codes'] = pmag.get_list( siteD, 'magic_method_codes') + ':' + 'LP-DC' + str(DC) PmagSiteRec['magic_method_codes'].strip(":") if plotsites: print(PmagSiteRec['er_site_name']) # plot and list the data pmagplotlib.plot_site( EQ['eqarea'], PmagSiteRec, siteD, key) pmagplotlib.draw_figs(EQ) PmagSites.append(PmagSiteRec) else: # last component only 
# get the last orientation system specified siteD = tmp1[:] if len(siteD) > 0: # there are some # get the average for this site PmagSiteRec = pmag.lnpbykey(siteD, 'site', key) # decorate the record PmagSiteRec["er_location_name"] = siteD[0]['er_location_name'] PmagSiteRec["er_site_name"] = siteD[0]['er_site_name'] PmagSiteRec['site_comp_name'] = comp PmagSiteRec['site_tilt_correction'] = coord PmagSiteRec['site_comp_name'] = pmag.get_list( siteD, key + '_comp_name') PmagSiteRec['er_specimen_names'] = pmag.get_list( siteD, 'er_specimen_name') PmagSiteRec['er_sample_names'] = pmag.get_list( siteD, 'er_sample_name') AFnum = len(pmag.get_dictitem( siteD, 'magic_method_codes', 'LP-DIR-AF', 'has')) Tnum = len(pmag.get_dictitem( siteD, 'magic_method_codes', 'LP-DIR-T', 'has')) DC = 3 if AFnum > 0: DC += 1 if Tnum > 0: DC += 1 PmagSiteRec['magic_method_codes'] = pmag.get_list( siteD, 'magic_method_codes') + ':' + 'LP-DC' + str(DC) PmagSiteRec['magic_method_codes'].strip(":") if not avg_directions_by_sample: PmagSiteRec['site_comp_name'] = pmag.get_list( siteD, key + '_comp_name') if plotsites: pmagplotlib.plot_site( EQ['eqarea'], PmagSiteRec, siteD, key) pmagplotlib.draw_figs(EQ) PmagSites.append(PmagSiteRec) else: print('site information not found in er_sites for site, ', site, ' site will be skipped') for PmagSiteRec in PmagSites: # now decorate each dictionary some more, and calculate VGPs etc. 
for results table PmagSiteRec["er_citation_names"] = "This study" PmagSiteRec["er_analyst_mail_names"] = user PmagSiteRec['magic_software_packages'] = version_num if agefile != "": PmagSiteRec = pmag.get_age( PmagSiteRec, "er_site_name", "site_inferred_", AgeNFO, DefaultAge) PmagSiteRec['pmag_criteria_codes'] = 'ACCEPT' if 'site_n_lines' in list(PmagSiteRec.keys()) and 'site_n_planes' in list(PmagSiteRec.keys()) and PmagSiteRec['site_n_lines'] != "" and PmagSiteRec['site_n_planes'] != "": if int(PmagSiteRec["site_n_planes"]) > 0: PmagSiteRec["magic_method_codes"] = PmagSiteRec['magic_method_codes'] + ":DE-FM-LP" elif int(PmagSiteRec["site_n_lines"]) > 2: PmagSiteRec["magic_method_codes"] = PmagSiteRec['magic_method_codes'] + ":DE-FM" kill = pmag.grade(PmagSiteRec, accept, 'site_dir') if len(kill) == 0: PmagResRec = {} # set up dictionary for the pmag_results table entry PmagResRec['data_type'] = 'i' # decorate it a bit PmagResRec['magic_software_packages'] = version_num PmagSiteRec['site_description'] = 'Site direction included in results table' PmagResRec['pmag_criteria_codes'] = 'ACCEPT' dec = float(PmagSiteRec["site_dec"]) inc = float(PmagSiteRec["site_inc"]) if 'site_alpha95' in list(PmagSiteRec.keys()) and PmagSiteRec['site_alpha95'] != "": a95 = float(PmagSiteRec["site_alpha95"]) else: a95 = 180. sitedat = pmag.get_dictitem(SiteNFO, 'er_site_name', PmagSiteRec['er_site_name'], 'T')[ 0] # fish out site information (lat/lon, etc.) lat = float(sitedat['site_lat']) lon = float(sitedat['site_lon']) plon, plat, dp, dm = pmag.dia_vgp( dec, inc, a95, lat, lon) # get the VGP for this site if PmagSiteRec['site_tilt_correction'] == '-1': C = ' (spec coord) ' if PmagSiteRec['site_tilt_correction'] == '0': C = ' (geog. coord) ' if PmagSiteRec['site_tilt_correction'] == '100': C = ' (strat. 
coord) ' PmagResRec["pmag_result_name"] = "VGP Site: " + \ PmagSiteRec["er_site_name"] # decorate some more PmagResRec["result_description"] = "Site VGP, coord system = " + \ str(coord) + ' component: ' + comp PmagResRec['er_site_names'] = PmagSiteRec['er_site_name'] PmagResRec['pmag_criteria_codes'] = 'ACCEPT' PmagResRec['er_citation_names'] = 'This study' PmagResRec['er_analyst_mail_names'] = user PmagResRec["er_location_names"] = PmagSiteRec["er_location_name"] if avg_directions_by_sample: PmagResRec["er_sample_names"] = PmagSiteRec["er_sample_names"] else: PmagResRec["er_specimen_names"] = PmagSiteRec["er_specimen_names"] PmagResRec["tilt_correction"] = PmagSiteRec['site_tilt_correction'] PmagResRec["pole_comp_name"] = PmagSiteRec['site_comp_name'] PmagResRec["average_dec"] = PmagSiteRec["site_dec"] PmagResRec["average_inc"] = PmagSiteRec["site_inc"] PmagResRec["average_alpha95"] = PmagSiteRec["site_alpha95"] PmagResRec["average_n"] = PmagSiteRec["site_n"] PmagResRec["average_n_lines"] = PmagSiteRec["site_n_lines"] PmagResRec["average_n_planes"] = PmagSiteRec["site_n_planes"] PmagResRec["vgp_n"] = PmagSiteRec["site_n"] PmagResRec["average_k"] = PmagSiteRec["site_k"] PmagResRec["average_r"] = PmagSiteRec["site_r"] PmagResRec["average_lat"] = '%10.4f ' % (lat) PmagResRec["average_lon"] = '%10.4f ' % (lon) if agefile != "": PmagResRec = pmag.get_age( PmagResRec, "er_site_names", "average_", AgeNFO, DefaultAge) site_height = pmag.get_dictitem( height_nfo, 'er_site_name', site, 'T') if len(site_height) > 0: PmagResRec["average_height"] = site_height[0]['site_height'] PmagResRec["vgp_lat"] = '%7.1f ' % (plat) PmagResRec["vgp_lon"] = '%7.1f ' % (plon) PmagResRec["vgp_dp"] = '%7.1f ' % (dp) PmagResRec["vgp_dm"] = '%7.1f ' % (dm) PmagResRec["magic_method_codes"] = PmagSiteRec["magic_method_codes"] if '0' in PmagSiteRec['site_tilt_correction'] and "DA-DIR-GEO" not in PmagSiteRec['magic_method_codes']: PmagSiteRec['magic_method_codes'] = PmagSiteRec['magic_method_codes'] 
+ ":DA-DIR-GEO" if '100' in PmagSiteRec['site_tilt_correction'] and "DA-DIR-TILT" not in PmagSiteRec['magic_method_codes']: PmagSiteRec['magic_method_codes'] = PmagSiteRec['magic_method_codes'] + ":DA-DIR-TILT" PmagSiteRec['site_polarity'] = "" if avg_by_polarity: # assign polarity based on angle of pole lat to spin axis - may want to re-think this sometime angle = pmag.angle([0, 0], [0, (90 - plat)]) if angle <= 55.: PmagSiteRec["site_polarity"] = 'n' if angle > 55. and angle < 125.: PmagSiteRec["site_polarity"] = 't' if angle >= 125.: PmagSiteRec["site_polarity"] = 'r' PmagResults.append(PmagResRec) if avg_by_polarity: # find the tilt corrected data crecs = pmag.get_dictitem( PmagSites, 'site_tilt_correction', '100', 'T') if len(crecs) < 2: # if there aren't any, find the geographic corrected data crecs = pmag.get_dictitem( PmagSites, 'site_tilt_correction', '0', 'T') if len(crecs) > 2: # if there are some, comp = pmag.get_list(crecs, 'site_comp_name').split(':')[ 0] # find the first component # fish out all of the first component crecs = pmag.get_dictitem(crecs, 'site_comp_name', comp, 'T') precs = [] for rec in crecs: precs.append({'dec': rec['site_dec'], 'inc': rec['site_inc'], 'name': rec['er_site_name'], 'loc': rec['er_location_name']}) # calculate average by polarity polpars = pmag.fisher_by_pol(precs) # hunt through all the modes (normal=A, reverse=B, all=ALL) for mode in list(polpars.keys()): PolRes = {} PolRes['er_citation_names'] = 'This study' PolRes["pmag_result_name"] = "Polarity Average: Polarity " + mode PolRes["data_type"] = "a" PolRes["average_dec"] = '%7.1f' % (polpars[mode]['dec']) PolRes["average_inc"] = '%7.1f' % (polpars[mode]['inc']) PolRes["average_n"] = '%i' % (polpars[mode]['n']) PolRes["average_r"] = '%5.4f' % (polpars[mode]['r']) PolRes["average_k"] = '%6.0f' % (polpars[mode]['k']) PolRes["average_alpha95"] = '%7.1f' % ( polpars[mode]['alpha95']) PolRes['er_site_names'] = polpars[mode]['sites'] PolRes['er_location_names'] = 
polpars[mode]['locs'] PolRes['magic_software_packages'] = version_num PmagResults.append(PolRes) if not skip_intensities and nositeints != 1: for site in sites: # now do intensities for each site if plotsites: print(site) if not avg_intensities_by_sample: key, intlist = 'specimen', SpecInts # if using specimen level data if avg_intensities_by_sample: key, intlist = 'sample', PmagSamps # if using sample level data # get all the intensities for this site Ints = pmag.get_dictitem(intlist, 'er_site_name', site, 'T') if len(Ints) > 0: # there are some # get average intensity stuff for site table PmagSiteRec = pmag.average_int(Ints, key, 'site') # get average intensity stuff for results table PmagResRec = pmag.average_int(Ints, key, 'average') if plotsites: # if site by site examination requested - print this site out to the screen for rec in Ints: print(rec['er_' + key + '_name'], ' %7.1f' % (1e6 * float(rec[key + '_int']))) if len(Ints) > 1: print('Average: ', '%7.1f' % ( 1e6 * float(PmagResRec['average_int'])), 'N: ', len(Ints)) print('Sigma: ', '%7.1f' % ( 1e6 * float(PmagResRec['average_int_sigma'])), 'Sigma %: ', PmagResRec['average_int_sigma_perc']) input('Press any key to continue\n') er_location_name = Ints[0]["er_location_name"] # decorate the records PmagSiteRec["er_location_name"] = er_location_name PmagSiteRec["er_citation_names"] = "This study" PmagResRec["er_location_names"] = er_location_name PmagResRec["er_citation_names"] = "This study" PmagSiteRec["er_analyst_mail_names"] = user PmagResRec["er_analyst_mail_names"] = user PmagResRec["data_type"] = 'i' if not avg_intensities_by_sample: PmagSiteRec['er_specimen_names'] = pmag.get_list( Ints, 'er_specimen_name') # list of all specimens used PmagResRec['er_specimen_names'] = pmag.get_list( Ints, 'er_specimen_name') PmagSiteRec['er_sample_names'] = pmag.get_list( Ints, 'er_sample_name') # list of all samples used PmagResRec['er_sample_names'] = pmag.get_list( Ints, 'er_sample_name') 
PmagSiteRec['er_site_name'] = site PmagResRec['er_site_names'] = site PmagSiteRec['magic_method_codes'] = pmag.get_list( Ints, 'magic_method_codes') PmagResRec['magic_method_codes'] = pmag.get_list( Ints, 'magic_method_codes') kill = pmag.grade(PmagSiteRec, accept, 'site_int') if nocrit == 1 or len(kill) == 0: b, sig = float(PmagResRec['average_int']), "" if(PmagResRec['average_int_sigma']) != "": sig = float(PmagResRec['average_int_sigma']) # fish out site direction sdir = pmag.get_dictitem( PmagResults, 'er_site_names', site, 'T') # get the VDM for this record using last average # inclination (hope it is the right one!) if len(sdir) > 0 and sdir[-1]['average_inc'] != "": inc = float(sdir[0]['average_inc']) # get magnetic latitude using dipole formula mlat = pmag.magnetic_lat(inc) # get VDM with magnetic latitude PmagResRec["vdm"] = '%8.3e ' % (pmag.b_vdm(b, mlat)) PmagResRec["vdm_n"] = PmagResRec['average_int_n'] if 'average_int_sigma' in list(PmagResRec.keys()) and PmagResRec['average_int_sigma'] != "": vdm_sig = pmag.b_vdm( float(PmagResRec['average_int_sigma']), mlat) PmagResRec["vdm_sigma"] = '%8.3e ' % (vdm_sig) else: PmagResRec["vdm_sigma"] = "" mlat = "" # define a model latitude if get_model_lat == 1: # use present site latitude mlats = pmag.get_dictitem( SiteNFO, 'er_site_name', site, 'T') if len(mlats) > 0: mlat = mlats[0]['site_lat'] # use a model latitude from some plate reconstruction model # (or something) elif get_model_lat == 2: mlats = pmag.get_dictitem( ModelLats, 'er_site_name', site, 'T') if len(mlats) > 0: PmagResRec['model_lat'] = mlats[0]['site_model_lat'] mlat = PmagResRec['model_lat'] if mlat != "": # get the VADM using the desired latitude PmagResRec["vadm"] = '%8.3e ' % ( pmag.b_vdm(b, float(mlat))) if sig != "": vdm_sig = pmag.b_vdm( float(PmagResRec['average_int_sigma']), float(mlat)) PmagResRec["vadm_sigma"] = '%8.3e ' % (vdm_sig) PmagResRec["vadm_n"] = PmagResRec['average_int_n'] else: PmagResRec["vadm_sigma"] = "" # fish out site 
information (lat/lon, etc.) sitedat = pmag.get_dictitem( SiteNFO, 'er_site_name', PmagSiteRec['er_site_name'], 'T') if len(sitedat) > 0: sitedat = sitedat[0] PmagResRec['average_lat'] = sitedat['site_lat'] PmagResRec['average_lon'] = sitedat['site_lon'] else: PmagResRec['average_lon'] = 'UNKNOWN' PmagResRec['average_lon'] = 'UNKNOWN' PmagResRec['magic_software_packages'] = version_num PmagResRec["pmag_result_name"] = "V[A]DM: Site " + site PmagResRec["result_description"] = "V[A]DM of site" PmagResRec["pmag_criteria_codes"] = "ACCEPT" if agefile != "": PmagResRec = pmag.get_age( PmagResRec, "er_site_names", "average_", AgeNFO, DefaultAge) site_height = pmag.get_dictitem( height_nfo, 'er_site_name', site, 'T') if len(site_height) > 0: PmagResRec["average_height"] = site_height[0]['site_height'] PmagSites.append(PmagSiteRec) PmagResults.append(PmagResRec) if len(PmagSites) > 0: Tmp, keylist = pmag.fillkeys(PmagSites) pmag.magic_write(siteout, Tmp, 'pmag_sites') print(' sites written to ', siteout) else: print("No Site level table") if len(PmagResults) > 0: TmpRes, keylist = pmag.fillkeys(PmagResults) pmag.magic_write(resout, TmpRes, 'pmag_results') print(' results written to ', resout) else: print("No Results level table")
[ "def", "specimens_results_magic", "(", "infile", "=", "'pmag_specimens.txt'", ",", "measfile", "=", "'magic_measurements.txt'", ",", "sampfile", "=", "'er_samples.txt'", ",", "sitefile", "=", "'er_sites.txt'", ",", "agefile", "=", "'er_ages.txt'", ",", "specout", "=",...
Writes magic_instruments, er_specimens, pmag_samples, pmag_sites, pmag_criteria, and pmag_results. The data used to write this is obtained by reading a pmag_speciemns, a magic_measurements, a er_samples, a er_sites, a er_ages. @param -> infile: path from the WD to the pmag speciemns table @param -> measfile: path from the WD to the magic measurement file @param -> sampfile: path from the WD to the er sample file @param -> sitefile: path from the WD to the er sites data file @param -> agefile: path from the WD to the er ages data file @param -> specout: path from the WD to the place to write the er specimens data file @param -> sampout: path from the WD to the place to write the pmag samples data file @param -> siteout: path from the WD to the place to write the pmag sites data file @param -> resout: path from the WD to the place to write the pmag results data file @param -> critout: path from the WD to the place to write the pmag criteria file @param -> instout: path from th WD to the place to write the magic instruments file @param -> documentation incomplete if you know more about the purpose of the parameters in this function and it's side effects please extend and complete this string
[ "Writes", "magic_instruments", "er_specimens", "pmag_samples", "pmag_sites", "pmag_criteria", "and", "pmag_results", ".", "The", "data", "used", "to", "write", "this", "is", "obtained", "by", "reading", "a", "pmag_speciemns", "a", "magic_measurements", "a", "er_sample...
python
train
57.4311
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/rst/rs3/rs3tree.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/rst/rs3/rs3tree.py#L376-L387
def sort_subtrees(self, *subtrees):
    """Sort the given subtrees (of type DGParentedTree) by their linear
    position in this RSTTree.

    If two subtrees share the same linear position (i.e. one is a child of
    the other), the deeper subtree comes first, so a child precedes its
    parent.  This relies on the stability of ``sorted()``: the subtrees are
    first ordered by height (descending), then re-sorted by position.
    """
    # Taller (deeper) subtrees first; ties in position then keep this order
    # because the second sort is stable.
    by_height_desc = sorted(
        subtrees,
        key=lambda subtree: subtree.node_height(self),
        reverse=True)
    return sorted(
        by_height_desc,
        key=lambda subtree: subtree.get_position(self))
[ "def", "sort_subtrees", "(", "self", ",", "*", "subtrees", ")", ":", "subtrees_desc_height", "=", "sorted", "(", "subtrees", ",", "key", "=", "methodcaller", "(", "'node_height'", ",", "self", ")", ",", "reverse", "=", "True", ")", "return", "sorted", "(",...
sort the given subtrees (of type DGParentedTree) based on their linear position in this RSTTree. If two subtrees have the same linear position in the RSTTree (i.e. one is a child of the other), they are sorted by their height in reverse order (i.e. the child appears before its parent).
[ "sort", "the", "given", "subtrees", "(", "of", "type", "DGParentedTree", ")", "based", "on", "their", "linear", "position", "in", "this", "RSTTree", ".", "If", "two", "subtrees", "have", "the", "same", "linear", "position", "in", "the", "RSTTree", "(", "i"...
python
train
54.083333
rosshamish/hexgrid
hexgrid.py
https://github.com/rosshamish/hexgrid/blob/16abb1822dc2789cb355f54fb06c7774eea1d9f2/hexgrid.py#L413-L422
def legal_node_coords():
    """Return the set of all legal node coordinates on the grid.

    A node coordinate is legal if it touches at least one legal tile.
    """
    nodes = {node
             for tile_id in legal_tile_ids()
             for node in nodes_touching_tile(tile_id)}
    logging.debug('Legal node coords({})={}'.format(len(nodes), nodes))
    return nodes
[ "def", "legal_node_coords", "(", ")", ":", "nodes", "=", "set", "(", ")", "for", "tile_id", "in", "legal_tile_ids", "(", ")", ":", "for", "node", "in", "nodes_touching_tile", "(", "tile_id", ")", ":", "nodes", ".", "add", "(", "node", ")", "logging", "...
Return all legal node coordinates on the grid
[ "Return", "all", "legal", "node", "coordinates", "on", "the", "grid" ]
python
train
30.3