| code | docstring | text |
|---|---|---|
def consume(self): # pragma: no cover
""" start consuming rabbitmq messages """
print(' [*] Waiting for logs. To exit press CTRL+C')
self.channel.basic_consume(self.queue_name, self.callback)
    self.channel.start_consuming() | start consuming rabbitmq messages | Below is the instruction that describes the task:
### Input:
start consuming rabbitmq messages
### Response:
def consume(self): # pragma: no cover
""" start consuming rabbitmq messages """
print(' [*] Waiting for logs. To exit press CTRL+C')
self.channel.basic_consume(self.queue_name, self.callback)
self.channel.start_consuming() |
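A minimal, hedged sketch of how a class exposing this `consume` method is typically wired up with pika 1.x; the `LogConsumer` wrapper, queue name, and broker address below are illustrative assumptions, not part of the row above.

```python
import pika

class LogConsumer:  # hypothetical wrapper; sets up only what consume() reads
    def __init__(self, queue_name='logs'):
        conn = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
        self.channel = conn.channel()
        self.queue_name = queue_name
        self.channel.queue_declare(queue=queue_name)

    def callback(self, ch, method, properties, body):
        print(' [x] %r' % body)  # handle one delivered message

    def consume(self):
        print(' [*] Waiting for logs. To exit press CTRL+C')
        self.channel.basic_consume(self.queue_name, self.callback)
        self.channel.start_consuming()  # blocks until CTRL+C

LogConsumer().consume()
```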
def replace_type(items, spec, loader, found, find_embeds=True, deepen=True):
# type: (Any, Dict[Text, Any], Loader, Set[Text], bool, bool) -> Any
""" Go through and replace types in the 'spec' mapping"""
if isinstance(items, MutableMapping):
# recursively check these fields for types to replace
if items.get("type") in ("record", "enum") and items.get("name"):
if items["name"] in found:
return items["name"]
found.add(items["name"])
if not deepen:
return items
items = copy.copy(items)
if not items.get("name"):
items["name"] = get_anon_name(items)
for name in ("type", "items", "fields"):
if name in items:
items[name] = replace_type(
items[name], spec, loader, found, find_embeds=find_embeds,
deepen=find_embeds)
if isinstance(items[name], MutableSequence):
items[name] = flatten(items[name])
return items
if isinstance(items, MutableSequence):
# recursively transform list
return [replace_type(i, spec, loader, found, find_embeds=find_embeds,
deepen=deepen) for i in items]
if isinstance(items, string_types):
# found a string which is a symbol corresponding to a type.
replace_with = None
if items in loader.vocab:
# If it's a vocabulary term, first expand it to its fully qualified
# URI
items = loader.vocab[items]
if items in spec:
# Look up in specialization map
replace_with = spec[items]
if replace_with:
return replace_type(replace_with, spec, loader, found, find_embeds=find_embeds)
found.add(items)
        return items | Go through and replace types in the 'spec' mapping | Below is the instruction that describes the task:
### Input:
Go through and replace types in the 'spec' mapping
### Response:
def replace_type(items, spec, loader, found, find_embeds=True, deepen=True):
# type: (Any, Dict[Text, Any], Loader, Set[Text], bool, bool) -> Any
""" Go through and replace types in the 'spec' mapping"""
if isinstance(items, MutableMapping):
# recursively check these fields for types to replace
if items.get("type") in ("record", "enum") and items.get("name"):
if items["name"] in found:
return items["name"]
found.add(items["name"])
if not deepen:
return items
items = copy.copy(items)
if not items.get("name"):
items["name"] = get_anon_name(items)
for name in ("type", "items", "fields"):
if name in items:
items[name] = replace_type(
items[name], spec, loader, found, find_embeds=find_embeds,
deepen=find_embeds)
if isinstance(items[name], MutableSequence):
items[name] = flatten(items[name])
return items
if isinstance(items, MutableSequence):
# recursively transform list
return [replace_type(i, spec, loader, found, find_embeds=find_embeds,
deepen=deepen) for i in items]
if isinstance(items, string_types):
# found a string which is a symbol corresponding to a type.
replace_with = None
if items in loader.vocab:
# If it's a vocabulary term, first expand it to its fully qualified
# URI
items = loader.vocab[items]
if items in spec:
# Look up in specialization map
replace_with = spec[items]
if replace_with:
return replace_type(replace_with, spec, loader, found, find_embeds=find_embeds)
found.add(items)
return items |
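A toy walk-through of the specialization lookup in the string branch; `FakeLoader` below stands in for the real schema-salad `Loader` (an assumption), and only its `.vocab` attribute is consulted:

```python
class FakeLoader:  # stand-in for the schema-salad Loader
    vocab = {'OldType': 'http://example.com/v1#OldType'}

spec = {'http://example.com/v1#OldType': 'string'}  # specialization map
found = set()

# 'OldType' is expanded via loader.vocab, matched in `spec`, and the
# field's type is rewritten in a shallow copy of the mapping:
print(replace_type({'name': 'f1', 'type': 'OldType'}, spec, FakeLoader(), found))
# -> {'name': 'f1', 'type': 'string'}
```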
def wallet_balance_total(self, wallet):
"""
Returns the sum of all accounts balances in **wallet**
:param wallet: Wallet to return sum of balances for
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.wallet_balance_total(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
... )
{
"balance": 10000,
"pending": 10000
}
"""
wallet = self._process_value(wallet, 'wallet')
payload = {"wallet": wallet}
resp = self.call('wallet_balance_total', payload)
for k, v in resp.items():
resp[k] = int(v)
return resp | Returns the sum of all accounts balances in **wallet**
:param wallet: Wallet to return sum of balances for
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.wallet_balance_total(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
... )
{
"balance": 10000,
"pending": 10000
} | Below is the instruction that describes the task:
### Input:
Returns the sum of all accounts balances in **wallet**
:param wallet: Wallet to return sum of balances for
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.wallet_balance_total(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
... )
{
"balance": 10000,
"pending": 10000
}
### Response:
def wallet_balance_total(self, wallet):
"""
Returns the sum of all accounts balances in **wallet**
:param wallet: Wallet to return sum of balances for
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.wallet_balance_total(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
... )
{
"balance": 10000,
"pending": 10000
}
"""
wallet = self._process_value(wallet, 'wallet')
payload = {"wallet": wallet}
resp = self.call('wallet_balance_total', payload)
for k, v in resp.items():
resp[k] = int(v)
return resp |
def _parse_css_color(color):
'''_parse_css_color(css_color) -> gtk.gdk.Color'''
if color.startswith("rgb(") and color.endswith(')'):
r, g, b = [int(c)*257 for c in color[4:-1].split(',')]
return gtk.gdk.Color(r, g, b)
else:
        return gtk.gdk.color_parse(color) | _parse_css_color(css_color) -> gtk.gdk.Color | Below is the instruction that describes the task:
### Input:
_parse_css_color(css_color) -> gtk.gdk.Color
### Response:
def _parse_css_color(color):
'''_parse_css_color(css_color) -> gtk.gdk.Color'''
if color.startswith("rgb(") and color.endswith(')'):
r, g, b = [int(c)*257 for c in color[4:-1].split(',')]
return gtk.gdk.Color(r, g, b)
else:
return gtk.gdk.color_parse(color) |
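Each 0-255 CSS channel is multiplied by 257 because GTK colors are 16-bit per channel (255 * 257 = 65535). A small usage sketch, assuming a PyGTK 2.x environment where `gtk.gdk` imports:

```python
c = _parse_css_color('rgb(255, 0, 128)')
# c.red == 65535, c.green == 0, c.blue == 32896 (128 * 257)

named = _parse_css_color('#ff0080')  # falls through to gtk.gdk.color_parse
```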
def _dynamic_mul(self, dimensions, other, keys):
"""
Implements dynamic version of overlaying operation overlaying
DynamicMaps and HoloMaps where the key dimensions of one is
a strict superset of the other.
"""
# If either is a HoloMap compute Dimension values
if not isinstance(self, DynamicMap) or not isinstance(other, DynamicMap):
keys = sorted((d, v) for k in keys for d, v in k)
grouped = dict([(g, [v for _, v in group])
for g, group in groupby(keys, lambda x: x[0])])
dimensions = [d(values=grouped[d.name]) for d in dimensions]
map_obj = None
# Combine streams
map_obj = self if isinstance(self, DynamicMap) else other
if isinstance(self, DynamicMap) and isinstance(other, DynamicMap):
self_streams = util.dimensioned_streams(self)
other_streams = util.dimensioned_streams(other)
streams = list(util.unique_iterator(self_streams+other_streams))
else:
streams = map_obj.streams
def dynamic_mul(*key, **kwargs):
key_map = {d.name: k for d, k in zip(dimensions, key)}
layers = []
try:
self_el = self.select(HoloMap, **key_map) if self.kdims else self[()]
layers.append(self_el)
except KeyError:
pass
try:
other_el = other.select(HoloMap, **key_map) if other.kdims else other[()]
layers.append(other_el)
except KeyError:
pass
return Overlay(layers)
callback = Callable(dynamic_mul, inputs=[self, other])
callback._is_overlay = True
if map_obj:
return map_obj.clone(callback=callback, shared_data=False,
kdims=dimensions, streams=streams)
else:
return DynamicMap(callback=callback, kdims=dimensions,
streams=streams) | Implements dynamic version of overlaying operation overlaying
DynamicMaps and HoloMaps where the key dimensions of one is
a strict superset of the other. | Below is the instruction that describes the task:
### Input:
Implements dynamic version of overlaying operation overlaying
DynamicMaps and HoloMaps where the key dimensions of one is
a strict superset of the other.
### Response:
def _dynamic_mul(self, dimensions, other, keys):
"""
Implements dynamic version of overlaying operation overlaying
DynamicMaps and HoloMaps where the key dimensions of one is
a strict superset of the other.
"""
# If either is a HoloMap compute Dimension values
if not isinstance(self, DynamicMap) or not isinstance(other, DynamicMap):
keys = sorted((d, v) for k in keys for d, v in k)
grouped = dict([(g, [v for _, v in group])
for g, group in groupby(keys, lambda x: x[0])])
dimensions = [d(values=grouped[d.name]) for d in dimensions]
map_obj = None
# Combine streams
map_obj = self if isinstance(self, DynamicMap) else other
if isinstance(self, DynamicMap) and isinstance(other, DynamicMap):
self_streams = util.dimensioned_streams(self)
other_streams = util.dimensioned_streams(other)
streams = list(util.unique_iterator(self_streams+other_streams))
else:
streams = map_obj.streams
def dynamic_mul(*key, **kwargs):
key_map = {d.name: k for d, k in zip(dimensions, key)}
layers = []
try:
self_el = self.select(HoloMap, **key_map) if self.kdims else self[()]
layers.append(self_el)
except KeyError:
pass
try:
other_el = other.select(HoloMap, **key_map) if other.kdims else other[()]
layers.append(other_el)
except KeyError:
pass
return Overlay(layers)
callback = Callable(dynamic_mul, inputs=[self, other])
callback._is_overlay = True
if map_obj:
return map_obj.clone(callback=callback, shared_data=False,
kdims=dimensions, streams=streams)
else:
return DynamicMap(callback=callback, kdims=dimensions,
streams=streams) |
def simOnePrd(self):
'''
    Simulate one period of the fashion victim model for this type. Each
agent receives an idiosyncratic preference shock and chooses whether to
change styles (using the optimal decision rule).
Parameters
----------
none
Returns
-------
none
'''
pNow = self.pNow
sPrev = self.sNow
J2Pprob = self.switchFuncJock(pNow)
P2Jprob = self.switchFuncPunk(pNow)
Shks = self.RNG.rand(self.pop_size)
J2P = np.logical_and(sPrev == 0,Shks < J2Pprob)
P2J = np.logical_and(sPrev == 1,Shks < P2Jprob)
sNow = copy(sPrev)
sNow[J2P] = 1
sNow[P2J] = 0
    self.sNow = sNow | Simulate one period of the fashion victim model for this type. Each
agent receives an idiosyncratic preference shock and chooses whether to
change styles (using the optimal decision rule).
Parameters
----------
none
Returns
-------
none | Below is the instruction that describes the task:
### Input:
Simulate one period of the fashion victim model for this type. Each
agent receives an idiosyncratic preference shock and chooses whether to
change styles (using the optimal decision rule).
Parameters
----------
none
Returns
-------
none
### Response:
def simOnePrd(self):
'''
    Simulate one period of the fashion victim model for this type. Each
agent receives an idiosyncratic preference shock and chooses whether to
change styles (using the optimal decision rule).
Parameters
----------
none
Returns
-------
none
'''
pNow = self.pNow
sPrev = self.sNow
J2Pprob = self.switchFuncJock(pNow)
P2Jprob = self.switchFuncPunk(pNow)
Shks = self.RNG.rand(self.pop_size)
J2P = np.logical_and(sPrev == 0,Shks < J2Pprob)
P2J = np.logical_and(sPrev == 1,Shks < P2Jprob)
sNow = copy(sPrev)
sNow[J2P] = 1
sNow[P2J] = 0
self.sNow = sNow |
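Reading the update rule: a jock (style 0) switches to punk with probability `switchFuncJock(pNow)`, and a punk (style 1) switches back with probability `switchFuncPunk(pNow)`. A hedged usage sketch, assuming `agents` is an initialized instance of the surrounding type:

```python
agents.simOnePrd()               # one period of idiosyncratic shocks
punk_share = agents.sNow.mean()  # sNow is a 0/1 array, so this is the punk fraction
```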
def _update_config(self,directory,filename):
"""Manages FB config files"""
basefilename=os.path.splitext(filename)[0]
ext=os.path.splitext(filename)[1].lower()
#if filename==LOCATION_FILE:
#return self._update_config_location(directory)
#FIXME
#elif filename==TAG_FILE:
#return self._update_config_tags(directory)
if filename==SET_FILE:
print("%s - Moving photos to album"%(filename))
return self._upload_media(directory,movealbum_request=True)
elif filename==MEGAPIXEL_FILE:
print("%s - Resizing photos"%(filename))
return self._upload_media(directory,resize_request=True)
elif ext in self.FB_META_EXTENSIONS:
print("%s - Changing photo title"%(basefilename))
return self._upload_media(directory,basefilename,changetitle_request=True)
    return False | Manages FB config files | Below is the instruction that describes the task:
### Input:
Manages FB config files
### Response:
def _update_config(self,directory,filename):
"""Manages FB config files"""
basefilename=os.path.splitext(filename)[0]
ext=os.path.splitext(filename)[1].lower()
#if filename==LOCATION_FILE:
#return self._update_config_location(directory)
#FIXME
#elif filename==TAG_FILE:
#return self._update_config_tags(directory)
if filename==SET_FILE:
print("%s - Moving photos to album"%(filename))
return self._upload_media(directory,movealbum_request=True)
elif filename==MEGAPIXEL_FILE:
print("%s - Resizing photos"%(filename))
return self._upload_media(directory,resize_request=True)
elif ext in self.FB_META_EXTENSIONS:
print("%s - Changing photo title"%(basefilename))
return self._upload_media(directory,basefilename,changetitle_request=True)
return False |
def set_default(self):
"""
ensures there's only 1 default group
(logic overridable via custom models)
"""
queryset = self.get_default_queryset()
if queryset.exists():
queryset.update(default=False) | ensures there's only 1 default group
(logic overridable via custom models) | Below is the instruction that describes the task:
### Input:
ensures there's only 1 default group
(logic overridable via custom models)
### Response:
def set_default(self):
"""
ensures there's only 1 default group
(logic overridable via custom models)
"""
queryset = self.get_default_queryset()
if queryset.exists():
queryset.update(default=False) |
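A hedged sketch of the intended call pattern on this Django-style model; it assumes `get_default_queryset()` selects the *other* groups currently flagged as default and that the caller persists the instance (both assumptions):

```python
group.set_default()   # demote whatever group was the default before
group.default = True
group.save()          # hypothetical Django-style persistence step
```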
def get_data_en_intervalo(d0=None, df=None, date_fmt=DATE_FMT,
usar_multithread=USAR_MULTITHREAD, max_threads_requests=MAX_THREADS_REQUESTS,
timeout=TIMEOUT, num_retries=NUM_RETRIES,
func_procesa_data_dia=None, func_url_data_dia=None, max_act_exec=None, verbose=True,
data_extra_request=None):
"""
    Fetches the raw data from the network by issuing multiple requests at a time
    Processes the raw data obtained from the network, converting it into a Pandas DataFrame
"""
def _date(dia_string):
if dia_string is None:
return dt.date.today()
elif type(dia_string) is pd.Timestamp:
return dia_string.to_datetime().date()
elif type(dia_string) is not dt.date:
return dt.datetime.strptime(dia_string, date_fmt).date()
else:
return dia_string
def _procesa_merge_datos_dias(lista_m, dict_data_merge):
def _merge_datos_dias(key_tarea_merge, dict_merge_dias):
dict_merge_dias[key_tarea_merge] = merge_data(dict_merge_dias[key_tarea_merge])
if num_dias > 1 and usar_multithread:
lista_grupos = list()
grupos_dias = [lista_m[i:i + DIAS_MERGE_MAX] for i in np.arange(0, num_dias, DIAS_MERGE_MAX)]
for grupo in grupos_dias:
lista_dfs = list()
for key_g in grupo:
lista_dfs.append(dict_data_merge[key_g])
lista_grupos.append(lista_dfs)
keys_grupos = np.arange(len(lista_grupos))
dict_merge = dict(zip(keys_grupos, lista_grupos))
procesa_tareas_paralelo(keys_grupos, dict_merge, _merge_datos_dias,
'\nMERGE DATAFRAMES DE DATOS WEB DIARIOS (%lu GRUPOS)',
usar_multithread, MAX_THREADS_MERGE, verbose=verbose)
dict_merge_final = {0: [dict_merge[k] for k in dict_merge.keys()]}
_merge_datos_dias(0, dict_merge_final)
return dict_merge_final[0]
else:
return merge_data(list(dict_data_merge.values()))
def _hay_errores_en_datos_obtenidos(dict_data_obtenida):
keys = list(sorted(dict_data_obtenida.keys()))
data_es_none = [dict_data_obtenida[k] is None for k in keys]
error = False
if any(data_es_none):
df_err = pd.DataFrame({'key': keys, 'is_bad': data_es_none})
df_err['date'] = df_err['key'].apply(lambda x: pd.Timestamp(x))
df_err['delta'] = (df_err['date'] - df_err['date'].shift(1)).fillna(3600 * 24)
df_g = df_err[~df_err['is_bad']].copy()
df_g['delta_g'] = (df_g['date'] - df_g['date'].shift(1)).fillna(3600 * 24)
# print(df_err)
# print(df_err['delta'].describe())
# print(df_g['delta_g'].describe())
if df_g['delta_g'].max() < pd.Timedelta(2, 'D'):
bad_days = df_err[df_err['is_bad']]['key'].tolist()
if verbose:
print('HAY TAREAS NO REALIZADAS ({}):\n{}'.format(len(bad_days), bad_days))
logging.error('HAY TAREAS NO REALIZADAS ({}):\n{}'.format(len(bad_days), bad_days))
error = False
else:
if verbose:
print('NO HAY NINGUNA TAREA REALIZADA!')
logging.error('NO HAY NINGUNA TAREA REALIZADA!')
bad_days = df_err['key'].tolist()
error = True
for k in bad_days:
dict_data_obtenida.pop(k)
return error
def _obtiene_request(url, key, headers=None, p_req=None, json_r=False, **kwargs_r):
if type(url) is list:
results = [request_data_url(u, headers, num_retries, timeout, p_req, json_r, **kwargs_r) for u in url]
dict_data[key] = list(zip(*results))
else:
stat_response = request_data_url(url, headers, num_retries, timeout, p_req, json_r, **kwargs_r)
dict_data[key] = stat_response
def _obtiene_data_dia(key, dict_data_responses):
url = func_url_data_dia(key)
extra = dict_data_responses[key] if type(dict_data_responses[key]) is dict else {}
headers = extra.pop('headers', None)
json_req = extra.pop('json_req', False)
params_request = extra.pop('params_request', None)
try:
count_process, ok = 0, -1
while count_process < num_retries and ok != 0:
_obtiene_request(url, key, headers, params_request, json_req, **extra)
data_import, ok = func_procesa_data_dia(key, dict_data_responses[key][1])
if ok == 0:
dict_data_responses[key] = data_import
                elif ok == -2:  # early-exit code: do not retry
count_process = num_retries
count_process += 1
if ok != 0:
dict_data_responses[key] = None
except Exception as e:
if verbose:
print('PROCESANDO DATA!???? (Exception: {}; KEY: {}; URL: {})'.format(e, key, url))
logging.error('PROCESANDO DATA!???? (Exception: {}; KEY: {}; URL: {})'.format(e, key, url))
dict_data_responses[key] = None
tic_ini = time.time()
lista_dias = [dia.strftime(date_fmt) for dia in pd.date_range(_date(d0), _date(df))]
    if max_act_exec:  # TO REMOVE: only limits how many days are fetched at once.
lista_dias = lista_dias[:max_act_exec]
num_dias = len(lista_dias)
if data_extra_request is None:
dict_data = dict(zip(lista_dias, np.zeros(num_dias)))
else:
dict_data = dict(zip(lista_dias, [data_extra_request.copy() for _ in range(num_dias)]))
    # IMPORT THE DATA AND PROCESS IT
procesa_tareas_paralelo(lista_dias, dict_data, _obtiene_data_dia,
'\nPROCESADO DE DATOS WEB DE %lu DÍAS',
usar_multithread, max_threads_requests, verbose=verbose)
hay_errores = _hay_errores_en_datos_obtenidos(dict_data)
    # MERGE THE DATA
# print(len(lista_dias), len(dict_data.keys()))
if not hay_errores and num_dias > 0:
# data_merge = _procesa_merge_datos_dias(lista_dias, dict_data)
data_merge = _procesa_merge_datos_dias(list(sorted(dict_data.keys())), dict_data)
str_resumen_import = '\n%lu días importados [Proceso Total %.2f seg, %.4f seg/día]' \
% (num_dias, time.time() - tic_ini, (time.time() - tic_ini) / float(num_dias))
return data_merge, hay_errores, str_resumen_import
else:
        return None, hay_errores, 'ERROR IMPORTANDO!!' | Fetches the raw data from the network by issuing multiple requests at a time
Processes the raw data obtained from the network, converting it into a Pandas DataFrame | Below is the instruction that describes the task:
### Input:
Fetches the raw data from the network by issuing multiple requests at a time
Processes the raw data obtained from the network, converting it into a Pandas DataFrame
### Response:
def get_data_en_intervalo(d0=None, df=None, date_fmt=DATE_FMT,
usar_multithread=USAR_MULTITHREAD, max_threads_requests=MAX_THREADS_REQUESTS,
timeout=TIMEOUT, num_retries=NUM_RETRIES,
func_procesa_data_dia=None, func_url_data_dia=None, max_act_exec=None, verbose=True,
data_extra_request=None):
"""
    Fetches the raw data from the network by issuing multiple requests at a time
    Processes the raw data obtained from the network, converting it into a Pandas DataFrame
"""
def _date(dia_string):
if dia_string is None:
return dt.date.today()
elif type(dia_string) is pd.Timestamp:
return dia_string.to_datetime().date()
elif type(dia_string) is not dt.date:
return dt.datetime.strptime(dia_string, date_fmt).date()
else:
return dia_string
def _procesa_merge_datos_dias(lista_m, dict_data_merge):
def _merge_datos_dias(key_tarea_merge, dict_merge_dias):
dict_merge_dias[key_tarea_merge] = merge_data(dict_merge_dias[key_tarea_merge])
if num_dias > 1 and usar_multithread:
lista_grupos = list()
grupos_dias = [lista_m[i:i + DIAS_MERGE_MAX] for i in np.arange(0, num_dias, DIAS_MERGE_MAX)]
for grupo in grupos_dias:
lista_dfs = list()
for key_g in grupo:
lista_dfs.append(dict_data_merge[key_g])
lista_grupos.append(lista_dfs)
keys_grupos = np.arange(len(lista_grupos))
dict_merge = dict(zip(keys_grupos, lista_grupos))
procesa_tareas_paralelo(keys_grupos, dict_merge, _merge_datos_dias,
'\nMERGE DATAFRAMES DE DATOS WEB DIARIOS (%lu GRUPOS)',
usar_multithread, MAX_THREADS_MERGE, verbose=verbose)
dict_merge_final = {0: [dict_merge[k] for k in dict_merge.keys()]}
_merge_datos_dias(0, dict_merge_final)
return dict_merge_final[0]
else:
return merge_data(list(dict_data_merge.values()))
def _hay_errores_en_datos_obtenidos(dict_data_obtenida):
keys = list(sorted(dict_data_obtenida.keys()))
data_es_none = [dict_data_obtenida[k] is None for k in keys]
error = False
if any(data_es_none):
df_err = pd.DataFrame({'key': keys, 'is_bad': data_es_none})
df_err['date'] = df_err['key'].apply(lambda x: pd.Timestamp(x))
df_err['delta'] = (df_err['date'] - df_err['date'].shift(1)).fillna(3600 * 24)
df_g = df_err[~df_err['is_bad']].copy()
df_g['delta_g'] = (df_g['date'] - df_g['date'].shift(1)).fillna(3600 * 24)
# print(df_err)
# print(df_err['delta'].describe())
# print(df_g['delta_g'].describe())
if df_g['delta_g'].max() < pd.Timedelta(2, 'D'):
bad_days = df_err[df_err['is_bad']]['key'].tolist()
if verbose:
print('HAY TAREAS NO REALIZADAS ({}):\n{}'.format(len(bad_days), bad_days))
logging.error('HAY TAREAS NO REALIZADAS ({}):\n{}'.format(len(bad_days), bad_days))
error = False
else:
if verbose:
print('NO HAY NINGUNA TAREA REALIZADA!')
logging.error('NO HAY NINGUNA TAREA REALIZADA!')
bad_days = df_err['key'].tolist()
error = True
for k in bad_days:
dict_data_obtenida.pop(k)
return error
def _obtiene_request(url, key, headers=None, p_req=None, json_r=False, **kwargs_r):
if type(url) is list:
results = [request_data_url(u, headers, num_retries, timeout, p_req, json_r, **kwargs_r) for u in url]
dict_data[key] = list(zip(*results))
else:
stat_response = request_data_url(url, headers, num_retries, timeout, p_req, json_r, **kwargs_r)
dict_data[key] = stat_response
def _obtiene_data_dia(key, dict_data_responses):
url = func_url_data_dia(key)
extra = dict_data_responses[key] if type(dict_data_responses[key]) is dict else {}
headers = extra.pop('headers', None)
json_req = extra.pop('json_req', False)
params_request = extra.pop('params_request', None)
try:
count_process, ok = 0, -1
while count_process < num_retries and ok != 0:
_obtiene_request(url, key, headers, params_request, json_req, **extra)
data_import, ok = func_procesa_data_dia(key, dict_data_responses[key][1])
if ok == 0:
dict_data_responses[key] = data_import
                elif ok == -2:  # early-exit code: do not retry
count_process = num_retries
count_process += 1
if ok != 0:
dict_data_responses[key] = None
except Exception as e:
if verbose:
print('PROCESANDO DATA!???? (Exception: {}; KEY: {}; URL: {})'.format(e, key, url))
logging.error('PROCESANDO DATA!???? (Exception: {}; KEY: {}; URL: {})'.format(e, key, url))
dict_data_responses[key] = None
tic_ini = time.time()
lista_dias = [dia.strftime(date_fmt) for dia in pd.date_range(_date(d0), _date(df))]
    if max_act_exec:  # TO REMOVE: only limits how many days are fetched at once.
lista_dias = lista_dias[:max_act_exec]
num_dias = len(lista_dias)
if data_extra_request is None:
dict_data = dict(zip(lista_dias, np.zeros(num_dias)))
else:
dict_data = dict(zip(lista_dias, [data_extra_request.copy() for _ in range(num_dias)]))
    # IMPORT THE DATA AND PROCESS IT
procesa_tareas_paralelo(lista_dias, dict_data, _obtiene_data_dia,
'\nPROCESADO DE DATOS WEB DE %lu DÍAS',
usar_multithread, max_threads_requests, verbose=verbose)
hay_errores = _hay_errores_en_datos_obtenidos(dict_data)
    # MERGE THE DATA
# print(len(lista_dias), len(dict_data.keys()))
if not hay_errores and num_dias > 0:
# data_merge = _procesa_merge_datos_dias(lista_dias, dict_data)
data_merge = _procesa_merge_datos_dias(list(sorted(dict_data.keys())), dict_data)
str_resumen_import = '\n%lu días importados [Proceso Total %.2f seg, %.4f seg/día]' \
% (num_dias, time.time() - tic_ini, (time.time() - tic_ini) / float(num_dias))
return data_merge, hay_errores, str_resumen_import
else:
return None, hay_errores, 'ERROR IMPORTANDO!!' |
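A hedged usage sketch of the two required callbacks; the URL template and the `parse_response_to_df` helper are hypothetical placeholders:

```python
def url_for_day(day):
    # `day` arrives as a 'YYYY-MM-DD'-style string (DATE_FMT)
    return 'https://example.com/data?date={}'.format(day)

def process_day(day, response):
    data = parse_response_to_df(response)  # hypothetical parser -> DataFrame
    return data, 0                         # ok == 0 signals success

data, had_errors, summary = get_data_en_intervalo(
    d0='2016-01-01', df='2016-01-07',
    func_url_data_dia=url_for_day,
    func_procesa_data_dia=process_day)
```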
def outgoing_edges(self, node):
"""
Returns a ``tuple`` of outgoing edges for a **node object**.
Arguments:
- node(``object``) **node object** present in the graph to be queried
for outgoing edges.
"""
    #TODO: pls make outgoing_edges less insane
edges = self.edges()
out_edges = []
for out_node, in_node in edges:
if node is out_node:
out_edges.append((out_node, in_node))
return tuple(out_edges) | Returns a ``tuple`` of outgoing edges for a **node object**.
Arguments:
- node(``object``) **node object** present in the graph to be queried
for outgoing edges. | Below is the instruction that describes the task:
### Input:
Returns a ``tuple`` of outgoing edges for a **node object**.
Arguments:
- node(``object``) **node object** present in the graph to be queried
for outgoing edges.
### Response:
def outgoing_edges(self, node):
"""
Returns a ``tuple`` of outgoing edges for a **node object**.
Arguments:
- node(``object``) **node object** present in the graph to be queried
for outgoing edges.
"""
    #TODO: pls make outgoing_edges less insane
edges = self.edges()
out_edges = []
for out_node, in_node in edges:
if node is out_node:
out_edges.append((out_node, in_node))
return tuple(out_edges) |
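Note the identity comparison (`node is out_node`): the caller must pass back the very object stored in the graph, not an equal copy. Usage sketch with a hypothetical graph type:

```python
a, b, c = object(), object(), object()
g = Graph()          # hypothetical graph exposing edges() and add_edge()
g.add_edge(a, b)
g.add_edge(a, c)
g.outgoing_edges(a)  # -> ((a, b), (a, c)), in edges() iteration order
```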
def delete_all_metadata(self):
"""
::
DELETE /:login/machines/:id/metadata
:Returns: current metadata
:rtype: empty :py:class:`dict`
Deletes all the metadata stored for this machine. Also explicitly
requests and returns the machine metadata so that the local copy stays
synchronized.
"""
j, r = self.datacenter.request('DELETE', self.path + '/metadata')
r.raise_for_status()
return self.get_metadata() | ::
DELETE /:login/machines/:id/metadata
:Returns: current metadata
:rtype: empty :py:class:`dict`
Deletes all the metadata stored for this machine. Also explicitly
requests and returns the machine metadata so that the local copy stays
synchronized. | Below is the instruction that describes the task:
### Input:
::
DELETE /:login/machines/:id/metadata
:Returns: current metadata
:rtype: empty :py:class:`dict`
Deletes all the metadata stored for this machine. Also explicitly
requests and returns the machine metadata so that the local copy stays
synchronized.
### Response:
def delete_all_metadata(self):
"""
::
DELETE /:login/machines/:id/metadata
:Returns: current metadata
:rtype: empty :py:class:`dict`
Deletes all the metadata stored for this machine. Also explicitly
requests and returns the machine metadata so that the local copy stays
synchronized.
"""
j, r = self.datacenter.request('DELETE', self.path + '/metadata')
r.raise_for_status()
return self.get_metadata() |
def _handle_reset(self):
"""Reset this tile.
This process needs to trigger the peripheral tile to reregister itself
with the controller and get new configuration variables. It also
needs to clear app_running.
"""
self._registered.clear()
self._start_received.clear()
self._hosted_app_running.clear()
super(EmulatedPeripheralTile, self)._handle_reset() | Reset this tile.
This process needs to trigger the peripheral tile to reregister itself
with the controller and get new configuration variables. It also
needs to clear app_running. | Below is the instruction that describes the task:
### Input:
Reset this tile.
This process needs to trigger the peripheral tile to reregister itself
with the controller and get new configuration variables. It also
needs to clear app_running.
### Response:
def _handle_reset(self):
"""Reset this tile.
This process needs to trigger the peripheral tile to reregister itself
with the controller and get new configuration variables. It also
needs to clear app_running.
"""
self._registered.clear()
self._start_received.clear()
self._hosted_app_running.clear()
super(EmulatedPeripheralTile, self)._handle_reset() |
def wheregreater(self, fieldname, value):
"""
Returns a new DataTable with rows only where the value at
`fieldname` > `value`.
"""
return self.mask([elem > value for elem in self[fieldname]]) | Returns a new DataTable with rows only where the value at
`fieldname` > `value`. | Below is the instruction that describes the task:
### Input:
Returns a new DataTable with rows only where the value at
`fieldname` > `value`.
### Response:
def wheregreater(self, fieldname, value):
"""
Returns a new DataTable with rows only where the value at
`fieldname` > `value`.
"""
return self.mask([elem > value for elem in self[fieldname]]) |
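A small sketch of the expected behaviour; the column-oriented constructor is an assumption about DataTable:

```python
t = DataTable({'age': [30, 42, 18]})  # hypothetical constructor
over_40 = t.wheregreater('age', 40)   # keeps only the age == 42 row
```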
def package_install(name, **kwargs):
'''
Install a "package" on the ssh server
'''
cmd = 'pkg_install ' + name
if kwargs.get('version', False):
cmd += ' ' + kwargs['version']
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
    return parse(out) | Install a "package" on the ssh server | Below is the instruction that describes the task:
### Input:
Install a "package" on the ssh server
### Response:
def package_install(name, **kwargs):
'''
Install a "package" on the ssh server
'''
cmd = 'pkg_install ' + name
if kwargs.get('version', False):
cmd += ' ' + kwargs['version']
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out) |
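A hedged call sketch, assuming the proxy's `DETAILS['server']` already holds a live SSH shell; the package name and version are placeholders:

```python
info = package_install('nginx', version='1.24.0')
# sends "pkg_install nginx 1.24.0" over the shell and returns the
# parse()'d output fields as a dict
```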
def _sync_notes(self, notes_json):
""""Populate the user's notes from a JSON encoded list."""
for note_json in notes_json:
note_id = note_json['id']
task_id = note_json['item_id']
if task_id not in self.tasks:
# ignore orphan notes
continue
task = self.tasks[task_id]
        self.notes[note_id] = Note(note_json, task) | Populate the user's notes from a JSON encoded list. | Below is the instruction that describes the task:
### Input:
Populate the user's notes from a JSON encoded list.
### Response:
def _sync_notes(self, notes_json):
""""Populate the user's notes from a JSON encoded list."""
for note_json in notes_json:
note_id = note_json['id']
task_id = note_json['item_id']
if task_id not in self.tasks:
# ignore orphan notes
continue
task = self.tasks[task_id]
self.notes[note_id] = Note(note_json, task) |
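The shape this expects, as a sketch: only `id` and `item_id` are read here, and notes whose task is unknown are dropped. The `user` object is an assumption:

```python
notes_json = [
    {'id': 101, 'item_id': 7},    # attached to user.tasks[7]
    {'id': 102, 'item_id': 999},  # orphan: task 999 unknown, silently skipped
]
user._sync_notes(notes_json)
```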
def create(self, task_name, clone_task=None):
""" Creates a new task directory.
`task_name`
Task name.
`clone_task`
Existing task name to use as a template for new task.
Returns boolean.
    * Raises ``ValueError`` if task name is invalid, ``TaskExists`` if task
already exists, or ``TaskNotFound`` if task for `clone_from`
doesn't exist.
"""
if not task_name or task_name.startswith('-'):
raise ValueError('Invalid task name')
try:
task_dir = self._get_task_dir(task_name)
if self.exists(task_dir):
raise errors.TaskExists(task_name)
task_cfg = self.get_config_path(task_name)
if clone_task:
if not self.exists(clone_task):
raise errors.TaskNotFound(clone_task)
# copy task directory
shutil.copytree(self._get_task_dir(clone_task), task_dir)
else:
os.mkdir(task_dir)
# write default task configuration
shutil.copy(self._default_task_config, task_cfg)
return True
except OSError:
shutil.rmtree(task_dir, ignore_errors=True)
return False | Creates a new task directory.
`task_name`
Task name.
`clone_task`
Existing task name to use as a template for new task.
Returns boolean.
* Raises ``ValueError`` if task name is invalid, ``TaskExists`` if task
already exists, or ``TaskNotFound`` if task for `clone_from`
doesn't exist. | Below is the instruction that describes the task:
### Input:
Creates a new task directory.
`task_name`
Task name.
`clone_task`
Existing task name to use as a template for new task.
Returns boolean.
* Raises ``ValueError`` if task name is invalid, ``TaskExists`` if task
already exists, or ``TaskNotFound`` if task for `clone_from`
doesn't exist.
### Response:
def create(self, task_name, clone_task=None):
""" Creates a new task directory.
`task_name`
Task name.
`clone_task`
Existing task name to use as a template for new task.
Returns boolean.
    * Raises ``ValueError`` if task name is invalid, ``TaskExists`` if task
already exists, or ``TaskNotFound`` if task for `clone_from`
doesn't exist.
"""
if not task_name or task_name.startswith('-'):
raise ValueError('Invalid task name')
try:
task_dir = self._get_task_dir(task_name)
if self.exists(task_dir):
raise errors.TaskExists(task_name)
task_cfg = self.get_config_path(task_name)
if clone_task:
if not self.exists(clone_task):
raise errors.TaskNotFound(clone_task)
# copy task directory
shutil.copytree(self._get_task_dir(clone_task), task_dir)
else:
os.mkdir(task_dir)
# write default task configuration
shutil.copy(self._default_task_config, task_cfg)
return True
except OSError:
shutil.rmtree(task_dir, ignore_errors=True)
return False |
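Sketch of the two creation modes, with hypothetical task names and manager instance:

```python
mgr.create('build-docs')                              # fresh dir + default config
mgr.create('build-docs-v2', clone_task='build-docs')  # copied from the template
```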
def add_styles(self):
"""Add the css to the svg"""
colors = self.graph.style.get_colors(self.id, self.graph._order)
strokes = self.get_strokes()
all_css = []
auto_css = ['file://base.css']
if self.graph.style._google_fonts:
auto_css.append(
'//fonts.googleapis.com/css?family=%s' %
quote_plus('|'.join(self.graph.style._google_fonts))
)
for css in auto_css + list(self.graph.css):
css_text = None
if css.startswith('inline:'):
css_text = css[len('inline:'):]
elif css.startswith('file://'):
css = css[len('file://'):]
if not os.path.exists(css):
css = os.path.join(os.path.dirname(__file__), 'css', css)
with io.open(css, encoding='utf-8') as f:
css_text = template(
f.read(),
style=self.graph.style,
colors=colors,
strokes=strokes,
id=self.id
)
if css_text is not None:
if not self.graph.pretty_print:
css_text = minify_css(css_text)
all_css.append(css_text)
else:
if css.startswith('//') and self.graph.force_uri_protocol:
css = '%s:%s' % (self.graph.force_uri_protocol, css)
self.processing_instructions.append(
etree.PI(u('xml-stylesheet'), u('href="%s"' % css))
)
self.node(
self.defs, 'style', type='text/css'
    ).text = '\n'.join(all_css) | Add the css to the svg | Below is the instruction that describes the task:
### Input:
Add the css to the svg
### Response:
def add_styles(self):
"""Add the css to the svg"""
colors = self.graph.style.get_colors(self.id, self.graph._order)
strokes = self.get_strokes()
all_css = []
auto_css = ['file://base.css']
if self.graph.style._google_fonts:
auto_css.append(
'//fonts.googleapis.com/css?family=%s' %
quote_plus('|'.join(self.graph.style._google_fonts))
)
for css in auto_css + list(self.graph.css):
css_text = None
if css.startswith('inline:'):
css_text = css[len('inline:'):]
elif css.startswith('file://'):
css = css[len('file://'):]
if not os.path.exists(css):
css = os.path.join(os.path.dirname(__file__), 'css', css)
with io.open(css, encoding='utf-8') as f:
css_text = template(
f.read(),
style=self.graph.style,
colors=colors,
strokes=strokes,
id=self.id
)
if css_text is not None:
if not self.graph.pretty_print:
css_text = minify_css(css_text)
all_css.append(css_text)
else:
if css.startswith('//') and self.graph.force_uri_protocol:
css = '%s:%s' % (self.graph.force_uri_protocol, css)
self.processing_instructions.append(
etree.PI(u('xml-stylesheet'), u('href="%s"' % css))
)
self.node(
self.defs, 'style', type='text/css'
).text = '\n'.join(all_css) |
def compute_eigen(self, n_comps=15, sym=None, sort='decrease'):
"""Compute eigen decomposition of transition matrix.
Parameters
----------
n_comps : `int`
Number of eigenvalues/vectors to be computed, set `n_comps = 0` if
you need all eigenvectors.
sym : `bool`
        Instead of computing the eigendecomposition of the asymmetric
        transition matrix, compute the eigendecomposition of the symmetric
Ktilde matrix.
matrix : sparse matrix, np.ndarray, optional (default: `.connectivities`)
Matrix to diagonalize. Merely for testing and comparison purposes.
Returns
-------
Writes the following attributes.
eigen_values : numpy.ndarray
Eigenvalues of transition matrix.
eigen_basis : numpy.ndarray
Matrix of eigenvectors (stored in columns). `.eigen_basis` is
projection of data matrix on right eigenvectors, that is, the
projection on the diffusion components. these are simply the
components of the right eigenvectors and can directly be used for
plotting.
"""
np.set_printoptions(precision=10)
if self._transitions_sym is None:
raise ValueError('Run `.compute_transitions` first.')
matrix = self._transitions_sym
# compute the spectrum
if n_comps == 0:
evals, evecs = scipy.linalg.eigh(matrix)
else:
n_comps = min(matrix.shape[0]-1, n_comps)
# ncv = max(2 * n_comps + 1, int(np.sqrt(matrix.shape[0])))
ncv = None
which = 'LM' if sort == 'decrease' else 'SM'
# it pays off to increase the stability with a bit more precision
matrix = matrix.astype(np.float64)
evals, evecs = scipy.sparse.linalg.eigsh(matrix, k=n_comps,
which=which, ncv=ncv)
evals, evecs = evals.astype(np.float32), evecs.astype(np.float32)
if sort == 'decrease':
evals = evals[::-1]
evecs = evecs[:, ::-1]
logg.info(' eigenvalues of transition matrix\n'
' {}'.format(str(evals).replace('\n', '\n ')))
if self._number_connected_components > len(evals)/2:
logg.warn('Transition matrix has many disconnected components!')
self._eigen_values = evals
self._eigen_basis = evecs | Compute eigen decomposition of transition matrix.
Parameters
----------
n_comps : `int`
Number of eigenvalues/vectors to be computed, set `n_comps = 0` if
you need all eigenvectors.
sym : `bool`
    Instead of computing the eigendecomposition of the asymmetric
    transition matrix, compute the eigendecomposition of the symmetric
Ktilde matrix.
matrix : sparse matrix, np.ndarray, optional (default: `.connectivities`)
Matrix to diagonalize. Merely for testing and comparison purposes.
Returns
-------
Writes the following attributes.
eigen_values : numpy.ndarray
Eigenvalues of transition matrix.
eigen_basis : numpy.ndarray
Matrix of eigenvectors (stored in columns). `.eigen_basis` is
projection of data matrix on right eigenvectors, that is, the
projection on the diffusion components. these are simply the
components of the right eigenvectors and can directly be used for
    plotting. | Below is the instruction that describes the task:
### Input:
Compute eigen decomposition of transition matrix.
Parameters
----------
n_comps : `int`
Number of eigenvalues/vectors to be computed, set `n_comps = 0` if
you need all eigenvectors.
sym : `bool`
    Instead of computing the eigendecomposition of the asymmetric
    transition matrix, compute the eigendecomposition of the symmetric
Ktilde matrix.
matrix : sparse matrix, np.ndarray, optional (default: `.connectivities`)
Matrix to diagonalize. Merely for testing and comparison purposes.
Returns
-------
Writes the following attributes.
eigen_values : numpy.ndarray
Eigenvalues of transition matrix.
eigen_basis : numpy.ndarray
Matrix of eigenvectors (stored in columns). `.eigen_basis` is
projection of data matrix on right eigenvectors, that is, the
projection on the diffusion components. these are simply the
components of the right eigenvectors and can directly be used for
plotting.
### Response:
def compute_eigen(self, n_comps=15, sym=None, sort='decrease'):
"""Compute eigen decomposition of transition matrix.
Parameters
----------
n_comps : `int`
Number of eigenvalues/vectors to be computed, set `n_comps = 0` if
you need all eigenvectors.
sym : `bool`
        Instead of computing the eigendecomposition of the asymmetric
        transition matrix, compute the eigendecomposition of the symmetric
Ktilde matrix.
matrix : sparse matrix, np.ndarray, optional (default: `.connectivities`)
Matrix to diagonalize. Merely for testing and comparison purposes.
Returns
-------
Writes the following attributes.
eigen_values : numpy.ndarray
Eigenvalues of transition matrix.
eigen_basis : numpy.ndarray
Matrix of eigenvectors (stored in columns). `.eigen_basis` is
projection of data matrix on right eigenvectors, that is, the
projection on the diffusion components. these are simply the
components of the right eigenvectors and can directly be used for
plotting.
"""
np.set_printoptions(precision=10)
if self._transitions_sym is None:
raise ValueError('Run `.compute_transitions` first.')
matrix = self._transitions_sym
# compute the spectrum
if n_comps == 0:
evals, evecs = scipy.linalg.eigh(matrix)
else:
n_comps = min(matrix.shape[0]-1, n_comps)
# ncv = max(2 * n_comps + 1, int(np.sqrt(matrix.shape[0])))
ncv = None
which = 'LM' if sort == 'decrease' else 'SM'
# it pays off to increase the stability with a bit more precision
matrix = matrix.astype(np.float64)
evals, evecs = scipy.sparse.linalg.eigsh(matrix, k=n_comps,
which=which, ncv=ncv)
evals, evecs = evals.astype(np.float32), evecs.astype(np.float32)
if sort == 'decrease':
evals = evals[::-1]
evecs = evecs[:, ::-1]
logg.info(' eigenvalues of transition matrix\n'
' {}'.format(str(evals).replace('\n', '\n ')))
if self._number_connected_components > len(evals)/2:
logg.warn('Transition matrix has many disconnected components!')
self._eigen_values = evals
self._eigen_basis = evecs |
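A hedged call-order sketch; `dm` stands for the surrounding diffusion-map/neighbors object, and the public `eigen_values`/`eigen_basis` accessors are assumed per the docstring:

```python
dm.compute_transitions()      # must run first: fills _transitions_sym
dm.compute_eigen(n_comps=10)  # 10 leading eigenvalues, sorted decreasing
evals, basis = dm.eigen_values, dm.eigen_basis
```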
def addUsage_Label(self,usage_label):
'''Appends one Usage_Label to usage_labels
'''
if isinstance(usage_label, Usage_Label):
self.usage_labels.append(usage_label)
else:
        raise Usage_LabelError(
            'usage_label Type should be Usage_Label, not %s'
            % type(usage_label)) | Appends one Usage_Label to usage_labels | Below is the instruction that describes the task:
### Input:
Appends one Usage_Label to usage_labels
### Response:
def addUsage_Label(self,usage_label):
'''Appends one Usage_Label to usage_labels
'''
if isinstance(usage_label, Usage_Label):
self.usage_labels.append(usage_label)
else:
        raise Usage_LabelError(
            'usage_label Type should be Usage_Label, not %s'
            % type(usage_label)) |
def connect(host='localhost',
port=5672,
username='guest', password='guest',
virtual_host='/',
on_connection_close=None, *,
loop=None, sock=None, **kwargs):
"""
Connect to an AMQP server on the given host and port.
Log in to the given virtual host using the supplied credentials.
This function is a :ref:`coroutine <coroutine>`.
:param str host: the host server to connect to.
:param int port: the port which the AMQP server is listening on.
:param str username: the username to authenticate with.
:param str password: the password to authenticate with.
:param str virtual_host: the AMQP virtual host to connect to.
:param func on_connection_close: function called after connection lost.
:keyword BaseEventLoop loop: An instance of :class:`~asyncio.BaseEventLoop` to use.
(Defaults to :func:`asyncio.get_event_loop()`)
:keyword socket sock: A :func:`~socket.socket` instance to use for the connection.
This is passed on to :meth:`loop.create_connection() <asyncio.BaseEventLoop.create_connection>`.
If ``sock`` is supplied then ``host`` and ``port`` will be ignored.
Further keyword arguments are passed on to :meth:`loop.create_connection() <asyncio.BaseEventLoop.create_connection>`.
This function will set TCP_NODELAY on TCP and TCP6 sockets either on supplied ``sock`` or created one.
:return: the :class:`Connection` object.
"""
from .protocol import AMQP
from .routing import Dispatcher
from .connection import open_connection
loop = asyncio.get_event_loop() if loop is None else loop
if sock is None:
kwargs['host'] = host
kwargs['port'] = port
else:
kwargs['sock'] = sock
dispatcher = Dispatcher()
def protocol_factory():
return AMQP(dispatcher, loop, close_callback=on_connection_close)
transport, protocol = yield from loop.create_connection(protocol_factory, **kwargs)
    # RPC-like applications require TCP_NODELAY in order to achieve
    # minimal response time. Actually, this library sends data in one
    # big chunk, so this will not affect TCP performance.
sk = transport.get_extra_info('socket')
    # 1. Unfortunately we cannot check the socket type (sk.type == socket.SOCK_STREAM). https://bugs.python.org/issue21327
    # 2. proto remains zero if it was not specified when the socket was created
if (sk.family in (socket.AF_INET, socket.AF_INET6)) and (sk.proto in (0, socket.IPPROTO_TCP)):
sk.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
connection_info = {
'username': username,
'password': password,
'virtual_host': virtual_host
}
connection = yield from open_connection(
loop, transport, protocol, dispatcher, connection_info)
return connection | Connect to an AMQP server on the given host and port.
Log in to the given virtual host using the supplied credentials.
This function is a :ref:`coroutine <coroutine>`.
:param str host: the host server to connect to.
:param int port: the port which the AMQP server is listening on.
:param str username: the username to authenticate with.
:param str password: the password to authenticate with.
:param str virtual_host: the AMQP virtual host to connect to.
:param func on_connection_close: function called after connection lost.
:keyword BaseEventLoop loop: An instance of :class:`~asyncio.BaseEventLoop` to use.
(Defaults to :func:`asyncio.get_event_loop()`)
:keyword socket sock: A :func:`~socket.socket` instance to use for the connection.
This is passed on to :meth:`loop.create_connection() <asyncio.BaseEventLoop.create_connection>`.
If ``sock`` is supplied then ``host`` and ``port`` will be ignored.
Further keyword arguments are passed on to :meth:`loop.create_connection() <asyncio.BaseEventLoop.create_connection>`.
This function will set TCP_NODELAY on TCP and TCP6 sockets either on supplied ``sock`` or created one.
:return: the :class:`Connection` object. | Below is the instruction that describes the task:
### Input:
Connect to an AMQP server on the given host and port.
Log in to the given virtual host using the supplied credentials.
This function is a :ref:`coroutine <coroutine>`.
:param str host: the host server to connect to.
:param int port: the port which the AMQP server is listening on.
:param str username: the username to authenticate with.
:param str password: the password to authenticate with.
:param str virtual_host: the AMQP virtual host to connect to.
:param func on_connection_close: function called after connection lost.
:keyword BaseEventLoop loop: An instance of :class:`~asyncio.BaseEventLoop` to use.
(Defaults to :func:`asyncio.get_event_loop()`)
:keyword socket sock: A :func:`~socket.socket` instance to use for the connection.
This is passed on to :meth:`loop.create_connection() <asyncio.BaseEventLoop.create_connection>`.
If ``sock`` is supplied then ``host`` and ``port`` will be ignored.
Further keyword arguments are passed on to :meth:`loop.create_connection() <asyncio.BaseEventLoop.create_connection>`.
This function will set TCP_NODELAY on TCP and TCP6 sockets either on supplied ``sock`` or created one.
:return: the :class:`Connection` object.
### Response:
def connect(host='localhost',
port=5672,
username='guest', password='guest',
virtual_host='/',
on_connection_close=None, *,
loop=None, sock=None, **kwargs):
"""
Connect to an AMQP server on the given host and port.
Log in to the given virtual host using the supplied credentials.
This function is a :ref:`coroutine <coroutine>`.
:param str host: the host server to connect to.
:param int port: the port which the AMQP server is listening on.
:param str username: the username to authenticate with.
:param str password: the password to authenticate with.
:param str virtual_host: the AMQP virtual host to connect to.
:param func on_connection_close: function called after connection lost.
:keyword BaseEventLoop loop: An instance of :class:`~asyncio.BaseEventLoop` to use.
(Defaults to :func:`asyncio.get_event_loop()`)
:keyword socket sock: A :func:`~socket.socket` instance to use for the connection.
This is passed on to :meth:`loop.create_connection() <asyncio.BaseEventLoop.create_connection>`.
If ``sock`` is supplied then ``host`` and ``port`` will be ignored.
Further keyword arguments are passed on to :meth:`loop.create_connection() <asyncio.BaseEventLoop.create_connection>`.
This function will set TCP_NODELAY on TCP and TCP6 sockets either on supplied ``sock`` or created one.
:return: the :class:`Connection` object.
"""
from .protocol import AMQP
from .routing import Dispatcher
from .connection import open_connection
loop = asyncio.get_event_loop() if loop is None else loop
if sock is None:
kwargs['host'] = host
kwargs['port'] = port
else:
kwargs['sock'] = sock
dispatcher = Dispatcher()
def protocol_factory():
return AMQP(dispatcher, loop, close_callback=on_connection_close)
transport, protocol = yield from loop.create_connection(protocol_factory, **kwargs)
    # RPC-like applications require TCP_NODELAY in order to achieve
    # minimal response time. Actually, this library sends data in one
    # big chunk, so this will not affect TCP performance.
sk = transport.get_extra_info('socket')
    # 1. Unfortunately we cannot check the socket type (sk.type == socket.SOCK_STREAM). https://bugs.python.org/issue21327
    # 2. proto remains zero if it was not specified when the socket was created
if (sk.family in (socket.AF_INET, socket.AF_INET6)) and (sk.proto in (0, socket.IPPROTO_TCP)):
sk.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
connection_info = {
'username': username,
'password': password,
'virtual_host': virtual_host
}
connection = yield from open_connection(
loop, transport, protocol, dispatcher, connection_info)
return connection |
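Usage in the library's generator-coroutine style, as a sketch; the broker address is a placeholder and the `close()` coroutine is assumed:

```python
import asyncio

@asyncio.coroutine
def main():
    conn = yield from connect('broker.example.com', 5672,
                              username='guest', password='guest')
    # ... open channels, declare exchanges/queues ...
    yield from conn.close()  # assumed close coroutine

asyncio.get_event_loop().run_until_complete(main())
```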
def _create_aural_content_element(self, content, data_property_value):
"""
    Create an element to show the content, only to aural displays.
:param content: The text content of element.
:type content: str
:param data_property_value: The value of custom attribute used to
identify the fix.
:type data_property_value: str
:return: The element to show the content.
:rtype: hatemile.util.html.htmldomelement.HTMLDOMElement
"""
content_element = self._create_content_element(
content,
data_property_value
)
content_element.set_attribute('unselectable', 'on')
content_element.set_attribute('class', 'screen-reader-only')
    return content_element | Create an element to show the content, only to aural displays.
:param content: The text content of element.
:type content: str
:param data_property_value: The value of custom attribute used to
identify the fix.
:type data_property_value: str
:return: The element to show the content.
:rtype: hatemile.util.html.htmldomelement.HTMLDOMElement | Below is the instruction that describes the task:
### Input:
Create an element to show the content, only to aural displays.
:param content: The text content of element.
:type content: str
:param data_property_value: The value of custom attribute used to
identify the fix.
:type data_property_value: str
:return: The element to show the content.
:rtype: hatemile.util.html.htmldomelement.HTMLDOMElement
### Response:
def _create_aural_content_element(self, content, data_property_value):
"""
    Create an element to show the content, only to aural displays.
:param content: The text content of element.
:type content: str
:param data_property_value: The value of custom attribute used to
identify the fix.
:type data_property_value: str
:return: The element to show the content.
:rtype: hatemile.util.html.htmldomelement.HTMLDOMElement
"""
content_element = self._create_content_element(
content,
data_property_value
)
content_element.set_attribute('unselectable', 'on')
content_element.set_attribute('class', 'screen-reader-only')
return content_element |
def merge_ligolws(elem):
"""
Merge all LIGO_LW elements that are immediate children of elem by
appending their children to the first.
"""
ligolws = [child for child in elem.childNodes if child.tagName == ligolw.LIGO_LW.tagName]
if ligolws:
dest = ligolws.pop(0)
for src in ligolws:
# copy children; LIGO_LW elements have no attributes
            # iterate over a snapshot: map() is lazy on Python 3 and would
            # otherwise silently do nothing here
            for child in list(src.childNodes):
                dest.appendChild(child)
# unlink from parent
if src.parentNode is not None:
src.parentNode.removeChild(src)
return elem | Merge all LIGO_LW elements that are immediate children of elem by
appending their children to the first. | Below is the instruction that describes the task:
### Input:
Merge all LIGO_LW elements that are immediate children of elem by
appending their children to the first.
### Response:
def merge_ligolws(elem):
"""
Merge all LIGO_LW elements that are immediate children of elem by
appending their children to the first.
"""
ligolws = [child for child in elem.childNodes if child.tagName == ligolw.LIGO_LW.tagName]
if ligolws:
dest = ligolws.pop(0)
for src in ligolws:
# copy children; LIGO_LW elements have no attributes
            # iterate over a snapshot: map() is lazy on Python 3 and would
            # otherwise silently do nothing here
            for child in list(src.childNodes):
                dest.appendChild(child)
# unlink from parent
if src.parentNode is not None:
src.parentNode.removeChild(src)
return elem |
def get_diffs(history):
"""
Look at files and compute the diffs intelligently
"""
# First get all possible representations
mgr = plugins_get_mgr()
keys = mgr.search('representation')['representation']
representations = [mgr.get_by_key('representation', k) for k in keys]
for i in range(len(history)):
if i+1 > len(history) - 1:
continue
prev = history[i]
curr = history[i+1]
#print(prev['subject'], "==>", curr['subject'])
#print(curr['changes'])
for c in curr['changes']:
path = c['path']
# Skip the metadata file
if c['path'].endswith('datapackage.json'):
continue
# Find a handler for this kind of file...
handler = None
for r in representations:
if r.can_process(path):
handler = r
break
if handler is None:
continue
# print(path, "being handled by", handler)
v1_hex = prev['commit']
v2_hex = curr['commit']
temp1 = tempfile.mkdtemp(prefix="dgit-diff-")
try:
for h in [v1_hex, v2_hex]:
filename = '{}/{}/checkout.tar'.format(temp1, h)
try:
os.makedirs(os.path.dirname(filename))
except:
pass
extractcmd = ['git', 'archive', '-o', filename, h, path]
output = run(extractcmd)
if 'fatal' in output:
raise Exception("File not present in commit")
with cd(os.path.dirname(filename)):
cmd = ['tar', 'xvf', 'checkout.tar']
output = run(cmd)
if 'fatal' in output:
print("Cleaning up - fatal 1", temp1)
shutil.rmtree(temp1)
continue
# Check to make sure that
path1 = os.path.join(temp1, v1_hex, path)
path2 = os.path.join(temp1, v2_hex, path)
if not os.path.exists(path1) or not os.path.exists(path2):
# print("One of the two output files is missing")
shutil.rmtree(temp1)
continue
#print(path1, path2)
# Now call the handler
diff = handler.get_diff(path1, path2)
# print("Inserting diff", diff)
c['diff'] = diff
except Exception as e:
#traceback.print_exc()
#print("Cleaning up - Exception ", temp1)
                shutil.rmtree(temp1) | Look at files and compute the diffs intelligently | Below is the instruction that describes the task:
### Input:
Look at files and compute the diffs intelligently
### Response:
def get_diffs(history):
"""
Look at files and compute the diffs intelligently
"""
# First get all possible representations
mgr = plugins_get_mgr()
keys = mgr.search('representation')['representation']
representations = [mgr.get_by_key('representation', k) for k in keys]
for i in range(len(history)):
if i+1 > len(history) - 1:
continue
prev = history[i]
curr = history[i+1]
#print(prev['subject'], "==>", curr['subject'])
#print(curr['changes'])
for c in curr['changes']:
path = c['path']
# Skip the metadata file
if c['path'].endswith('datapackage.json'):
continue
# Find a handler for this kind of file...
handler = None
for r in representations:
if r.can_process(path):
handler = r
break
if handler is None:
continue
# print(path, "being handled by", handler)
v1_hex = prev['commit']
v2_hex = curr['commit']
temp1 = tempfile.mkdtemp(prefix="dgit-diff-")
try:
for h in [v1_hex, v2_hex]:
filename = '{}/{}/checkout.tar'.format(temp1, h)
try:
os.makedirs(os.path.dirname(filename))
except:
pass
extractcmd = ['git', 'archive', '-o', filename, h, path]
output = run(extractcmd)
if 'fatal' in output:
raise Exception("File not present in commit")
with cd(os.path.dirname(filename)):
cmd = ['tar', 'xvf', 'checkout.tar']
output = run(cmd)
if 'fatal' in output:
print("Cleaning up - fatal 1", temp1)
shutil.rmtree(temp1)
continue
# Check to make sure that
path1 = os.path.join(temp1, v1_hex, path)
path2 = os.path.join(temp1, v2_hex, path)
if not os.path.exists(path1) or not os.path.exists(path2):
# print("One of the two output files is missing")
shutil.rmtree(temp1)
continue
#print(path1, path2)
# Now call the handler
diff = handler.get_diff(path1, path2)
# print("Inserting diff", diff)
c['diff'] = diff
except Exception as e:
#traceback.print_exc()
#print("Cleaning up - Exception ", temp1)
shutil.rmtree(temp1) |
def encode_data(self, data, attributes):
'''(INTERNAL) Encodes a line of data.
    Data instances follow the csv format, i.e., attribute values are
    delimited by commas, having been converted from csv.
:param data: a list of values.
:param attributes: a list of attributes. Used to check if data is valid.
:return: a string with the encoded data line.
'''
current_row = 0
for inst in data:
if len(inst) != len(attributes):
raise BadObject(
'Instance %d has %d attributes, expected %d' %
(current_row, len(inst), len(attributes))
)
new_data = []
for value in inst:
if value is None or value == u'' or value != value:
s = '?'
else:
s = encode_string(unicode(value))
new_data.append(s)
current_row += 1
yield u','.join(new_data) | (INTERNAL) Encodes a line of data.
Data instances follow the csv format, i.e., attribute values are
delimited by commas, after being converted from csv.
:param data: a list of values.
:param attributes: a list of attributes. Used to check if data is valid.
:return: a string with the encoded data line. | Below is the instruction that describes the task:
### Input:
(INTERNAL) Encodes a line of data.
Data instances follow the csv format, i.e., attribute values are
delimited by commas, after being converted from csv.
:param data: a list of values.
:param attributes: a list of attributes. Used to check if data is valid.
:return: a string with the encoded data line.
### Response:
def encode_data(self, data, attributes):
'''(INTERNAL) Encodes a line of data.
Data instances follow the csv format, i.e., attribute values are
delimited by commas, after being converted from csv.
:param data: a list of values.
:param attributes: a list of attributes. Used to check if data is valid.
:return: a string with the encoded data line.
'''
current_row = 0
for inst in data:
if len(inst) != len(attributes):
raise BadObject(
'Instance %d has %d attributes, expected %d' %
(current_row, len(inst), len(attributes))
)
new_data = []
for value in inst:
if value is None or value == u'' or value != value:
s = '?'
else:
s = encode_string(unicode(value))
new_data.append(s)
current_row += 1
yield u','.join(new_data) |
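A quick illustrative check of the per-value logic above (Python 3 sketch; plain str() stands in for the module's encode_string helper, so escaping is omitted). The `value != value` test catches NaN, since NaN is the only value unequal to itself.

def encode_value(value):
    # None, the empty string, and NaN (NaN != NaN) all become the
    # missing-value marker '?'; everything else is stringified.
    if value is None or value == '' or value != value:
        return '?'
    return str(value)

print([encode_value(v) for v in [None, '', float('nan'), 3.5, 'abc']])
# ['?', '?', '?', '3.5', 'abc']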
def home(request):
"""Renders Datafreezer homepage. Includes recent uploads."""
recent_uploads = Dataset.objects.order_by('-date_uploaded')[:11]
email_list = [upload.uploaded_by.strip() for upload in recent_uploads]
# print all_staff
emails_names = grab_names_from_emails(email_list)
# print emails_names
for upload in recent_uploads:
for item in emails_names:
if upload.uploaded_by == item:
upload.fullName = emails_names[item]
for upload in recent_uploads:
if not hasattr(upload, 'fullName'):
upload.fullName = upload.uploaded_by
return render(
request,
'datafreezer/home.html',
{
'recent_uploads': recent_uploads,
'heading': 'Most Recent Uploads'
}
) | Renders Datafreezer homepage. Includes recent uploads. | Below is the instruction that describes the task:
### Input:
Renders Datafreezer homepage. Includes recent uploads.
### Response:
def home(request):
"""Renders Datafreezer homepage. Includes recent uploads."""
recent_uploads = Dataset.objects.order_by('-date_uploaded')[:11]
email_list = [upload.uploaded_by.strip() for upload in recent_uploads]
# print all_staff
emails_names = grab_names_from_emails(email_list)
# print emails_names
for upload in recent_uploads:
for item in emails_names:
if upload.uploaded_by == item:
upload.fullName = emails_names[item]
for upload in recent_uploads:
if not hasattr(upload, 'fullName'):
upload.fullName = upload.uploaded_by
return render(
request,
'datafreezer/home.html',
{
'recent_uploads': recent_uploads,
'heading': 'Most Recent Uploads'
}
) |
def move(self, bearing, distance):
'''move position by bearing and distance'''
lat = self.pkt['I105']['Lat']['val']
lon = self.pkt['I105']['Lon']['val']
(lat, lon) = mp_util.gps_newpos(lat, lon, bearing, distance)
self.setpos(lat, lon) | move position by bearing and distance | Below is the instruction that describes the task:
### Input:
move position by bearing and distance
### Response:
def move(self, bearing, distance):
'''move position by bearing and distance'''
lat = self.pkt['I105']['Lat']['val']
lon = self.pkt['I105']['Lon']['val']
(lat, lon) = mp_util.gps_newpos(lat, lon, bearing, distance)
self.setpos(lat, lon) |
def func_args(func):
'''Basic function which returns a tuple of arguments of a function or
method.
'''
try:
return tuple(inspect.signature(func).parameters)
except:
return tuple(inspect.getargspec(func).args) | Basic function which returns a tuple of arguments of a function or
method. | Below is the instruction that describes the task:
### Input:
Basic function which returns a tuple of arguments of a function or
method.
### Response:
def func_args(func):
'''Basic function which returns a tuple of arguments of a function or
method.
'''
try:
return tuple(inspect.signature(func).parameters)
except:
return tuple(inspect.getargspec(func).args) |
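A small usage check of func_args as defined above (assuming it and its inspect import are in scope): signature-based introspection yields parameter names in declaration order, with *args/**kwargs names reported without their stars.

def sample(a, b, c=1, *rest, **opts):
    pass

print(func_args(sample))
# ('a', 'b', 'c', 'rest', 'opts')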
def altitude(msg):
"""Decode aircraft altitude
Args:
msg (string): 28 bytes hexadecimal message string
Returns:
int: altitude in feet
"""
tc = common.typecode(msg)
if tc<9 or tc==19 or tc>22:
raise RuntimeError("%s: Not an airborne position message" % msg)
mb = common.hex2bin(msg)[32:]
if tc < 19:
# barometric altitude
q = mb[15]
if q:
n = common.bin2int(mb[8:15]+mb[16:20])
alt = n * 25 - 1000
else:
alt = None
else:
# GNSS altitude, meters -> feet
alt = common.bin2int(mb[8:20]) * 3.28084
return alt | Decode aircraft altitude
Args:
msg (string): 28 bytes hexadecimal message string
Returns:
int: altitude in feet | Below is the instruction that describes the task:
### Input:
Decode aircraft altitude
Args:
msg (string): 28 bytes hexadecimal message string
Returns:
int: altitude in feet
### Response:
def altitude(msg):
"""Decode aircraft altitude
Args:
msg (string): 28 bytes hexadecimal message string
Returns:
int: altitude in feet
"""
tc = common.typecode(msg)
if tc<9 or tc==19 or tc>22:
raise RuntimeError("%s: Not an airborne position message" % msg)
mb = common.hex2bin(msg)[32:]
if tc < 19:
# barometric altitude
q = mb[15]
if q:
n = common.bin2int(mb[8:15]+mb[16:20])
alt = n * 25 - 1000
else:
alt = None
else:
# GNSS altitude, meters -> feet
alt = common.bin2int(mb[8:20]) * 3.28084
return alt |
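The barometric branch above is easy to verify by hand: with the Q-bit set, the 11 remaining altitude bits form an integer n and the altitude is 25 * n - 1000 ft. A sketch with a made-up bit pattern (not a real ADS-B message):

n = int('11001000000', 2)   # hypothetical 11-bit altitude field -> 1600
print(n * 25 - 1000)        # 39000 (feet)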
def is_empty(self):
"""
A group of modules is considered empty if it has no children or if
all its children are empty.
>>> from admin_tools.dashboard.modules import DashboardModule, LinkList
>>> mod = Group()
>>> mod.is_empty()
True
>>> mod.children.append(DashboardModule())
>>> mod.is_empty()
True
>>> mod.children.append(LinkList('links', children=[
... {'title': 'example1', 'url': 'http://example.com'},
... {'title': 'example2', 'url': 'http://example.com'},
... ]))
>>> mod.is_empty()
False
"""
if super(Group, self).is_empty():
return True
for child in self.children:
if not child.is_empty():
return False
return True | A group of modules is considered empty if it has no children or if
all its children are empty.
>>> from admin_tools.dashboard.modules import DashboardModule, LinkList
>>> mod = Group()
>>> mod.is_empty()
True
>>> mod.children.append(DashboardModule())
>>> mod.is_empty()
True
>>> mod.children.append(LinkList('links', children=[
... {'title': 'example1', 'url': 'http://example.com'},
... {'title': 'example2', 'url': 'http://example.com'},
... ]))
>>> mod.is_empty()
False | Below is the instruction that describes the task:
### Input:
A group of modules is considered empty if it has no children or if
all its children are empty.
>>> from admin_tools.dashboard.modules import DashboardModule, LinkList
>>> mod = Group()
>>> mod.is_empty()
True
>>> mod.children.append(DashboardModule())
>>> mod.is_empty()
True
>>> mod.children.append(LinkList('links', children=[
... {'title': 'example1', 'url': 'http://example.com'},
... {'title': 'example2', 'url': 'http://example.com'},
... ]))
>>> mod.is_empty()
False
### Response:
def is_empty(self):
"""
A group of modules is considered empty if it has no children or if
all its children are empty.
>>> from admin_tools.dashboard.modules import DashboardModule, LinkList
>>> mod = Group()
>>> mod.is_empty()
True
>>> mod.children.append(DashboardModule())
>>> mod.is_empty()
True
>>> mod.children.append(LinkList('links', children=[
... {'title': 'example1', 'url': 'http://example.com'},
... {'title': 'example2', 'url': 'http://example.com'},
... ]))
>>> mod.is_empty()
False
"""
if super(Group, self).is_empty():
return True
for child in self.children:
if not child.is_empty():
return False
return True |
def plot(self, axis, title=None, saved=False):
"""
Plots the planar average electrostatic potential against the Long range and short range models from Freysoldt
"""
x = self.metadata['pot_plot_data'][axis]['x']
v_R = self.metadata['pot_plot_data'][axis]['Vr']
dft_diff = self.metadata['pot_plot_data'][axis]['dft_diff']
final_shift = self.metadata['pot_plot_data'][axis]['final_shift']
check = self.metadata['pot_plot_data'][axis]['check']
plt.figure()
plt.clf()
plt.plot(x, v_R, c="green", zorder=1, label="long range from model")
plt.plot(x, dft_diff, c="red", label="DFT locpot diff")
plt.plot(x, final_shift, c="blue", label="short range (aligned)")
tmpx = [x[i] for i in range(check[0], check[1])]
plt.fill_between(tmpx, -100, 100, facecolor="red", alpha=0.15, label="sampling region")
plt.xlim(round(x[0]), round(x[-1]))
ymin = min(min(v_R), min(dft_diff), min(final_shift))
ymax = max(max(v_R), max(dft_diff), max(final_shift))
plt.ylim(-0.2 + ymin, 0.2 + ymax)
plt.xlabel("distance along axis ($\AA$)", fontsize=15)
plt.ylabel("Potential (V)", fontsize=15)
plt.legend(loc=9)
plt.axhline(y=0, linewidth=0.2, color="black")
plt.title(str(title) + " defect potential", fontsize=18)
plt.xlim(0, max(x))
if saved:
plt.savefig(str(title) + "FreyplnravgPlot.pdf")
else:
return plt | Plots the planar average electrostatic potential against the Long range and short range models from Freysoldt | Below is the instruction that describes the task:
### Input:
Plots the planar average electrostatic potential against the Long range and short range models from Freysoldt
### Response:
def plot(self, axis, title=None, saved=False):
"""
Plots the planar average electrostatic potential against the Long range and short range models from Freysoldt
"""
x = self.metadata['pot_plot_data'][axis]['x']
v_R = self.metadata['pot_plot_data'][axis]['Vr']
dft_diff = self.metadata['pot_plot_data'][axis]['dft_diff']
final_shift = self.metadata['pot_plot_data'][axis]['final_shift']
check = self.metadata['pot_plot_data'][axis]['check']
plt.figure()
plt.clf()
plt.plot(x, v_R, c="green", zorder=1, label="long range from model")
plt.plot(x, dft_diff, c="red", label="DFT locpot diff")
plt.plot(x, final_shift, c="blue", label="short range (aligned)")
tmpx = [x[i] for i in range(check[0], check[1])]
plt.fill_between(tmpx, -100, 100, facecolor="red", alpha=0.15, label="sampling region")
plt.xlim(round(x[0]), round(x[-1]))
ymin = min(min(v_R), min(dft_diff), min(final_shift))
ymax = max(max(v_R), max(dft_diff), max(final_shift))
plt.ylim(-0.2 + ymin, 0.2 + ymax)
plt.xlabel("distance along axis ($\AA$)", fontsize=15)
plt.ylabel("Potential (V)", fontsize=15)
plt.legend(loc=9)
plt.axhline(y=0, linewidth=0.2, color="black")
plt.title(str(title) + " defect potential", fontsize=18)
plt.xlim(0, max(x))
if saved:
plt.savefig(str(title) + "FreyplnravgPlot.pdf")
else:
return plt |
def groupByWordIndex(self, transaction: 'TransTmpl', offset: int):
"""
Group transaction parts split on word boundaries into words
:param transaction: TransTmpl instance whose parts
should be grouped into words
:return: generator of tuples (wordIndex, list of transaction parts
in this word)
"""
actualW = None
partsInWord = []
wordWidth = self.wordWidth
for item in self.splitOnWords(transaction, offset):
_actualW = item.startOfPart // wordWidth
if actualW is None:
actualW = _actualW
partsInWord.append(item)
elif _actualW > actualW:
yield (actualW, partsInWord)
actualW = _actualW
partsInWord = [item, ]
else:
partsInWord.append(item)
if partsInWord:
yield (actualW, partsInWord) | Group transaction parts split on word boundaries into words
:param transaction: TransTmpl instance whose parts
should be grouped into words
:return: generator of tuples (wordIndex, list of transaction parts
in this word) | Below is the instruction that describes the task:
### Input:
Group transaction parts split on word boundaries into words
:param transaction: TransTmpl instance whose parts
should be grouped into words
:return: generator of tuples (wordIndex, list of transaction parts
in this word)
### Response:
def groupByWordIndex(self, transaction: 'TransTmpl', offset: int):
"""
Group transaction parts split on word boundaries into words
:param transaction: TransTmpl instance whose parts
should be grouped into words
:return: generator of tuples (wordIndex, list of transaction parts
in this word)
"""
actualW = None
partsInWord = []
wordWidth = self.wordWidth
for item in self.splitOnWords(transaction, offset):
_actualW = item.startOfPart // wordWidth
if actualW is None:
actualW = _actualW
partsInWord.append(item)
elif _actualW > actualW:
yield (actualW, partsInWord)
actualW = _actualW
partsInWord = [item, ]
else:
partsInWord.append(item)
if partsInWord:
yield (actualW, partsInWord) |
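Since parts arrive ordered by startOfPart, the same bucketing can be sketched with itertools.groupby; Part here is a hypothetical stand-in for the real transaction parts:

from collections import namedtuple
from itertools import groupby

Part = namedtuple('Part', 'startOfPart')
word_width = 32
parts = [Part(0), Part(8), Part(32), Part(40), Part(96)]

# consecutive parts whose start falls in the same word share a key
for word_index, group in groupby(parts, key=lambda p: p.startOfPart // word_width):
    print(word_index, [p.startOfPart for p in group])
# 0 [0, 8]
# 1 [32, 40]
# 3 [96]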
def _parse_api_options(self, options, query_string=False):
"""Select API options out of the provided options object.
Selects API string options out of the provided options object and
formats for either request body (default) or query string.
"""
api_options = self._select_options(options, self.API_OPTIONS)
if query_string:
# Prefix all options with "opt_"
query_api_options = {}
for key in api_options:
# Transform list/tuples into comma separated list
if isinstance(api_options[key], (list, tuple)):
query_api_options[
'opt_' + key] = ','.join(api_options[key])
else:
query_api_options[
'opt_' + key] = api_options[key]
return query_api_options
else:
return api_options | Select API options out of the provided options object.
Selects API string options out of the provided options object and
formats for either request body (default) or query string. | Below is the instruction that describes the task:
### Input:
Select API options out of the provided options object.
Selects API string options out of the provided options object and
formats for either request body (default) or query string.
### Response:
def _parse_api_options(self, options, query_string=False):
"""Select API options out of the provided options object.
Selects API string options out of the provided options object and
formats for either request body (default) or query string.
"""
api_options = self._select_options(options, self.API_OPTIONS)
if query_string:
# Prefix all options with "opt_"
query_api_options = {}
for key in api_options:
# Transform list/tuples into comma separated list
if isinstance(api_options[key], (list, tuple)):
query_api_options[
'opt_' + key] = ','.join(api_options[key])
else:
query_api_options[
'opt_' + key] = api_options[key]
return query_api_options
else:
return api_options |
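The query-string branch above reduces to prefixing each key with opt_ and comma-joining sequences; a standalone sketch with hypothetical option names:

api_options = {'fields': ['name', 'completed'], 'pretty': True}

query_api_options = {}
for key, value in api_options.items():
    if isinstance(value, (list, tuple)):
        query_api_options['opt_' + key] = ','.join(value)
    else:
        query_api_options['opt_' + key] = value
print(query_api_options)
# {'opt_fields': 'name,completed', 'opt_pretty': True}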
def pop(self, key, *args):
'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
try:
return self.maps[0].pop(key, *args)
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key)) | Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0]. | Below is the instruction that describes the task:
### Input:
Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].
### Response:
def pop(self, key, *args):
'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
try:
return self.maps[0].pop(key, *args)
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key)) |
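This pop mirrors collections.ChainMap.pop, which only ever mutates the first mapping; the stdlib version demonstrates the behaviour:

from collections import ChainMap

cm = ChainMap({'a': 1}, {'a': 2, 'b': 3})
print(cm.pop('a'))   # 1, removed from maps[0] only
print(cm['a'])       # 2, the second map now shows through
try:
    cm.pop('b')      # 'b' lives in the second map, not maps[0]
except KeyError as exc:
    print(exc)       # "Key not found in the first mapping: 'b'"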
def run(self):
""" Main entry function. """
args = self._process_flags()
self.todofile = TodoFile.TodoFile(config().todotxt())
self.todolist = TodoList.TodoList(self.todofile.read())
try:
(subcommand, args) = get_subcommand(args)
except ConfigError as ce:
error('Error: ' + str(ce) + '. Check your aliases configuration')
sys.exit(1)
if subcommand is None:
CLIApplicationBase._usage()
if self._execute(subcommand, args) == False:
sys.exit(1)
else:
self._post_execute() | Main entry function. | Below is the instruction that describes the task:
### Input:
Main entry function.
### Response:
def run(self):
""" Main entry function. """
args = self._process_flags()
self.todofile = TodoFile.TodoFile(config().todotxt())
self.todolist = TodoList.TodoList(self.todofile.read())
try:
(subcommand, args) = get_subcommand(args)
except ConfigError as ce:
error('Error: ' + str(ce) + '. Check your aliases configuration')
sys.exit(1)
if subcommand is None:
CLIApplicationBase._usage()
if self._execute(subcommand, args) == False:
sys.exit(1)
else:
self._post_execute() |
def modify_agent_properties(self, agent_id, key_value_map={}):
'''
modify_agent_properties(self, agent_id, key_value_map={})
Modify properties of an agent. If properties do not exist, they will be created
:Parameters:
* *agent_id* (`string`) -- Identifier of an existing agent
* *key_value_map* (`object`) -- Key value map of properties to change
* *value* (`string`) -- New Value of the property to change
:Example:
.. code-block:: python
opereto_client.modify_agent_properties('my_agent_id', {"mykey": "myvalue", "mykey2": "myvalue2"})
'''
return self._call_rest_api('post', '/agents/'+agent_id+'/properties', data=key_value_map, error='Failed to modify agent [%s] properties'%agent_id) | modify_agent_properties(self, agent_id, key_value_map={})
Modify properties of an agent. If properties do not exist, they will be created
:Parameters:
* *agent_id* (`string`) -- Identifier of an existing agent
* *key_value_map* (`object`) -- Key value map of properties to change
* *value* (`string`) -- New Value of the property to change
:Example:
.. code-block:: python
opereto_client.modify_agent_properties('my_agent_id', {"mykey": "myvalue", "mykey2": "myvalue2"}) | Below is the instruction that describes the task:
### Input:
modify_agent_properties(self, agent_id, key_value_map={})
Modify properties of an agent. If properties do not exist, they will be created
:Parameters:
* *agent_id* (`string`) -- Identifier of an existing agent
* *key_value_map* (`object`) -- Key value map of properties to change
* *value* (`string`) -- New Value of the property to change
:Example:
.. code-block:: python
opereto_client.modify_agent_properties('my_agent_id', {"mykey": "myvalue", "mykey2": "myvalue2"})
### Response:
def modify_agent_properties(self, agent_id, key_value_map={}):
'''
modify_agent_properties(self, agent_id, key_value_map={})
Modify properties of an agent. If properties do not exist, they will be created
:Parameters:
* *agent_id* (`string`) -- Identifier of an existing agent
* *key_value_map* (`object`) -- Key value map of properties to change
* *value* (`string`) -- New Value of the property to change
:Example:
.. code-block:: python
opereto_client.modify_agent_properties('my_agent_id', {"mykey": "myvalue", "mykey2": "myvalue2"})
'''
return self._call_rest_api('post', '/agents/'+agent_id+'/properties', data=key_value_map, error='Failed to modify agent [%s] properties'%agent_id) |
def consolidate(args):
"""
%prog consolidate gffile1 gffile2 ... > consolidated.out
Given 2 or more gff files generated by pasa annotation comparison,
iterate through each locus (shared locus name or overlapping CDS)
and identify same/different isoforms (shared splicing structure)
across the input datasets.
If `slop` is enabled, consolidation will collapse any variation
in terminal UTR lengths, keeping the longest as representative.
"""
from jcvi.formats.base import longest_unique_prefix
from jcvi.formats.gff import make_index, match_subfeats
from jcvi.utils.cbook import AutoVivification
from jcvi.utils.grouper import Grouper
from itertools import combinations, product
supported_modes = ["name", "coords"]
p = OptionParser(consolidate.__doc__)
p.add_option("--slop", default=False, action="store_true",
help="allow minor variation in terminal 5'/3' UTR" + \
" start/stop position [default: %default]")
p.add_option("--inferUTR", default=False, action="store_true",
help="infer presence of UTRs from exon coordinates")
p.add_option("--mode", default="name", choices=supported_modes,
help="method used to determine overlapping loci")
p.add_option("--summary", default=False, action="store_true",
help="Generate summary table of consolidation process")
p.add_option("--clusters", default=False, action="store_true",
help="Generate table of cluster members after consolidation")
p.set_outfile()
opts, args = p.parse_args(args)
slop = opts.slop
inferUTR = opts.inferUTR
mode = opts.mode
if len(args) < 2:
sys.exit(not p.print_help())
gffdbx = {}
for gffile in args:
dbn = longest_unique_prefix(gffile, args)
gffdbx[dbn] = make_index(gffile)
loci = Grouper()
for dbn in gffdbx:
odbns = [odbn for odbn in gffdbx if dbn != odbn]
for gene in gffdbx[dbn].features_of_type('gene', order_by=('seqid', 'start')):
if mode == "name":
loci.join(gene.id, (gene.id, dbn))
else:
if (gene.id, dbn) not in loci:
loci.join((gene.id, dbn))
gene_cds = list(gffdbx[dbn].children(gene, \
featuretype='CDS', order_by=('start')))
gene_cds_start, gene_cds_stop = gene_cds[0].start, \
gene_cds[-1].stop
for odbn in odbns:
for ogene_cds in gffdbx[odbn].region(seqid=gene.seqid, \
start=gene_cds_start, end=gene_cds_stop, \
strand=gene.strand, featuretype='CDS'):
for ogene in gffdbx[odbn].parents(ogene_cds, featuretype='gene'):
loci.join((gene.id, dbn), (ogene.id, odbn))
gfeats = {}
mrna = AutoVivification()
for i, locus in enumerate(loci):
gene = "gene_{0:0{pad}}".format(i, pad=6) \
if mode == "coords" else None
for elem in locus:
if type(elem) == tuple:
_gene, dbn = elem
if gene is None: gene = _gene
g = gffdbx[dbn][_gene]
if gene not in gfeats:
gfeats[gene] = g
gfeats[gene].attributes['ID'] = [gene]
else:
if g.start < gfeats[gene].start:
gfeats[gene].start = g.start
if g.stop > gfeats[gene].stop:
gfeats[gene].stop = g.stop
c = list(gffdbx[dbn].children(_gene, featuretype='mRNA', order_by='start'))
if len(c) > 0:
mrna[gene][dbn] = c
fw = must_open(opts.outfile, "w")
print("##gff-version 3", file=fw)
seen = {}
if opts.summary:
summaryfile = "{0}.summary.txt".format(opts.outfile.rsplit(".")[0])
sfw = must_open(summaryfile, "w")
summary = ["id"]
summary.extend(gffdbx.keys())
print("\t".join(str(x) for x in summary), file=sfw)
if opts.clusters:
clustersfile = "{0}.clusters.txt".format(opts.outfile.rsplit(".")[0])
cfw = must_open(clustersfile, "w")
clusters = ["id", "dbns", "members", "trlens"]
print("\t".join(str(x) for x in clusters), file=cfw)
for gene in mrna:
g = Grouper()
dbns = list(combinations(mrna[gene], 2))
if len(dbns) > 0:
for dbn1, dbn2 in dbns:
dbx1, dbx2 = gffdbx[dbn1], gffdbx[dbn2]
for mrna1, mrna2 in product(mrna[gene][dbn1], mrna[gene][dbn2]):
mrna1s, mrna2s = mrna1.stop - mrna1.start + 1, \
mrna2.stop - mrna2.start + 1
g.join((dbn1, mrna1.id, mrna1s))
g.join((dbn2, mrna2.id, mrna2s))
if match_subfeats(mrna1, mrna2, dbx1, dbx2, featuretype='CDS'):
res = []
ftypes = ['exon'] if inferUTR else ['five_prime_UTR', 'three_prime_UTR']
for ftype in ftypes:
res.append(match_subfeats(mrna1, mrna2, dbx1, dbx2, featuretype=ftype, slop=slop))
if all(r == True for r in res):
g.join((dbn1, mrna1.id, mrna1s), (dbn2, mrna2.id, mrna2s))
else:
for dbn1 in mrna[gene]:
for mrna1 in mrna[gene][dbn1]:
g.join((dbn1, mrna1.id, mrna1.stop - mrna1.start + 1))
print(gfeats[gene], file=fw)
for group in g:
group.sort(key=lambda x: x[2], reverse=True)
dbs, mrnas = [el[0] for el in group], [el[1] for el in group]
d, m = dbs[0], mrnas[0]
dbid, _mrnaid = "|".join(str(x) for x in set(dbs)), []
for x in mrnas:
if x not in _mrnaid: _mrnaid.append(x)
mrnaid = "{0}|{1}".format(dbid, "-".join(_mrnaid))
if mrnaid not in seen:
seen[mrnaid] = 0
else:
seen[mrnaid] += 1
mrnaid = "{0}-{1}".format(mrnaid, seen[mrnaid])
_mrna = gffdbx[d][m]
_mrna.attributes['ID'] = [mrnaid]
_mrna.attributes['Parent'] = [gene]
children = gffdbx[d].children(m, order_by='start')
print(_mrna, file=fw)
for child in children:
child.attributes['ID'] = ["{0}|{1}".format(dbid, child.id)]
child.attributes['Parent'] = [mrnaid]
print(child, file=fw)
if opts.summary:
summary = [mrnaid]
summary.extend(['Y' if db in set(dbs) else 'N' for db in gffdbx])
print("\t".join(str(x) for x in summary), file=sfw)
if opts.clusters:
clusters = [mrnaid]
clusters.append(",".join(str(el[0]) for el in group))
clusters.append(",".join(str(el[1]) for el in group))
clusters.append(",".join(str(el[2]) for el in group))
print("\t".join(str(x) for x in clusters), file=cfw)
fw.close()
if opts.summary: sfw.close()
if opts.clusters: cfw.close() | %prog consolidate gffile1 gffile2 ... > consolidated.out
Given 2 or more gff files generated by pasa annotation comparison,
iterate through each locus (shared locus name or overlapping CDS)
and identify same/different isoforms (shared splicing structure)
across the input datasets.
If `slop` is enabled, consolidation will collapse any variation
in terminal UTR lengths, keeping the longest as representative. | Below is the instruction that describes the task:
### Input:
%prog consolidate gffile1 gffile2 ... > consolidated.out
Given 2 or more gff files generated by pasa annotation comparison,
iterate through each locus (shared locus name or overlapping CDS)
and identify same/different isoforms (shared splicing structure)
across the input datasets.
If `slop` is enabled, consolidation will collapse any variation
in terminal UTR lengths, keeping the longest as representative.
### Response:
def consolidate(args):
"""
%prog consolidate gffile1 gffile2 ... > consolidated.out
Given 2 or more gff files generated by pasa annotation comparison,
iterate through each locus (shared locus name or overlapping CDS)
and identify same/different isoforms (shared splicing structure)
across the input datasets.
If `slop` is enabled, consolidation will collapse any variation
in terminal UTR lengths, keeping the longest as representative.
"""
from jcvi.formats.base import longest_unique_prefix
from jcvi.formats.gff import make_index, match_subfeats
from jcvi.utils.cbook import AutoVivification
from jcvi.utils.grouper import Grouper
from itertools import combinations, product
supported_modes = ["name", "coords"]
p = OptionParser(consolidate.__doc__)
p.add_option("--slop", default=False, action="store_true",
help="allow minor variation in terminal 5'/3' UTR" + \
" start/stop position [default: %default]")
p.add_option("--inferUTR", default=False, action="store_true",
help="infer presence of UTRs from exon coordinates")
p.add_option("--mode", default="name", choices=supported_modes,
help="method used to determine overlapping loci")
p.add_option("--summary", default=False, action="store_true",
help="Generate summary table of consolidation process")
p.add_option("--clusters", default=False, action="store_true",
help="Generate table of cluster members after consolidation")
p.set_outfile()
opts, args = p.parse_args(args)
slop = opts.slop
inferUTR = opts.inferUTR
mode = opts.mode
if len(args) < 2:
sys.exit(not p.print_help())
gffdbx = {}
for gffile in args:
dbn = longest_unique_prefix(gffile, args)
gffdbx[dbn] = make_index(gffile)
loci = Grouper()
for dbn in gffdbx:
odbns = [odbn for odbn in gffdbx if dbn != odbn]
for gene in gffdbx[dbn].features_of_type('gene', order_by=('seqid', 'start')):
if mode == "name":
loci.join(gene.id, (gene.id, dbn))
else:
if (gene.id, dbn) not in loci:
loci.join((gene.id, dbn))
gene_cds = list(gffdbx[dbn].children(gene, \
featuretype='CDS', order_by=('start')))
gene_cds_start, gene_cds_stop = gene_cds[0].start, \
gene_cds[-1].stop
for odbn in odbns:
for ogene_cds in gffdbx[odbn].region(seqid=gene.seqid, \
start=gene_cds_start, end=gene_cds_stop, \
strand=gene.strand, featuretype='CDS'):
for ogene in gffdbx[odbn].parents(ogene_cds, featuretype='gene'):
loci.join((gene.id, dbn), (ogene.id, odbn))
gfeats = {}
mrna = AutoVivification()
for i, locus in enumerate(loci):
gene = "gene_{0:0{pad}}".format(i, pad=6) \
if mode == "coords" else None
for elem in locus:
if type(elem) == tuple:
_gene, dbn = elem
if gene is None: gene = _gene
g = gffdbx[dbn][_gene]
if gene not in gfeats:
gfeats[gene] = g
gfeats[gene].attributes['ID'] = [gene]
else:
if g.start < gfeats[gene].start:
gfeats[gene].start = g.start
if g.stop > gfeats[gene].stop:
gfeats[gene].stop = g.stop
c = list(gffdbx[dbn].children(_gene, featuretype='mRNA', order_by='start'))
if len(c) > 0:
mrna[gene][dbn] = c
fw = must_open(opts.outfile, "w")
print("##gff-version 3", file=fw)
seen = {}
if opts.summary:
summaryfile = "{0}.summary.txt".format(opts.outfile.rsplit(".")[0])
sfw = must_open(summaryfile, "w")
summary = ["id"]
summary.extend(gffdbx.keys())
print("\t".join(str(x) for x in summary), file=sfw)
if opts.clusters:
clustersfile = "{0}.clusters.txt".format(opts.outfile.rsplit(".")[0])
cfw = must_open(clustersfile, "w")
clusters = ["id", "dbns", "members", "trlens"]
print("\t".join(str(x) for x in clusters), file=cfw)
for gene in mrna:
g = Grouper()
dbns = list(combinations(mrna[gene], 2))
if len(dbns) > 0:
for dbn1, dbn2 in dbns:
dbx1, dbx2 = gffdbx[dbn1], gffdbx[dbn2]
for mrna1, mrna2 in product(mrna[gene][dbn1], mrna[gene][dbn2]):
mrna1s, mrna2s = mrna1.stop - mrna1.start + 1, \
mrna2.stop - mrna2.start + 1
g.join((dbn1, mrna1.id, mrna1s))
g.join((dbn2, mrna2.id, mrna2s))
if match_subfeats(mrna1, mrna2, dbx1, dbx2, featuretype='CDS'):
res = []
ftypes = ['exon'] if inferUTR else ['five_prime_UTR', 'three_prime_UTR']
for ftype in ftypes:
res.append(match_subfeats(mrna1, mrna2, dbx1, dbx2, featuretype=ftype, slop=slop))
if all(r == True for r in res):
g.join((dbn1, mrna1.id, mrna1s), (dbn2, mrna2.id, mrna2s))
else:
for dbn1 in mrna[gene]:
for mrna1 in mrna[gene][dbn1]:
g.join((dbn1, mrna1.id, mrna1.stop - mrna1.start + 1))
print(gfeats[gene], file=fw)
for group in g:
group.sort(key=lambda x: x[2], reverse=True)
dbs, mrnas = [el[0] for el in group], [el[1] for el in group]
d, m = dbs[0], mrnas[0]
dbid, _mrnaid = "|".join(str(x) for x in set(dbs)), []
for x in mrnas:
if x not in _mrnaid: _mrnaid.append(x)
mrnaid = "{0}|{1}".format(dbid, "-".join(_mrnaid))
if mrnaid not in seen:
seen[mrnaid] = 0
else:
seen[mrnaid] += 1
mrnaid = "{0}-{1}".format(mrnaid, seen[mrnaid])
_mrna = gffdbx[d][m]
_mrna.attributes['ID'] = [mrnaid]
_mrna.attributes['Parent'] = [gene]
children = gffdbx[d].children(m, order_by='start')
print(_mrna, file=fw)
for child in children:
child.attributes['ID'] = ["{0}|{1}".format(dbid, child.id)]
child.attributes['Parent'] = [mrnaid]
print(child, file=fw)
if opts.summary:
summary = [mrnaid]
summary.extend(['Y' if db in set(dbs) else 'N' for db in gffdbx])
print("\t".join(str(x) for x in summary), file=sfw)
if opts.clusters:
clusters = [mrnaid]
clusters.append(",".join(str(el[0]) for el in group))
clusters.append(",".join(str(el[1]) for el in group))
clusters.append(",".join(str(el[2]) for el in group))
print("\t".join(str(x) for x in clusters), file=cfw)
fw.close()
if opts.summary: sfw.close()
if opts.clusters: cfw.close() |
def interp1d(x,Z,xout,spline=False,kind='linear',fill_value=np.NaN,**kwargs):
"""
INTERP1D : Interpolate values from a 1D vector at given positions
@param x: 1st dimension vector of size NX
@author: Renaud DUSSURGET, LER/PAC, Ifremer La Seyne
"""
linear = not spline
nx=len(x)
if linear :
try :
f = scipy.interpolate.interp1d(x, Z, kind=kind,bounds_error=False,fill_value=fill_value,**kwargs)
Zout = f(xout)
except RuntimeError : Zout = np.repeat(np.NaN,nx)
else :
tck = scipy.interpolate.splrep(x,Z,s=0)
try : Zout = scipy.interpolate.splev(xout,tck,der=0,**kwargs)
except RuntimeError : Zout = np.repeat(np.NaN,nx)
return Zout | INTERP1D : Interpolate values from a 1D vector at given positions
@param x: 1st dimension vector of size NX
@author: Renaud DUSSURGET, LER/PAC, Ifremer La Seyne | Below is the instruction that describes the task:
### Input:
INTERP1D : Interpolate values from a 1D vector at given positions
@param x: 1st dimension vector of size NX
@author: Renaud DUSSURGET, LER/PAC, Ifremer La Seyne
### Response:
def interp1d(x,Z,xout,spline=False,kind='linear',fill_value=np.NaN,**kwargs):
"""
INTERP1D : Interpolate values from a 1D vector at given positions
@param x: 1st dimension vector of size NX
@author: Renaud DUSSURGET, LER/PAC, Ifremer La Seyne
"""
linear = not spline
nx=len(x)
if linear :
try :
f = scipy.interpolate.interp1d(x, Z, kind=kind,bounds_error=False,fill_value=fill_value,**kwargs)
Zout = f(xout)
except RuntimeError : Zout = np.repeat(np.NaN,nx)
else :
tck = scipy.interpolate.splrep(x,Z,s=0)
try : Zout = scipy.interpolate.splev(xout,tck,der=0,**kwargs)
except RuntimeError : Zout = np.repeat(np.NaN,nx)
return Zout |
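The linear branch above is a thin wrapper over scipy; calling the same underlying routine shows the NaN fill for out-of-range points:

import numpy as np
import scipy.interpolate

x = np.array([0.0, 1.0, 2.0])
z = np.array([0.0, 10.0, 20.0])
xout = np.array([0.5, 1.5, 3.0])

f = scipy.interpolate.interp1d(x, z, kind='linear',
                               bounds_error=False, fill_value=np.nan)
print(f(xout))  # [ 5. 15. nan]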
def explain(self, *args, **kwargs):
'''Return a string that describes how these args are interpreted'''
args = self.get(*args, **kwargs)
results = ['%s = %s' % (name, value) for name, value in args.required]
results.extend(['%s = %s (overridden)' % (
name, value) for name, value in args.overridden])
results.extend(['%s = %s (default)' % (
name, value) for name, value in args.defaulted])
if self._varargs:
results.append('%s = %s' % (self._varargs, args.varargs))
if self._kwargs:
results.append('%s = %s' % (self._kwargs, args.kwargs))
return '\n\t'.join(results) | Return a string that describes how these args are interpreted | Below is the instruction that describes the task:
### Input:
Return a string that describes how these args are interpreted
### Response:
def explain(self, *args, **kwargs):
'''Return a string that describes how these args are interpreted'''
args = self.get(*args, **kwargs)
results = ['%s = %s' % (name, value) for name, value in args.required]
results.extend(['%s = %s (overridden)' % (
name, value) for name, value in args.overridden])
results.extend(['%s = %s (default)' % (
name, value) for name, value in args.defaulted])
if self._varargs:
results.append('%s = %s' % (self._varargs, args.varargs))
if self._kwargs:
results.append('%s = %s' % (self._kwargs, args.kwargs))
return '\n\t'.join(results) |
def pull(directory: str) -> Commit:
"""
Pulls the subrepo that has been cloned into the given directory.
:param directory: the directory containing the subrepo
:return: the commit the subrepo is on
"""
if not os.path.exists(directory):
raise ValueError(f"No subrepo found in \"{directory}\"")
try:
result = run([GIT_COMMAND, _GIT_SUBREPO_COMMAND, _GIT_SUBREPO_PULL_COMMAND, _GIT_SUBREPO_VERBOSE_FLAG,
get_directory_relative_to_git_root(directory)],
execution_directory=get_git_root_directory(directory))
except RunException as e:
if "Can't pull subrepo. Working tree has changes" in e.stderr:
raise UnstagedChangeException() from e
return status(directory)[2] | Pulls the subrepo that has been cloned into the given directory.
:param directory: the directory containing the subrepo
:return: the commit the subrepo is on | Below is the instruction that describes the task:
### Input:
Pulls the subrepo that has been cloned into the given directory.
:param directory: the directory containing the subrepo
:return: the commit the subrepo is on
### Response:
def pull(directory: str) -> Commit:
"""
Pulls the subrepo that has been cloned into the given directory.
:param directory: the directory containing the subrepo
:return: the commit the subrepo is on
"""
if not os.path.exists(directory):
raise ValueError(f"No subrepo found in \"{directory}\"")
try:
result = run([GIT_COMMAND, _GIT_SUBREPO_COMMAND, _GIT_SUBREPO_PULL_COMMAND, _GIT_SUBREPO_VERBOSE_FLAG,
get_directory_relative_to_git_root(directory)],
execution_directory=get_git_root_directory(directory))
except RunException as e:
if "Can't pull subrepo. Working tree has changes" in e.stderr:
raise UnstagedChangeException() from e
return status(directory)[2] |
def describe(self, bucket, descriptor=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Set descriptor
if descriptor is not None:
self.__descriptors[bucket] = descriptor
# Get descriptor
else:
descriptor = self.__descriptors.get(bucket)
if descriptor is None:
table = self.__get_table(bucket)
descriptor = self.__mapper.restore_descriptor(
table.name, table.columns, table.constraints, self.__autoincrement)
return descriptor | https://github.com/frictionlessdata/tableschema-sql-py#storage | Below is the instruction that describes the task:
### Input:
https://github.com/frictionlessdata/tableschema-sql-py#storage
### Response:
def describe(self, bucket, descriptor=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Set descriptor
if descriptor is not None:
self.__descriptors[bucket] = descriptor
# Get descriptor
else:
descriptor = self.__descriptors.get(bucket)
if descriptor is None:
table = self.__get_table(bucket)
descriptor = self.__mapper.restore_descriptor(
table.name, table.columns, table.constraints, self.__autoincrement)
return descriptor |
def to_gmfs(shakemap, spatialcorr, crosscorr, site_effects, trunclevel,
num_gmfs, seed, imts=None):
"""
:returns: (IMT-strings, array of GMFs of shape (R, N, E, M))
"""
N = len(shakemap) # number of sites
std = shakemap['std']
if imts is None or len(imts) == 0:
imts = std.dtype.names
else:
imts = [imt for imt in imts if imt in std.dtype.names]
val = {imt: numpy.log(shakemap['val'][imt]) - std[imt] ** 2 / 2.
for imt in imts}
imts_ = [imt.from_string(name) for name in imts]
M = len(imts_)
cross_corr = cross_correlation_matrix(imts_, crosscorr)
mu = numpy.array([numpy.ones(num_gmfs) * val[str(imt)][j]
for imt in imts_ for j in range(N)])
dmatrix = geo.geodetic.distance_matrix(
shakemap['lon'], shakemap['lat'])
spatial_corr = spatial_correlation_array(dmatrix, imts_, spatialcorr)
stddev = [std[str(imt)] for imt in imts_]
for im, std in zip(imts_, stddev):
if std.sum() == 0:
raise ValueError('Cannot decompose the spatial covariance '
'because stddev==0 for IMT=%s' % im)
spatial_cov = spatial_covariance_array(stddev, spatial_corr)
L = cholesky(spatial_cov, cross_corr) # shape (M * N, M * N)
if trunclevel:
Z = truncnorm.rvs(-trunclevel, trunclevel, loc=0, scale=1,
size=(M * N, num_gmfs), random_state=seed)
else:
Z = norm.rvs(loc=0, scale=1, size=(M * N, num_gmfs), random_state=seed)
# Z has shape (M * N, E)
gmfs = numpy.exp(numpy.dot(L, Z) + mu) / PCTG
if site_effects:
gmfs = amplify_gmfs(imts_, shakemap['vs30'], gmfs)
if gmfs.max() > MAX_GMV:
logging.warning('There are suspiciously large GMVs of %.2fg', gmfs.max())
return imts, gmfs.reshape((M, N, num_gmfs)).transpose(1, 2, 0) | :returns: (IMT-strings, array of GMFs of shape (R, N, E, M)) | Below is the instruction that describes the task:
### Input:
:returns: (IMT-strings, array of GMFs of shape (R, N, E, M))
### Response:
def to_gmfs(shakemap, spatialcorr, crosscorr, site_effects, trunclevel,
num_gmfs, seed, imts=None):
"""
:returns: (IMT-strings, array of GMFs of shape (R, N, E, M))
"""
N = len(shakemap) # number of sites
std = shakemap['std']
if imts is None or len(imts) == 0:
imts = std.dtype.names
else:
imts = [imt for imt in imts if imt in std.dtype.names]
val = {imt: numpy.log(shakemap['val'][imt]) - std[imt] ** 2 / 2.
for imt in imts}
imts_ = [imt.from_string(name) for name in imts]
M = len(imts_)
cross_corr = cross_correlation_matrix(imts_, crosscorr)
mu = numpy.array([numpy.ones(num_gmfs) * val[str(imt)][j]
for imt in imts_ for j in range(N)])
dmatrix = geo.geodetic.distance_matrix(
shakemap['lon'], shakemap['lat'])
spatial_corr = spatial_correlation_array(dmatrix, imts_, spatialcorr)
stddev = [std[str(imt)] for imt in imts_]
for im, std in zip(imts_, stddev):
if std.sum() == 0:
raise ValueError('Cannot decompose the spatial covariance '
'because stddev==0 for IMT=%s' % im)
spatial_cov = spatial_covariance_array(stddev, spatial_corr)
L = cholesky(spatial_cov, cross_corr) # shape (M * N, M * N)
if trunclevel:
Z = truncnorm.rvs(-trunclevel, trunclevel, loc=0, scale=1,
size=(M * N, num_gmfs), random_state=seed)
else:
Z = norm.rvs(loc=0, scale=1, size=(M * N, num_gmfs), random_state=seed)
# Z has shape (M * N, E)
gmfs = numpy.exp(numpy.dot(L, Z) + mu) / PCTG
if site_effects:
gmfs = amplify_gmfs(imts_, shakemap['vs30'], gmfs)
if gmfs.max() > MAX_GMV:
logging.warning('There are suspiciously large GMVs of %.2fg', gmfs.max())
return imts, gmfs.reshape((M, N, num_gmfs)).transpose(1, 2, 0) |
def status(app_name=None, only_cozy=False, as_boolean=False):
'''Get apps status
:param app_name: If an app name is passed, return that app's status
:return: dict with all apps status or str with one app status
'''
apps = {}
# Get all apps status & split them
apps_status = subprocess.Popen('cozy-monitor status',
shell=True,
stdout=subprocess.PIPE).stdout.read()
apps_status = apps_status.split('\n')
# Parse result to store them in apps dictionary
for app_status in apps_status:
if app_status:
app_status = ANSI_ESCAPE.sub('', app_status).split(': ')
if len(app_status) == 2:
current_status = app_status[1]
if as_boolean:
if app_status[1] == 'up':
current_status = True
else:
current_status = False
if only_cozy and app_status[0] not in SYSTEM_APPS:
apps[app_status[0]] = current_status
else:
apps[app_status[0]] = current_status
# Return app status if get as param or return all apps status
if app_name:
return apps.get(app_name, None)
else:
return apps | Get apps status
:param app_name: If an app name is passed, return that app's status
:return: dict with all apps status or str with one app status | Below is the instruction that describes the task:
### Input:
Get apps status
:param app_name: If an app name is passed, return that app's status
:return: dict with all apps status or str with one app status
### Response:
def status(app_name=None, only_cozy=False, as_boolean=False):
'''Get apps status
:param app_name: If an app name is passed, return that app's status
:return: dict with all apps status or str with one app status
'''
apps = {}
# Get all apps status & split them
apps_status = subprocess.Popen('cozy-monitor status',
shell=True,
stdout=subprocess.PIPE).stdout.read()
apps_status = apps_status.split('\n')
# Parse result to store them in apps dictionary
for app_status in apps_status:
if app_status:
app_status = ANSI_ESCAPE.sub('', app_status).split(': ')
if len(app_status) == 2:
current_status = app_status[1]
if as_boolean:
if app_status[1] == 'up':
current_status = True
else:
current_status = False
if only_cozy and app_status[0] not in SYSTEM_APPS:
apps[app_status[0]] = current_status
else:
apps[app_status[0]] = current_status
# Return app status if get as param or return all apps status
if app_name:
return apps.get(app_name, None)
else:
return apps |
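The parsing above can be sanity-checked without cozy-monitor by feeding the same strip/split logic a canned output string; ANSI_ESCAPE is assumed to be the usual escape-sequence regex:

import re

ANSI_ESCAPE = re.compile(r'\x1b\[[0-9;]*m')
raw = '\x1b[32mhome: up\x1b[0m\ndata-system: up\nmy-app: down\n'

apps = {}
for line in raw.split('\n'):
    if line:
        parts = ANSI_ESCAPE.sub('', line).split(': ')
        if len(parts) == 2:
            apps[parts[0]] = parts[1]
print(apps)  # {'home': 'up', 'data-system': 'up', 'my-app': 'down'}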
def _remove_complex_types(dictionary):
'''
Linode-python is now returning some complex types that
are not serializable by msgpack. Kill those.
'''
for k, v in six.iteritems(dictionary):
if isinstance(v, dict):
dictionary[k] = _remove_complex_types(v)
elif hasattr(v, 'to_eng_string'):
dictionary[k] = v.to_eng_string()
return dictionary | Linode-python is now returning some complex types that
are not serializable by msgpack. Kill those. | Below is the instruction that describes the task:
### Input:
Linode-python is now returning some complex types that
are not serializable by msgpack. Kill those.
### Response:
def _remove_complex_types(dictionary):
'''
Linode-python is now returning some complex types that
are not serializable by msgpack. Kill those.
'''
for k, v in six.iteritems(dictionary):
if isinstance(v, dict):
dictionary[k] = _remove_complex_types(v)
elif hasattr(v, 'to_eng_string'):
dictionary[k] = v.to_eng_string()
return dictionary |
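The to_eng_string duck-typing above targets decimal.Decimal values returned by linode-python; assuming the function and six are importable, a quick check:

from decimal import Decimal

data = {'balance': Decimal('20.00'), 'meta': {'cpu': Decimal('1.5')}}
print(_remove_complex_types(data))
# {'balance': '20.00', 'meta': {'cpu': '1.5'}}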
def setKeyColor( self, key, color ):
"""
Sets the color used when rendering pie charts.
:param key | <str>
color | <QColor>
"""
self._keyColors[nativestring(key)] = QColor(color) | Sets the color used when rendering pie charts.
:param key | <str>
color | <QColor> | Below is the instruction that describes the task:
### Input:
Sets the color used when rendering pie charts.
:param key | <str>
color | <QColor>
### Response:
def setKeyColor( self, key, color ):
"""
Sets the color used when rendering pie charts.
:param key | <str>
color | <QColor>
"""
self._keyColors[nativestring(key)] = QColor(color) |
def deleteAllSubscriptions(self):
'''
Delete all subscriptions on the domain (all endpoints, all resources)
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
data = self._deleteURL("/subscriptions/")
if data.status_code == 204: #immediate success
result.error = False
result.is_done = True
else:
result.error = response_codes("unsubscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result | Delete all subscriptions on the domain (all endpoints, all resources)
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult | Below is the instruction that describes the task:
### Input:
Delete all subscriptions on the domain (all endpoints, all resources)
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
### Response:
def deleteAllSubscriptions(self):
'''
Delete all subscriptions on the domain (all endpoints, all resources)
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
data = self._deleteURL("/subscriptions/")
if data.status_code == 204: #immediate success
result.error = False
result.is_done = True
else:
result.error = response_codes("unsubscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result |
def string(self, *pattern, **kwargs):
"""
Add string pattern
:param pattern:
:type pattern:
:param kwargs:
:type kwargs:
:return:
:rtype:
"""
set_defaults(self._kwargs, kwargs)
set_defaults(self._functional_defaults, kwargs)
set_defaults(self._defaults, kwargs)
pattern = self.rebulk.build_string(*pattern, **kwargs)
part = ChainPart(self, pattern)
self.parts.append(part)
return part | Add string pattern
:param pattern:
:type pattern:
:param kwargs:
:type kwargs:
:return:
:rtype: | Below is the instruction that describes the task:
### Input:
Add string pattern
:param pattern:
:type pattern:
:param kwargs:
:type kwargs:
:return:
:rtype:
### Response:
def string(self, *pattern, **kwargs):
"""
Add string pattern
:param pattern:
:type pattern:
:param kwargs:
:type kwargs:
:return:
:rtype:
"""
set_defaults(self._kwargs, kwargs)
set_defaults(self._functional_defaults, kwargs)
set_defaults(self._defaults, kwargs)
pattern = self.rebulk.build_string(*pattern, **kwargs)
part = ChainPart(self, pattern)
self.parts.append(part)
return part |
def application(self, func):
"""Parse the function application subgrammar.
Function application can, conceptually, be thought of as a mixfix
operator, similar to the way array subscripting works. However, it is
not clear at this point whether we want to allow it to work as such,
because doing so would permit queries to, at runtime, select methods
out of an arbitrary object and then call them.
While there is a function whitelist and preventing this sort of thing
in the syntax isn't a security feature, it still seems like the
syntax should make it clear what the intended use of application is.
If we later decide to extend DottySQL to allow function application
over an arbitrary LHS expression then that syntax would be a strict
superset of the current syntax and backwards compatible.
"""
start = self.tokens.matched.start
if self.tokens.accept(common_grammar.rparen):
# That was easy.
return ast.Apply(func, start=start, end=self.tokens.matched.end,
source=self.original)
arguments = [self.expression()]
while self.tokens.accept(common_grammar.comma):
arguments.append(self.expression())
self.tokens.expect(common_grammar.rparen)
return ast.Apply(func, *arguments, start=start,
end=self.tokens.matched.end, source=self.original) | Parse the function application subgrammar.
Function application can, conceptually, be thought of as a mixfix
operator, similar to the way array subscripting works. However, it is
not clear at this point whether we want to allow it to work as such,
because doing so would permit queries to, at runtime, select methods
out of an arbitrary object and then call them.
While there is a function whitelist and preventing this sort of thing
in the syntax isn't a security feature, it still seems like the
syntax should make it clear what the intended use of application is.
If we later decide to extend DottySQL to allow function application
over an arbitrary LHS expression then that syntax would be a strict
superset of the current syntax and backwards compatible. | Below is the instruction that describes the task:
### Input:
Parse the function application subgrammar.
Function application can, conceptually, be thought of as a mixfix
operator, similar to the way array subscripting works. However, it is
not clear at this point whether we want to allow it to work as such,
because doing so would permit queries to, at runtime, select methods
out of an arbitrary object and then call them.
While there is a function whitelist and preventing this sort of thing
in the syntax isn't a security feature, it still seems like the
syntax should make it clear what the intended use of application is.
If we later decide to extend DottySQL to allow function application
over an arbitrary LHS expression then that syntax would be a strict
superset of the current syntax and backwards compatible.
### Response:
def application(self, func):
"""Parse the function application subgrammar.
Function application can, conceptually, be thought of as a mixfix
operator, similar to the way array subscripting works. However, it is
not clear at this point whether we want to allow it to work as such,
because doing so would permit queries to, at runtime, select methods
out of an arbitrary object and then call them.
While there is a function whitelist and preventing this sort of thing
in the syntax isn't a security feature, it still seems like the
syntax should make it clear what the intended use of application is.
If we later decide to extend DottySQL to allow function application
over an arbitrary LHS expression then that syntax would be a strict
superset of the current syntax and backwards compatible.
"""
start = self.tokens.matched.start
if self.tokens.accept(common_grammar.rparen):
# That was easy.
return ast.Apply(func, start=start, end=self.tokens.matched.end,
source=self.original)
arguments = [self.expression()]
while self.tokens.accept(common_grammar.comma):
arguments.append(self.expression())
self.tokens.expect(common_grammar.rparen)
return ast.Apply(func, *arguments, start=start,
end=self.tokens.matched.end, source=self.original) |
def rank(matrix, atol=1e-13, rtol=0):
"""
Estimate the rank, i.e., the dimension of the column space, of a matrix.
The algorithm used by this function is based on the singular value
decomposition of `matrix`.
Parameters
----------
matrix : ndarray
The matrix should be at most 2-D. A 1-D array with length k
will be treated as a 2-D with shape (1, k)
atol : float
The absolute tolerance for a zero singular value. Singular values
smaller than ``atol`` are considered to be zero.
rtol : float
The relative tolerance for a zero singular value. Singular values less
than the relative tolerance times the largest singular value are
considered to be zero.
Notes
-----
If both `atol` and `rtol` are positive, the combined tolerance is the
maximum of the two; that is::
tol = max(atol, rtol * smax)
Singular values smaller than ``tol`` are considered to be zero.
Returns
-------
int
The estimated rank of the matrix.
See Also
--------
numpy.linalg.matrix_rank
matrix_rank is basically the same as this function, but it does not
provide the option of the absolute tolerance.
"""
matrix = np.atleast_2d(matrix)
sigma = svd(matrix, compute_uv=False)
tol = max(atol, rtol * sigma[0])
return int((sigma >= tol).sum()) | Estimate the rank, i.e., the dimension of the column space, of a matrix.
The algorithm used by this function is based on the singular value
decomposition of `matrix`.
Parameters
----------
matrix : ndarray
The matrix should be at most 2-D. A 1-D array with length k
will be treated as a 2-D with shape (1, k)
atol : float
The absolute tolerance for a zero singular value. Singular values
smaller than ``atol`` are considered to be zero.
rtol : float
The relative tolerance for a zero singular value. Singular values less
than the relative tolerance times the largest singular value are
considered to be zero.
Notes
-----
If both `atol` and `rtol` are positive, the combined tolerance is the
maximum of the two; that is::
tol = max(atol, rtol * smax)
Singular values smaller than ``tol`` are considered to be zero.
Returns
-------
int
The estimated rank of the matrix.
See Also
--------
numpy.linalg.matrix_rank
matrix_rank is basically the same as this function, but it does not
provide the option of the absolute tolerance. | Below is the instruction that describes the task:
### Input:
Estimate the rank, i.e., the dimension of the column space, of a matrix.
The algorithm used by this function is based on the singular value
decomposition of `matrix`.
Parameters
----------
matrix : ndarray
The matrix should be at most 2-D. A 1-D array with length k
will be treated as a 2-D with shape (1, k)
atol : float
The absolute tolerance for a zero singular value. Singular values
smaller than ``atol`` are considered to be zero.
rtol : float
The relative tolerance for a zero singular value. Singular values less
than the relative tolerance times the largest singular value are
considered to be zero.
Notes
-----
If both `atol` and `rtol` are positive, the combined tolerance is the
maximum of the two; that is::
tol = max(atol, rtol * smax)
Singular values smaller than ``tol`` are considered to be zero.
Returns
-------
int
The estimated rank of the matrix.
See Also
--------
numpy.linalg.matrix_rank
matrix_rank is basically the same as this function, but it does not
provide the option of the absolute tolerance.
### Response:
def rank(matrix, atol=1e-13, rtol=0):
"""
Estimate the rank, i.e., the dimension of the column space, of a matrix.
The algorithm used by this function is based on the singular value
decomposition of `matrix`.
Parameters
----------
matrix : ndarray
The matrix should be at most 2-D. A 1-D array with length k
will be treated as a 2-D with shape (1, k)
atol : float
The absolute tolerance for a zero singular value. Singular values
smaller than ``atol`` are considered to be zero.
rtol : float
The relative tolerance for a zero singular value. Singular values less
than the relative tolerance times the largest singular value are
considered to be zero.
Notes
-----
If both `atol` and `rtol` are positive, the combined tolerance is the
maximum of the two; that is::
tol = max(atol, rtol * smax)
Singular values smaller than ``tol`` are considered to be zero.
Returns
-------
int
The estimated rank of the matrix.
See Also
--------
numpy.linalg.matrix_rank
matrix_rank is basically the same as this function, but it does not
provide the option of the absolute tolerance.
"""
matrix = np.atleast_2d(matrix)
sigma = svd(matrix, compute_uv=False)
tol = max(atol, rtol * sigma[0])
return int((sigma >= tol).sum()) |
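A quick check of rank on a rank-deficient matrix (the function assumes numpy imported as np and svd from scipy.linalg, as it uses both):

import numpy as np
from scipy.linalg import svd

A = np.array([[1.0, 2.0],
              [2.0, 4.0]])   # second row is twice the first
print(rank(A))               # 1
print(rank(np.eye(3)))       # 3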
def unmatched_quotes_in_line(text):
"""Return whether a string has open quotes.
This simply counts whether the number of quote characters of either
type in the string is odd.
Taken from the IPython project (in IPython/core/completer.py in v0.13)
Spyder team: Add some changes to deal with escaped quotes
- Copyright (C) 2008-2011 IPython Development Team
- Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
- Copyright (C) 2001 Python Software Foundation, www.python.org
Distributed under the terms of the BSD License.
"""
# We check " first, then ', so complex cases with nested quotes will
# get the " to take precedence.
text = text.replace("\\'", "")
text = text.replace('\\"', '')
if text.count('"') % 2:
return '"'
elif text.count("'") % 2:
return "'"
else:
return '' | Return whether a string has open quotes.
This simply counts whether the number of quote characters of either
type in the string is odd.
Taken from the IPython project (in IPython/core/completer.py in v0.13)
Spyder team: Add some changes to deal with escaped quotes
- Copyright (C) 2008-2011 IPython Development Team
- Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
- Copyright (C) 2001 Python Software Foundation, www.python.org
Distributed under the terms of the BSD License. | Below is the instruction that describes the task:
### Input:
Return whether a string has open quotes.
This simply counts whether the number of quote characters of either
type in the string is odd.
Taken from the IPython project (in IPython/core/completer.py in v0.13)
Spyder team: Add some changes to deal with escaped quotes
- Copyright (C) 2008-2011 IPython Development Team
- Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
- Copyright (C) 2001 Python Software Foundation, www.python.org
Distributed under the terms of the BSD License.
### Response:
def unmatched_quotes_in_line(text):
"""Return whether a string has open quotes.
This simply counts whether the number of quote characters of either
type in the string is odd.
Taken from the IPython project (in IPython/core/completer.py in v0.13)
Spyder team: Add some changes to deal with escaped quotes
- Copyright (C) 2008-2011 IPython Development Team
- Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
- Copyright (C) 2001 Python Software Foundation, www.python.org
Distributed under the terms of the BSD License.
"""
# We check " first, then ', so complex cases with nested quotes will
# get the " to take precedence.
text = text.replace("\\'", "")
text = text.replace('\\"', '')
if text.count('"') % 2:
return '"'
elif text.count("'") % 2:
return "'"
else:
return '' |
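A minimal sketch exercising all three branches:
```python
print(unmatched_quotes_in_line('print("hello'))     # '"'  -- one unmatched double quote
print(unmatched_quotes_in_line("it's fine"))        # "'"  -- one unmatched single quote
print(unmatched_quotes_in_line('say \\"hi\\" ok'))  # ''   -- escaped quotes are stripped first
```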
def sanitize_codon_list(codon_list, forbidden_seqs=()):
"""
Make silent mutations to the given codon list to remove any undesirable
sequences that are present within it. Undesirable sequences include
restriction sites, which may be optionally specified as a second argument,
and homopolymers above a pre-defined length. The return value is the
number of corrections made to the codon list.
"""
# Unit test missing for:
# Homopolymer fixing
for codon in codon_list:
if len(codon) != 3:
raise ValueError("Codons must have exactly 3 bases: '{}'".format(codon))
# Compile a collection of all the sequences we don't want to appear in the
# gene. This includes the given restriction sites and their reverse
# complements, plus any homopolymers above a pre-defined length.
bad_seqs = set()
bad_seqs.update(  # union() returns a new set; the original discarded it, leaving bad_seqs empty
restriction_sites.get(seq, seq)
for seq in forbidden_seqs)
bad_seqs.update(
dna.reverse_complement(seq)
for seq in list(bad_seqs))  # snapshot so the set is not mutated while being iterated
bad_seqs.update(
base * (gen9.homopolymer_max_lengths[base] + 1)
for base in dna.dna_bases)
bad_seqs = [
dna.dna_to_re(bs)
for bs in bad_seqs]
# Remove every bad sequence from the gene by making silent mutations to the
# codon list.
num_corrections = 0
for bad_seq in bad_seqs:
while remove_bad_sequence(codon_list, bad_seq, bad_seqs):
num_corrections += 1
return num_corrections | Make silent mutations to the given codon list to remove any undesirable
sequences that are present within it. Undesirable sequences include
restriction sites, which may be optionally specified as a second argument,
and homopolymers above a pre-defined length. The return value is the
number of corrections made to the codon list. | Below is the instruction that describes the task:
### Input:
Make silent mutations to the given codon list to remove any undesirable
sequences that are present within it. Undesirable sequences include
restriction sites, which may be optionally specified as a second argument,
and homopolymers above a pre-defined length. The return value is the
number of corrections made to the codon list.
### Response:
def sanitize_codon_list(codon_list, forbidden_seqs=()):
"""
Make silent mutations to the given codon list to remove any undesirable
sequences that are present within it. Undesirable sequences include
restriction sites, which may be optionally specified as a second argument,
and homopolymers above a pre-defined length. The return value is the
number of corrections made to the codon list.
"""
# Unit test missing for:
# Homopolymer fixing
for codon in codon_list:
if len(codon) != 3:
raise ValueError("Codons must have exactly 3 bases: '{}'".format(codon))
# Compile a collection of all the sequences we don't want to appear in the
# gene. This includes the given restriction sites and their reverse
# complements, plus any homopolymers above a pre-defined length.
bad_seqs = set()
bad_seqs.update(  # union() returns a new set; the original discarded it, leaving bad_seqs empty
restriction_sites.get(seq, seq)
for seq in forbidden_seqs)
bad_seqs.update(
dna.reverse_complement(seq)
for seq in list(bad_seqs))  # snapshot so the set is not mutated while being iterated
bad_seqs.update(
base * (gen9.homopolymer_max_lengths[base] + 1)
for base in dna.dna_bases)
bad_seqs = [
dna.dna_to_re(bs)
for bs in bad_seqs]
# Remove every bad sequence from the gene by making silent mutations to the
# codon list.
num_corrections = 0
for bad_seq in bad_seqs:
while remove_bad_sequence(codon_list, bad_seq, bad_seqs):
num_corrections += 1
return num_corrections |
def _bind_and_call_constructor(self, t: type, *args) -> None:
"""
Accesses the __init__ method of a type directly and calls it with *args
This allows the constructors of both superclasses to be called, as described in get_binding.md
This could be done using two calls to super() with a hack based on how Python searches __mro__:
```
super().__init__(run, parent) # calls greenlet.greenlet.__init__
super(greenlet.greenlet, self).__init__() # calls TaggedObject.__init__
```
Python will always find greenlet.greenlet first since it is specified first, but will ignore it if it is
the first argument to super, which is meant to indicate the subclass and thus is not meant to be called on
See: https://docs.python.org/3.7/library/functions.html#super
This is indirect, confusing, and not in keeping with the purpose of super(), so the direct method was used
"""
t.__init__.__get__(self)(*args) | Accesses the __init__ method of a type directly and calls it with *args
This allows the constructors of both superclasses to be called, as described in get_binding.md
This could be done using two calls to super() with a hack based on how Python searches __mro__:
```
super().__init__(run, parent) # calls greenlet.greenlet.__init__
super(greenlet.greenlet, self).__init__() # calls TaggedObject.__init__
```
Python will always find greenlet.greenlet first since it is specified first, but will ignore it if it is
the first argument to super, which is meant to indicate the subclass and thus is not meant to be called on
See: https://docs.python.org/3.7/library/functions.html#super
This is indirect, confusing, and not in keeping with the purpose of super(), so the direct method was used | Below is the instruction that describes the task:
### Input:
Accesses the __init__ method of a type directly and calls it with *args
This allows the constructors of both superclasses to be called, as described in get_binding.md
This could be done using two calls to super() with a hack based on how Python searches __mro__:
```
super().__init__(run, parent) # calls greenlet.greenlet.__init__
super(greenlet.greenlet, self).__init__() # calls TaggedObject.__init__
```
Python will always find greenlet.greenlet first since it is specified first, but will ignore it if it is
the first argument to super, which is meant to indicate the subclass and thus is not meant to be called on
See: https://docs.python.org/3.7/library/functions.html#super
This is indirect, confusing, and not in keeping with the purpose of super(), so the direct method was used
### Response:
def _bind_and_call_constructor(self, t: type, *args) -> None:
"""
Accesses the __init__ method of a type directly and calls it with *args
This allows the constructors of both superclasses to be called, as described in get_binding.md
This could be done using two calls to super() with a hack based on how Python searches __mro__:
```
super().__init__(run, parent) # calls greenlet.greenlet.__init__
super(greenlet.greenlet, self).__init__() # calls TaggedObject.__init__
```
Python will always find greenlet.greenlet first since it is specified first, but will ignore it if it is
the first argument to super, which is meant to indicate the subclass and thus is not meant to be called on
See: https://docs.python.org/3.7/library/functions.html#super
This is indirect, confusing, and not in keeping with the purpose of super(), so the direct method was used
"""
t.__init__.__get__(self)(*args) |
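The binding trick is easier to see in a standalone sketch with two invented, unrelated bases:
```python
class Runner:
    def __init__(self, run):
        self.run = run

class Tagged:
    def __init__(self):
        self.tag = id(self)

class Task(Runner, Tagged):
    def __init__(self, run):
        # Bind each base's __init__ to self directly, sidestepping the
        # MRO search that super() would perform.
        Runner.__init__.__get__(self)(run)
        Tagged.__init__.__get__(self)()

t = Task("work")
assert t.run == "work" and t.tag == id(t)
```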
def clear(self):
"""Removes all SSH keys from a user's system."""
r = self._h._http_resource(
method='DELETE',
resource=('user', 'keys'),
)
return r.ok | Removes all SSH keys from a user's system. | Below is the instruction that describes the task:
### Input:
Removes all SSH keys from a user's system.
### Response:
def clear(self):
"""Removes all SSH keys from a user's system."""
r = self._h._http_resource(
method='DELETE',
resource=('user', 'keys'),
)
return r.ok |
def cli(ctx, feature_id, symbol, organism="", sequence=""):
"""Set a feature's description
Output:
A standard apollo feature dictionary ({"features": [{...}]})
"""
return ctx.gi.annotations.set_symbol(feature_id, symbol, organism=organism, sequence=sequence) | Set a feature's symbol
Output:
A standard apollo feature dictionary ({"features": [{...}]}) | Below is the instruction that describes the task:
### Input:
Set a feature's symbol
Output:
A standard apollo feature dictionary ({"features": [{...}]})
### Response:
def cli(ctx, feature_id, symbol, organism="", sequence=""):
"""Set a feature's description
Output:
A standard apollo feature dictionary ({"features": [{...}]})
"""
return ctx.gi.annotations.set_symbol(feature_id, symbol, organism=organism, sequence=sequence) |
def get_redirect_args(self, request, callback):
"Get request parameters for redirect url."
callback = force_text(request.build_absolute_uri(callback))
raw_token = self.get_request_token(request, callback)
token, secret = self.parse_raw_token(raw_token)
if token is not None and secret is not None:
request.session[self.session_key] = raw_token
return {
'oauth_token': token,
'oauth_callback': callback,
} | Get request parameters for redirect url. | Below is the instruction that describes the task:
### Input:
Get request parameters for redirect url.
### Response:
def get_redirect_args(self, request, callback):
"Get request parameters for redirect url."
callback = force_text(request.build_absolute_uri(callback))
raw_token = self.get_request_token(request, callback)
token, secret = self.parse_raw_token(raw_token)
if token is not None and secret is not None:
request.session[self.session_key] = raw_token
return {
'oauth_token': token,
'oauth_callback': callback,
} |
def add_subject(self, subject_id, attributes, parents=[],
issuer='default'):
"""
Will add the given subject with a given identifier and attribute
dictionary.
example/
add_subject('/user/j12y', {'username': 'j12y'})
"""
# MAINT: consider test to avoid adding duplicate subject id
assert isinstance(attributes, (dict)), "attributes expected to be dict"
attrs = []
for key in attributes.keys():
attrs.append({
'issuer': issuer,
'name': key,
'value': attributes[key]
})
body = {
"subjectIdentifier": subject_id,
"parents": parents,
"attributes": attrs,
}
return self._put_subject(subject_id, body) | Will add the given subject with a given identifier and attribute
dictionary.
example/
add_subject('/user/j12y', {'username': 'j12y'}) | Below is the instruction that describes the task:
### Input:
Will add the given subject with a given identifier and attribute
dictionary.
example/
add_subject('/user/j12y', {'username': 'j12y'})
### Response:
def add_subject(self, subject_id, attributes, parents=[],
issuer='default'):
"""
Will add the given subject with a given identifier and attribute
dictionary.
example/
add_subject('/user/j12y', {'username': 'j12y'})
"""
# MAINT: consider test to avoid adding duplicate subject id
assert isinstance(attributes, (dict)), "attributes expected to be dict"
attrs = []
for key in attributes.keys():
attrs.append({
'issuer': issuer,
'name': key,
'value': attributes[key]
})
body = {
"subjectIdentifier": subject_id,
"parents": parents,
"attributes": attrs,
}
return self._put_subject(subject_id, body) |
def _get_path(path):
"""
Fetch the string value from a path-like object
Returns **None** if there is no string value.
"""
if isinstance(path, (six.string_types, bytes)):
return path
path_type = type(path)
try:
path_repr = path_type.__fspath__(path)
except AttributeError:
return
if isinstance(path_repr, (six.string_types, bytes)):
return path_repr
return | Fetch the string value from a path-like object
Returns **None** if there is no string value. | Below is the instruction that describes the task:
### Input:
Fetch the string value from a path-like object
Returns **None** if there is no string value.
### Response:
def _get_path(path):
"""
Fetch the string value from a path-like object
Returns **None** if there is no string value.
"""
if isinstance(path, (six.string_types, bytes)):
return path
path_type = type(path)
try:
path_repr = path_type.__fspath__(path)
except AttributeError:
return
if isinstance(path_repr, (six.string_types, bytes)):
return path_repr
return |
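Behaviour sketch, assuming the module's `six` import; the example values are illustrative:
```python
import pathlib

print(_get_path("a/b.txt"))               # 'a/b.txt'   -- plain string passes through
print(_get_path(b"a/b.txt"))              # b'a/b.txt'  -- bytes pass through
print(_get_path(pathlib.Path("a", "b")))  # 'a/b' via __fspath__ (separator is OS-specific)
print(_get_path(42))                      # None        -- not path-like
```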
def set_available(self, show=None):
"""
Sets the agent availability to True.
Args:
show (aioxmpp.PresenceShow, optional): the show state of the presence (Default value = None)
"""
show = self.state.show if show is None else show
self.set_presence(PresenceState(available=True, show=show)) | Sets the agent availability to True.
Args:
show (aioxmpp.PresenceShow, optional): the show state of the presence (Default value = None) | Below is the instruction that describes the task:
### Input:
Sets the agent availability to True.
Args:
show (aioxmpp.PresenceShow, optional): the show state of the presence (Default value = None)
### Response:
def set_available(self, show=None):
"""
Sets the agent availability to True.
Args:
show (aioxmpp.PresenceShow, optional): the show state of the presence (Default value = None)
"""
show = self.state.show if show is None else show
self.set_presence(PresenceState(available=True, show=show)) |
def get_answers(self):
"""
Returns a {(key, value), ...} dictionary of {(instance_id, Answer), ...}
>>> coarse_wsd = SemEval2007_Coarse_WSD()
>>> inst2ans = coarse_wsd.get_answers()
>>> for inst in inst2ans:
... print inst, inst2ans[inst]
... break
"""
inst2ans = {}
with io.open(self.test_ans, 'r') as fin:
for line in fin:
line, _, lemma = line.strip().rpartition(' !! ')
lemma, pos = lemma[6:].split('#')
textid, _, line = line.partition(' ')
instid, _, line = line.partition(' ')
sensekey = line.split()
# What to do if there is no synset to convert to...
# synsetkey = [semcor_to_synset(i) for i in sensekey]
inst2ans[instid] = Answer(sensekey, lemma, pos)
return inst2ans | Returns a {(key, value), ...} dictionary of {(instance_id, Answer), ...}
>>> coarse_wsd = SemEval2007_Coarse_WSD()
>>> inst2ans = coarse_wsd.get_answers()
>>> for inst in inst2ans:
... print inst, inst2ans[inst]
... break | Below is the instruction that describes the task:
### Input:
Returns a {(key, value), ...} dictionary of {(instance_id, Answer), ...}
>>> coarse_wsd = SemEval2007_Coarse_WSD()
>>> inst2ans = coarse_wsd.get_answers()
>>> for inst in inst2ans:
... print inst, inst2ans[inst]
... break
### Response:
def get_answers(self):
"""
Returns a {(key, value), ...} dictionary of {(instance_id, Answer), ...}
>>> coarse_wsd = SemEval2007_Coarse_WSD()
>>> inst2ans = coarse_wsd.get_answers()
>>> for inst in inst2ans:
... print inst, inst2ans[inst]
... break
"""
inst2ans = {}
with io.open(self.test_ans, 'r') as fin:
for line in fin:
line, _, lemma = line.strip().rpartition(' !! ')
lemma, pos = lemma[6:].split('#')
textid, _, line = line.partition(' ')
instid, _, line = line.partition(' ')
sensekey = line.split()
# What to do if there is no synset to convert to...
# synsetkey = [semcor_to_synset(i) for i in sensekey]
inst2ans[instid] = Answer(sensekey, lemma, pos)
return inst2ans |
def isInfinite(self):
"""Check if rectangle is infinite."""
return self.x0 > self.x1 or self.y0 > self.y1 | Check if rectangle is infinite. | Below is the instruction that describes the task:
### Input:
Check if rectangle is infinite.
### Response:
def isInfinite(self):
"""Check if rectangle is infinite."""
return self.x0 > self.x1 or self.y0 > self.y1 |
def decrypt_cbc_cts(self, data, init_vector):
"""
Return an iterator that decrypts `data` using the Cipher-Block Chaining
with Ciphertext Stealing (CBC-CTS) mode of operation.
CBC-CTS mode can only operate on `data` that is greater than 8 bytes in
length.
Each iteration, except the last, always returns a block-sized :obj:`bytes`
object (i.e. 8 bytes). The last iteration may return a :obj:`bytes` object
with a length less than the block-size, if `data` is not a multiple of the
block-size in length.
`data` should be a :obj:`bytes`-like object that is greater than 8 bytes in
length.
If it is not, a :exc:`ValueError` exception is raised.
`init_vector` is the initialization vector and should be a
:obj:`bytes`-like object with exactly 8 bytes.
If it is not, a :exc:`ValueError` exception is raised.
"""
data_len = len(data)
if data_len <= 8:
raise ValueError("data is not greater than 8 bytes in length")
S1, S2, S3, S4 = self.S
P = self.P
u4_1_pack = self._u4_1_pack
u1_4_unpack = self._u1_4_unpack
u4_2_pack = self._u4_2_pack
u4_2_unpack = self._u4_2_unpack
decrypt = self._decrypt
try:
prev_cipher_L, prev_cipher_R = u4_2_unpack(init_vector)
except struct_error:
raise ValueError("initialization vector is not 8 bytes in length")
extra_bytes = data_len % 8
last_block_stop_i = data_len - extra_bytes
last_block_start_i = last_block_stop_i - 8
for cipher_L, cipher_R in self._u4_2_iter_unpack(
data[0:last_block_start_i]
):
L, R = decrypt(
cipher_L, cipher_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
yield u4_2_pack(L ^ prev_cipher_L, R ^ prev_cipher_R)
prev_cipher_L = cipher_L
prev_cipher_R = cipher_R
cipher_L, cipher_R = u4_2_unpack(data[last_block_start_i:last_block_stop_i])
L, R = decrypt(
cipher_L, cipher_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
C_L, C_R = u4_2_unpack(data[last_block_stop_i:] + bytes(8 - extra_bytes))
Xn = u4_2_pack(L ^ C_L, R ^ C_R)
E_L, E_R = u4_2_unpack(data[last_block_stop_i:] + Xn[extra_bytes:])
L, R = decrypt(
E_L, E_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
yield u4_2_pack(L ^ prev_cipher_L, R ^ prev_cipher_R)
yield Xn[:extra_bytes] | Return an iterator that decrypts `data` using the Cipher-Block Chaining
with Ciphertext Stealing (CBC-CTS) mode of operation.
CBC-CTS mode can only operate on `data` that is greater than 8 bytes in
length.
Each iteration, except the last, always returns a block-sized :obj:`bytes`
object (i.e. 8 bytes). The last iteration may return a :obj:`bytes` object
with a length less than the block-size, if `data` is not a multiple of the
block-size in length.
`data` should be a :obj:`bytes`-like object that is greater than 8 bytes in
length.
If it is not, a :exc:`ValueError` exception is raised.
`init_vector` is the initialization vector and should be a
:obj:`bytes`-like object with exactly 8 bytes.
If it is not, a :exc:`ValueError` exception is raised. | Below is the instruction that describes the task:
### Input:
Return an iterator that decrypts `data` using the Cipher-Block Chaining
with Ciphertext Stealing (CBC-CTS) mode of operation.
CBC-CTS mode can only operate on `data` that is greater than 8 bytes in
length.
Each iteration, except the last, always returns a block-sized :obj:`bytes`
object (i.e. 8 bytes). The last iteration may return a :obj:`bytes` object
with a length less than the block-size, if `data` is not a multiple of the
block-size in length.
`data` should be a :obj:`bytes`-like object that is greater than 8 bytes in
length.
If it is not, a :exc:`ValueError` exception is raised.
`init_vector` is the initialization vector and should be a
:obj:`bytes`-like object with exactly 8 bytes.
If it is not, a :exc:`ValueError` exception is raised.
### Response:
def decrypt_cbc_cts(self, data, init_vector):
"""
Return an iterator that decrypts `data` using the Cipher-Block Chaining
with Ciphertext Stealing (CBC-CTS) mode of operation.
CBC-CTS mode can only operate on `data` that is greater than 8 bytes in
length.
Each iteration, except the last, always returns a block-sized :obj:`bytes`
object (i.e. 8 bytes). The last iteration may return a :obj:`bytes` object
with a length less than the block-size, if `data` is not a multiple of the
block-size in length.
`data` should be a :obj:`bytes`-like object that is greater than 8 bytes in
length.
If it is not, a :exc:`ValueError` exception is raised.
`init_vector` is the initialization vector and should be a
:obj:`bytes`-like object with exactly 8 bytes.
If it is not, a :exc:`ValueError` exception is raised.
"""
data_len = len(data)
if data_len <= 8:
raise ValueError("data is not greater than 8 bytes in length")
S1, S2, S3, S4 = self.S
P = self.P
u4_1_pack = self._u4_1_pack
u1_4_unpack = self._u1_4_unpack
u4_2_pack = self._u4_2_pack
u4_2_unpack = self._u4_2_unpack
decrypt = self._decrypt
try:
prev_cipher_L, prev_cipher_R = u4_2_unpack(init_vector)
except struct_error:
raise ValueError("initialization vector is not 8 bytes in length")
extra_bytes = data_len % 8
last_block_stop_i = data_len - extra_bytes
last_block_start_i = last_block_stop_i - 8
for cipher_L, cipher_R in self._u4_2_iter_unpack(
data[0:last_block_start_i]
):
L, R = decrypt(
cipher_L, cipher_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
yield u4_2_pack(L ^ prev_cipher_L, R ^ prev_cipher_R)
prev_cipher_L = cipher_L
prev_cipher_R = cipher_R
cipher_L, cipher_R = u4_2_unpack(data[last_block_start_i:last_block_stop_i])
L, R = decrypt(
cipher_L, cipher_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
C_L, C_R = u4_2_unpack(data[last_block_stop_i:] + bytes(8 - extra_bytes))
Xn = u4_2_pack(L ^ C_L, R ^ C_R)
E_L, E_R = u4_2_unpack(data[last_block_stop_i:] + Xn[extra_bytes:])
L, R = decrypt(
E_L, E_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
yield u4_2_pack(L ^ prev_cipher_L, R ^ prev_cipher_R)
yield Xn[:extra_bytes] |
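A usage sketch, assuming this method lives on a Blowfish-style cipher class that also exposes a matching `encrypt_cbc_cts`; the constructor and the encrypting counterpart are assumptions, not shown in this record:
```python
# Hypothetical cipher object; Blowfish(key) and encrypt_cbc_cts are assumed.
cipher = Blowfish(b"0123456789abcdef")
iv = bytes(8)
msg = b"seventeen bytes!!"                      # any length > 8 works
ct = b"".join(cipher.encrypt_cbc_cts(msg, iv))  # ciphertext is the same length as msg
pt = b"".join(cipher.decrypt_cbc_cts(ct, iv))
assert pt == msg
```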
def write_pid(self, pid=None):
"""Write the current processes PID to the pidfile location"""
pid = pid or os.getpid()
self.write_metadata_by_name(self._name, 'pid', str(pid)) | Write the current process's PID to the pidfile location | Below is the instruction that describes the task:
### Input:
Write the current process's PID to the pidfile location
### Response:
def write_pid(self, pid=None):
"""Write the current processes PID to the pidfile location"""
pid = pid or os.getpid()
self.write_metadata_by_name(self._name, 'pid', str(pid)) |
def create_figure(*fig_args, **fig_kwargs):
'''Create a single figure.
Args and Kwargs are passed to `matplotlib.figure.Figure`.
This routine is provided in order to avoid usage of pyplot which
is stateful and not thread safe. As drawing routines in tf-matplotlib
are called from py-funcs in their respective thread, avoid usage
of pyplot where possible.
'''
fig = Figure(*fig_args, **fig_kwargs)
# Attach canvas
FigureCanvas(fig)
return fig | Create a single figure.
Args and Kwargs are passed to `matplotlib.figure.Figure`.
This routine is provided in order to avoid usage of pyplot which
is stateful and not thread safe. As drawing routines in tf-matplotlib
are called from py-funcs in their respective thread, avoid usage
of pyplot where possible. | Below is the instruction that describes the task:
### Input:
Create a single figure.
Args and Kwargs are passed to `matplotlib.figure.Figure`.
This routine is provided in order to avoid usage of pyplot which
is stateful and not thread safe. As drawing routines in tf-matplotlib
are called from py-funcs in their respective thread, avoid usage
of pyplot where possible.
### Response:
def create_figure(*fig_args, **fig_kwargs):
'''Create a single figure.
Args and Kwargs are passed to `matplotlib.figure.Figure`.
This routine is provided in order to avoid usage of pyplot which
is stateful and not thread safe. As drawing routines in tf-matplotlib
are called from py-funcs in their respective thread, avoid usage
of pyplot where possible.
'''
fig = Figure(*fig_args, **fig_kwargs)
# Attach canvas
FigureCanvas(fig)
return fig |
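Usage sketch: because a canvas is attached, the figure can be rendered and saved without ever importing pyplot:
```python
fig = create_figure(figsize=(4, 3), dpi=100)
ax = fig.add_subplot(111)
ax.plot([0, 1, 2], [0, 1, 4])
fig.savefig('curve.png')  # renders via the attached FigureCanvas
```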
def get_contradictory_pairs(graph: BELGraph) -> Iterable[Tuple[BaseEntity, BaseEntity]]:
"""Iterates over contradictory node pairs in the graph based on their causal relationships
:return: An iterator over (source, target) node pairs that have contradictory causal edges
"""
for u, v in graph.edges():
if pair_has_contradiction(graph, u, v):
yield u, v | Iterates over contradictory node pairs in the graph based on their causal relationships
:return: An iterator over (source, target) node pairs that have contradictory causal edges | Below is the instruction that describes the task:
### Input:
Iterates over contradictory node pairs in the graph based on their causal relationships
:return: An iterator over (source, target) node pairs that have contradictory causal edges
### Response:
def get_contradictory_pairs(graph: BELGraph) -> Iterable[Tuple[BaseEntity, BaseEntity]]:
"""Iterates over contradictory node pairs in the graph based on their causal relationships
:return: An iterator over (source, target) node pairs that have contradictory causal edges
"""
for u, v in graph.edges():
if pair_has_contradiction(graph, u, v):
yield u, v |
def online_time_to_string(value, timeFormat, utcOffset=0):
"""Converts AGOL timestamp to formatted string.
Args:
value (float): A UTC timestamp as reported by AGOL (time in ms since Unix epoch * 1000)
timeFormat (str): Date/Time format string as parsed by :py:func:`datetime.strftime`.
utcOffset (int): Hours difference from UTC and desired output. Default is 0 (remain in UTC).
Returns:
str: A string representation of the timestamp.
Examples:
>>> arcresthelper.common.online_time_to_string(1457167261000.0, "%Y-%m-%d %H:%M:%S")
'2016-03-05 00:41:01'
>>> arcresthelper.common.online_time_to_string(731392515000.0, '%m/%d/%Y %H:%M:%S', -8) # PST is UTC-8:00
'03/05/1993 12:35:15'
See Also:
:py:func:`local_time_to_online` for converting a :py:class:`datetime.datetime` object to AGOL timestamp
"""
try:
return datetime.datetime.fromtimestamp(value/1000 + utcOffset*3600).strftime(timeFormat)
except Exception:
line, filename, synerror = trace()
raise ArcRestHelperError({
"function": "online_time_to_string",
"line": line,
"filename": filename,
"synerror": synerror,
}
)
finally:
pass | Converts AGOL timestamp to formatted string.
Args:
value (float): A UTC timestamp as reported by AGOL (time in ms since Unix epoch * 1000)
timeFormat (str): Date/Time format string as parsed by :py:func:`datetime.strftime`.
utcOffset (int): Hours difference from UTC and desired output. Default is 0 (remain in UTC).
Returns:
str: A string representation of the timestamp.
Examples:
>>> arcresthelper.common.online_time_to_string(1457167261000.0, "%Y-%m-%d %H:%M:%S")
'2016-03-05 00:41:01'
>>> arcresthelper.common.online_time_to_string(731392515000.0, '%m/%d/%Y %H:%M:%S', -8) # PST is UTC-8:00
'03/05/1993 12:35:15'
See Also:
:py:func:`local_time_to_online` for converting a :py:class:`datetime.datetime` object to AGOL timestamp | Below is the instruction that describes the task:
### Input:
Converts AGOL timestamp to formatted string.
Args:
value (float): A UTC timestamp as reported by AGOL (time in ms since Unix epoch * 1000)
timeFormat (str): Date/Time format string as parsed by :py:func:`datetime.strftime`.
utcOffset (int): Hours difference from UTC and desired output. Default is 0 (remain in UTC).
Returns:
str: A string representation of the timestamp.
Examples:
>>> arcresthelper.common.online_time_to_string(1457167261000.0, "%Y-%m-%d %H:%M:%S")
'2016-03-05 00:41:01'
>>> arcresthelper.common.online_time_to_string(731392515000.0, '%m/%d/%Y %H:%M:%S', -8) # PST is UTC-8:00
'03/05/1993 12:35:15'
See Also:
:py:func:`local_time_to_online` for converting a :py:class:`datetime.datetime` object to AGOL timestamp
### Response:
def online_time_to_string(value, timeFormat, utcOffset=0):
"""Converts AGOL timestamp to formatted string.
Args:
value (float): A UTC timestamp as reported by AGOL (time in ms since Unix epoch * 1000)
timeFormat (str): Date/Time format string as parsed by :py:func:`datetime.strftime`.
utcOffset (int): Hours difference from UTC and desired output. Default is 0 (remain in UTC).
Returns:
str: A string representation of the timestamp.
Examples:
>>> arcresthelper.common.online_time_to_string(1457167261000.0, "%Y-%m-%d %H:%M:%S")
'2016-03-05 00:41:01'
>>> arcresthelper.common.online_time_to_string(731392515000.0, '%m/%d/%Y %H:%M:%S', -8) # PST is UTC-8:00
'03/05/1993 12:35:15'
See Also:
:py:func:`local_time_to_online` for converting a :py:class:`datetime.datetime` object to AGOL timestamp
"""
try:
return datetime.datetime.fromtimestamp(value/1000 + utcOffset*3600).strftime(timeFormat)
except Exception:
line, filename, synerror = trace()
raise ArcRestHelperError({
"function": "online_time_to_string",
"line": line,
"filename": filename,
"synerror": synerror,
}
)
finally:
pass |
def get_revision():
"""
:returns: Revision number of this branch/checkout, if available. None if
no revision number can be determined.
"""
package_dir = os.path.dirname(__file__)
checkout_dir = os.path.normpath(os.path.join(package_dir, '..'))
path = os.path.join(checkout_dir, '.git')
if os.path.exists(path):
return _get_git_revision(path)
return None | :returns: Revision number of this branch/checkout, if available. None if
no revision number can be determined. | Below is the instruction that describes the task:
### Input:
:returns: Revision number of this branch/checkout, if available. None if
no revision number can be determined.
### Response:
def get_revision():
"""
:returns: Revision number of this branch/checkout, if available. None if
no revision number can be determined.
"""
package_dir = os.path.dirname(__file__)
checkout_dir = os.path.normpath(os.path.join(package_dir, '..'))
path = os.path.join(checkout_dir, '.git')
if os.path.exists(path):
return _get_git_revision(path)
return None |
def set_lowest_numeric_score(self, score):
"""Sets the lowest numeric score.
arg: score (decimal): the lowest numeric score
raise: InvalidArgument - ``score`` is invalid
raise: NoAccess - ``score`` cannot be modified
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.grading.GradeSystemForm.set_lowest_numeric_score
if self.get_lowest_numeric_score_metadata().is_read_only():
raise errors.NoAccess()
try:
score = float(score)
except ValueError:
raise errors.InvalidArgument()
if not self._is_valid_decimal(score, self.get_lowest_numeric_score_metadata()):
raise errors.InvalidArgument()
self._my_map['lowestNumericScore'] = score | Sets the lowest numeric score.
arg: score (decimal): the lowest numeric score
raise: InvalidArgument - ``score`` is invalid
raise: NoAccess - ``score`` cannot be modified
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Sets the lowest numeric score.
arg: score (decimal): the lowest numeric score
raise: InvalidArgument - ``score`` is invalid
raise: NoAccess - ``score`` cannot be modified
*compliance: mandatory -- This method must be implemented.*
### Response:
def set_lowest_numeric_score(self, score):
"""Sets the lowest numeric score.
arg: score (decimal): the lowest numeric score
raise: InvalidArgument - ``score`` is invalid
raise: NoAccess - ``score`` cannot be modified
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.grading.GradeSystemForm.set_lowest_numeric_score
if self.get_lowest_numeric_score_metadata().is_read_only():
raise errors.NoAccess()
try:
score = float(score)
except ValueError:
raise errors.InvalidArgument()
if not self._is_valid_decimal(score, self.get_lowest_numeric_score_metadata()):
raise errors.InvalidArgument()
self._my_map['lowestNumericScore'] = score |
def _enumload(l: Loader, value, type_) -> Enum:
"""
This loads something into an Enum.
It tries with basic types first.
If that fails, it tries to look for type annotations inside the
Enum, and tries to use those to load the value into something
that is compatible with the Enum.
Of course if that fails too, a ValueError is raised.
"""
try:
# Try naïve conversion
return type_(value)
except Exception:
pass
# Try with the typing hints
for _, t in get_type_hints(type_).items():
try:
return type_(l.load(value, t))
except Exception:
pass
raise TypedloadValueError(
'Value could not be loaded into %s' % type_,
value=value,
type_=type_
) | This loads something into an Enum.
It tries with basic types first.
If that fails, it tries to look for type annotations inside the
Enum, and tries to use those to load the value into something
that is compatible with the Enum.
Of course if that fails too, a ValueError is raised. | Below is the instruction that describes the task:
### Input:
This loads something into an Enum.
It tries with basic types first.
If that fails, it tries to look for type annotations inside the
Enum, and tries to use those to load the value into something
that is compatible with the Enum.
Of course if that fails too, a ValueError is raised.
### Response:
def _enumload(l: Loader, value, type_) -> Enum:
"""
This loads something into an Enum.
It tries with basic types first.
If that fails, it tries to look for type annotations inside the
Enum, and tries to use those to load the value into something
that is compatible with the Enum.
Of course if that fails too, a ValueError is raised.
"""
try:
# Try naïve conversion
return type_(value)
except Exception:
pass
# Try with the typing hints
for _, t in get_type_hints(type_).items():
try:
return type_(l.load(value, t))
except Exception:
pass
raise TypedloadValueError(
'Value could not be loaded into %s' % type_,
value=value,
type_=type_
) |
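The two-stage fallback can be illustrated without the typedload machinery; this standalone sketch mimics the naive-then-coerce order:
```python
from enum import Enum

class Color(Enum):
    RED = 1
    GREEN = 2

print(Color(2))      # naive conversion succeeds: Color.GREEN
try:
    Color("2")       # naive conversion fails: "2" is not a member value
except ValueError:
    # This is where the loader falls back to the Enum's type hints,
    # e.g. coercing "2" to int before retrying.
    print(Color(int("2")))  # Color.GREEN
```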
def apply_text(incoming, func):
"""Call `func` on text portions of incoming color string.
:param iter incoming: Incoming string/ColorStr/string-like object to iterate.
:param func: Function to call with string portion as first and only parameter.
:return: Modified string, same class type as incoming string.
"""
split = RE_SPLIT.split(incoming)
for i, item in enumerate(split):
if not item or RE_SPLIT.match(item):
continue
split[i] = func(item)
return incoming.__class__().join(split) | Call `func` on text portions of incoming color string.
:param iter incoming: Incoming string/ColorStr/string-like object to iterate.
:param func: Function to call with string portion as first and only parameter.
:return: Modified string, same class type as incoming string. | Below is the instruction that describes the task:
### Input:
Call `func` on text portions of incoming color string.
:param iter incoming: Incoming string/ColorStr/string-like object to iterate.
:param func: Function to call with string portion as first and only parameter.
:return: Modified string, same class type as incoming string.
### Response:
def apply_text(incoming, func):
"""Call `func` on text portions of incoming color string.
:param iter incoming: Incoming string/ColorStr/string-like object to iterate.
:param func: Function to call with string portion as first and only parameter.
:return: Modified string, same class type as incoming string.
"""
split = RE_SPLIT.split(incoming)
for i, item in enumerate(split):
if not item or RE_SPLIT.match(item):
continue
split[i] = func(item)
return incoming.__class__().join(split) |
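To see the effect, assume `RE_SPLIT` captures ANSI escape sequences (its definition is not shown here, so the pattern below is an assumption):
```python
import re

RE_SPLIT = re.compile(r'(\033\[[\d;]*m)')  # assumed: split on ANSI color codes

colored = '\033[31mhello\033[0m world'
print(repr(apply_text(colored, str.upper)))
# '\x1b[31mHELLO\x1b[0m WORLD' -- only the visible text is transformed
```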
def get_jobs(self, limit=10, skip=0, backend=None, only_completed=False, filter=None, hub=None, group=None, project=None, access_token=None, user_id=None):
"""
Get the information about the user's jobs
"""
if access_token:
self.req.credential.set_token(access_token)
if user_id:
self.req.credential.set_user_id(user_id)
if not self.check_credentials():
return {"error": "Not credentials valid"}
url = get_job_url(self.config, hub, group, project)
url_filter = '&filter='
query = {
"order": "creationDate DESC",
"limit": limit,
"skip": skip,
"where" : {}
}
if filter is not None:
query['where'] = filter
else:
if backend is not None:
query['where']['backend.name'] = backend
if only_completed:
query['where']['status'] = 'COMPLETED'
url_filter = url_filter + json.dumps(query)
jobs = self.req.get(url, url_filter)
return jobs | Get the information about the user's jobs | Below is the instruction that describes the task:
### Input:
Get the information about the user's jobs
### Response:
def get_jobs(self, limit=10, skip=0, backend=None, only_completed=False, filter=None, hub=None, group=None, project=None, access_token=None, user_id=None):
"""
Get the information about the user's jobs
"""
if access_token:
self.req.credential.set_token(access_token)
if user_id:
self.req.credential.set_user_id(user_id)
if not self.check_credentials():
return {"error": "Not credentials valid"}
url = get_job_url(self.config, hub, group, project)
url_filter = '&filter='
query = {
"order": "creationDate DESC",
"limit": limit,
"skip": skip,
"where" : {}
}
if filter is not None:
query['where'] = filter
else:
if backend is not None:
query['where']['backend.name'] = backend
if only_completed:
query['where']['status'] = 'COMPLETED'
url_filter = url_filter + json.dumps(query)
jobs = self.req.get(url, url_filter)
return jobs |
def querying_context(self, packet_type):
""" Context manager for querying.
Sets state to TDS_QUERYING, reverts it to TDS_IDLE if an exception happens inside the managed block,
and sets it to TDS_PENDING and flushes the buffer if the managed block succeeds.
"""
if self.set_state(tds_base.TDS_QUERYING) != tds_base.TDS_QUERYING:
raise tds_base.Error("Couldn't switch to state")
self._writer.begin_packet(packet_type)
try:
yield
except:
if self.state != tds_base.TDS_DEAD:
self.set_state(tds_base.TDS_IDLE)
raise
else:
self.set_state(tds_base.TDS_PENDING)
self._writer.flush() | Context manager for querying.
Sets state to TDS_QUERYING, reverts it to TDS_IDLE if an exception happens inside the managed block,
and sets it to TDS_PENDING and flushes the buffer if the managed block succeeds. | Below is the instruction that describes the task:
### Input:
Context manager for querying.
Sets state to TDS_QUERYING, reverts it to TDS_IDLE if an exception happens inside the managed block,
and sets it to TDS_PENDING and flushes the buffer if the managed block succeeds.
### Response:
def querying_context(self, packet_type):
""" Context manager for querying.
Sets state to TDS_QUERYING, reverts it to TDS_IDLE if an exception happens inside the managed block,
and sets it to TDS_PENDING and flushes the buffer if the managed block succeeds.
"""
if self.set_state(tds_base.TDS_QUERYING) != tds_base.TDS_QUERYING:
raise tds_base.Error("Couldn't switch to state")
self._writer.begin_packet(packet_type)
try:
yield
except:
if self.state != tds_base.TDS_DEAD:
self.set_state(tds_base.TDS_IDLE)
raise
else:
self.set_state(tds_base.TDS_PENDING)
self._writer.flush() |
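The commit-or-rollback shape of this context manager is a reusable pattern; a standalone sketch with invented state constants:
```python
from contextlib import contextmanager

IDLE, QUERYING, PENDING, DEAD = range(4)

class Session:
    state = IDLE

    @contextmanager
    def querying(self):
        self.state = QUERYING
        try:
            yield
        except Exception:
            if self.state != DEAD:
                self.state = IDLE   # roll back on failure
            raise
        else:
            self.state = PENDING    # commit on success

s = Session()
with s.querying():
    pass
assert s.state == PENDING
```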
def cmd_gimbal_mode(self, args):
'''control gimbal mode'''
if len(args) != 1:
print("usage: gimbal mode <GPS|MAVLink>")
return
if args[0].upper() == 'GPS':
mode = mavutil.mavlink.MAV_MOUNT_MODE_GPS_POINT
elif args[0].upper() == 'MAVLINK':
mode = mavutil.mavlink.MAV_MOUNT_MODE_MAVLINK_TARGETING
elif args[0].upper() == 'RC':
mode = mavutil.mavlink.MAV_MOUNT_MODE_RC_TARGETING
else:
print("Unsupported mode %s" % args[0])
self.master.mav.mount_configure_send(self.target_system,
self.target_component,
mode,
1, 1, 1) | control gimbal mode | Below is the instruction that describes the task:
### Input:
control gimbal mode
### Response:
def cmd_gimbal_mode(self, args):
'''control gimbal mode'''
if len(args) != 1:
print("usage: gimbal mode <GPS|MAVLink>")
return
if args[0].upper() == 'GPS':
mode = mavutil.mavlink.MAV_MOUNT_MODE_GPS_POINT
elif args[0].upper() == 'MAVLINK':
mode = mavutil.mavlink.MAV_MOUNT_MODE_MAVLINK_TARGETING
elif args[0].upper() == 'RC':
mode = mavutil.mavlink.MAV_MOUNT_MODE_RC_TARGETING
else:
print("Unsupported mode %s" % args[0])
self.master.mav.mount_configure_send(self.target_system,
self.target_component,
mode,
1, 1, 1) |
def _set_index(self, schema, name, fields, **index_options):
"""
NOTE -- we set the index name using <table_name>_<name> format since indexes have to have
a globally unique name in postgres
http://www.postgresql.org/docs/9.1/static/sql-createindex.html
"""
index_fields = []
for field_name in fields:
field = schema.fields[field_name]
if issubclass(field.type, basestring):
if field.options.get('ignore_case', False):
field_name = 'UPPER({})'.format(self._normalize_name(field_name))
index_fields.append(field_name)
query_str = 'CREATE {}INDEX {} ON {} USING BTREE ({})'.format(
'UNIQUE ' if index_options.get('unique', False) else '',
self._normalize_name("{}_{}".format(schema, name)),
self._normalize_table_name(schema),
', '.join(index_fields)
)
return self.query(query_str, ignore_result=True, **index_options) | NOTE -- we set the index name using <table_name>_<name> format since indexes have to have
a globally unique name in postgres
http://www.postgresql.org/docs/9.1/static/sql-createindex.html | Below is the instruction that describes the task:
### Input:
NOTE -- we set the index name using <table_name>_<name> format since indexes have to have
a globally unique name in postgres
http://www.postgresql.org/docs/9.1/static/sql-createindex.html
### Response:
def _set_index(self, schema, name, fields, **index_options):
"""
NOTE -- we set the index name using <table_name>_<name> format since indexes have to have
a globally unique name in postgres
http://www.postgresql.org/docs/9.1/static/sql-createindex.html
"""
index_fields = []
for field_name in fields:
field = schema.fields[field_name]
if issubclass(field.type, basestring):
if field.options.get('ignore_case', False):
field_name = 'UPPER({})'.format(self._normalize_name(field_name))
index_fields.append(field_name)
query_str = 'CREATE {}INDEX {} ON {} USING BTREE ({})'.format(
'UNIQUE ' if index_options.get('unique', False) else '',
self._normalize_name("{}_{}".format(schema, name)),
self._normalize_table_name(schema),
', '.join(index_fields)
)
return self.query(query_str, ignore_result=True, **index_options) |
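For intuition, the statement built for a hypothetical `users` schema with a unique, case-insensitive `email` field would be (names invented for illustration):
```python
query_str = 'CREATE {}INDEX {} ON {} USING BTREE ({})'.format(
    'UNIQUE ', 'users_email', 'users', 'UPPER(email)')
print(query_str)
# CREATE UNIQUE INDEX users_email ON users USING BTREE (UPPER(email))
```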
def _get_translation(self, field, code):
"""
Gets the translation of a specific field for a specific language code.
This raises ObjectDoesNotExist if the lookup was unsuccessful. As of
today, this stuff is cached. As the cache is rather aggressive it
might cause rather strange effects. However, we would see the same
effects when an ordinary object is changed which is already in memory:
the old state would remain.
"""
if code not in self._translation_cache:
translations = self.translations.select_related()
logger.debug(
u'Matched with field %s for language %s. Attempting lookup.',
field, code
)
try:
translation_obj = translations.get(language_code=code)
except ObjectDoesNotExist:
translation_obj = None
self._translation_cache[code] = translation_obj
logger.debug(u'Translation not found in cache.')
else:
logger.debug(u'Translation found in cache.')
# Get the translation from the cache
translation_obj = self._translation_cache.get(code)
# If this is none, it means that a translation does not exist
# It is important to cache this one as well
if not translation_obj:
raise ObjectDoesNotExist
field_value = getattr(translation_obj, field)
logger.debug(
u'Found translation object %s, returning value %s.',
translation_obj, field_value
)
return field_value | Gets the translation of a specific field for a specific language code.
This raises ObjectDoesNotExist if the lookup was unsuccessful. As of
today, this stuff is cached. As the cache is rather aggressive it
might cause rather strange effects. However, we would see the same
effects when an ordinary object is changed which is already in memory:
the old state would remain. | Below is the instruction that describes the task:
### Input:
Gets the translation of a specific field for a specific language code.
This raises ObjectDoesNotExist if the lookup was unsuccessful. As of
today, this stuff is cached. As the cache is rather aggressive it
might cause rather strange effects. However, we would see the same
effects when an ordinary object is changed which is already in memory:
the old state would remain.
### Response:
def _get_translation(self, field, code):
"""
Gets the translation of a specific field for a specific language code.
This raises ObjectDoesNotExist if the lookup was unsuccessful. As of
today, this stuff is cached. As the cache is rather aggressive it
might cause rather strange effects. However, we would see the same
effects when an ordinary object is changed which is already in memory:
the old state would remain.
"""
if code not in self._translation_cache:
translations = self.translations.select_related()
logger.debug(
u'Matched with field %s for language %s. Attempting lookup.',
field, code
)
try:
translation_obj = translations.get(language_code=code)
except ObjectDoesNotExist:
translation_obj = None
self._translation_cache[code] = translation_obj
logger.debug(u'Translation not found in cache.')
else:
logger.debug(u'Translation found in cache.')
# Get the translation from the cache
translation_obj = self._translation_cache.get(code)
# If this is none, it means that a translation does not exist
# It is important to cache this one as well
if not translation_obj:
raise ObjectDoesNotExist
field_value = getattr(translation_obj, field)
logger.debug(
u'Found translation object %s, returning value %s.',
translation_obj, field_value
)
return field_value |
def _computeChart(chart, date):
""" Internal function to return a new chart for
a specific date using properties from the old chart.
"""
pos = chart.pos
hsys = chart.hsys
IDs = [obj.id for obj in chart.objects]
return Chart(date, pos, IDs=IDs, hsys=hsys) | Internal function to return a new chart for
a specific date using properties from the old chart. | Below is the instruction that describes the task:
### Input:
Internal function to return a new chart for
a specific date using properties from the old chart.
### Response:
def _computeChart(chart, date):
""" Internal function to return a new chart for
a specific date using properties from the old chart.
"""
pos = chart.pos
hsys = chart.hsys
IDs = [obj.id for obj in chart.objects]
return Chart(date, pos, IDs=IDs, hsys=hsys) |
def DbGetClassForDevice(self, argin):
""" Get Tango class for the specified device.
:param argin: Device name
:type argin: tango.DevString
:return: Device Tango class
:rtype: tango.DevString """
self._log.debug("In DbGetClassForDevice()")
return self.db.get_class_for_device(argin) | Get Tango class for the specified device.
:param argin: Device name
:type argin: tango.DevString
:return: Device Tango class
:rtype: tango.DevString | Below is the instruction that describes the task:
### Input:
Get Tango class for the specified device.
:param argin: Device name
:type argin: tango.DevString
:return: Device Tango class
:rtype: tango.DevString
### Response:
def DbGetClassForDevice(self, argin):
""" Get Tango class for the specified device.
:param argin: Device name
:type argin: tango.DevString
:return: Device Tango class
:rtype: tango.DevString """
self._log.debug("In DbGetClassForDevice()")
return self.db.get_class_for_device(argin) |
def inspectSpatialPoolerStats(sp, inputVectors, saveFigPrefix=None):
"""
Inspect the statistics of a spatial pooler given a set of input vectors
@param sp: an spatial pooler instance
@param inputVectors: a set of input vectors
"""
numInputVector, inputSize = inputVectors.shape
numColumns = np.prod(sp.getColumnDimensions())
outputColumns = np.zeros((numInputVector, numColumns), dtype=uintType)
inputOverlap = np.zeros((numInputVector, numColumns), dtype=uintType)
connectedCounts = np.zeros((numColumns, ), dtype=uintType)
sp.getConnectedCounts(connectedCounts)
winnerInputOverlap = np.zeros(numInputVector)
for i in range(numInputVector):
sp.compute(inputVectors[i][:], False, outputColumns[i][:])
inputOverlap[i][:] = sp.getOverlaps()
activeColumns = np.where(outputColumns[i][:] > 0)[0]
if len(activeColumns) > 0:
winnerInputOverlap[i] = np.mean(
inputOverlap[i][np.where(outputColumns[i][:] > 0)[0]])
avgInputOverlap = np.mean(inputOverlap, 0)
entropy = calculateEntropy(outputColumns)
activationProb = np.mean(outputColumns.astype(realDType), 0)
dutyCycleDist, binEdge = np.histogram(activationProb,
bins=10, range=[-0.005, 0.095])
dutyCycleDist = dutyCycleDist.astype('float32') / np.sum(dutyCycleDist)
binCenter = (binEdge[1:] + binEdge[:-1])/2
fig, axs = plt.subplots(2, 2)
axs[0, 0].hist(connectedCounts)
axs[0, 0].set_xlabel('# Connected Synapse')
axs[0, 1].hist(winnerInputOverlap)
axs[0, 1].set_xlabel('# winner input overlap')
axs[1, 0].bar(binEdge[:-1]+0.001, dutyCycleDist, width=.008)
axs[1, 0].set_xlim([-0.005, .1])
axs[1, 0].set_xlabel('Activation Frequency')
axs[1, 0].set_title('Entropy: {}'.format(entropy))
axs[1, 1].plot(connectedCounts, activationProb, '.')
axs[1, 1].set_xlabel('connection #')
axs[1, 1].set_ylabel('activation freq')
plt.tight_layout()
if saveFigPrefix is not None:
plt.savefig('figures/{}_network_stats.pdf'.format(saveFigPrefix))
return fig | Inspect the statistics of a spatial pooler given a set of input vectors
@param sp: an spatial pooler instance
@param inputVectors: a set of input vectors | Below is the instruction that describes the task:
### Input:
Inspect the statistics of a spatial pooler given a set of input vectors
@param sp: an spatial pooler instance
@param inputVectors: a set of input vectors
### Response:
def inspectSpatialPoolerStats(sp, inputVectors, saveFigPrefix=None):
"""
Inspect the statistics of a spatial pooler given a set of input vectors
@param sp: an spatial pooler instance
@param inputVectors: a set of input vectors
"""
numInputVector, inputSize = inputVectors.shape
numColumns = np.prod(sp.getColumnDimensions())
outputColumns = np.zeros((numInputVector, numColumns), dtype=uintType)
inputOverlap = np.zeros((numInputVector, numColumns), dtype=uintType)
connectedCounts = np.zeros((numColumns, ), dtype=uintType)
sp.getConnectedCounts(connectedCounts)
winnerInputOverlap = np.zeros(numInputVector)
for i in range(numInputVector):
sp.compute(inputVectors[i][:], False, outputColumns[i][:])
inputOverlap[i][:] = sp.getOverlaps()
activeColumns = np.where(outputColumns[i][:] > 0)[0]
if len(activeColumns) > 0:
winnerInputOverlap[i] = np.mean(
inputOverlap[i][np.where(outputColumns[i][:] > 0)[0]])
avgInputOverlap = np.mean(inputOverlap, 0)
entropy = calculateEntropy(outputColumns)
activationProb = np.mean(outputColumns.astype(realDType), 0)
dutyCycleDist, binEdge = np.histogram(activationProb,
bins=10, range=[-0.005, 0.095])
dutyCycleDist = dutyCycleDist.astype('float32') / np.sum(dutyCycleDist)
binCenter = (binEdge[1:] + binEdge[:-1])/2
fig, axs = plt.subplots(2, 2)
axs[0, 0].hist(connectedCounts)
axs[0, 0].set_xlabel('# Connected Synapse')
axs[0, 1].hist(winnerInputOverlap)
axs[0, 1].set_xlabel('# winner input overlap')
axs[1, 0].bar(binEdge[:-1]+0.001, dutyCycleDist, width=.008)
axs[1, 0].set_xlim([-0.005, .1])
axs[1, 0].set_xlabel('Activation Frequency')
axs[1, 0].set_title('Entropy: {}'.format(entropy))
axs[1, 1].plot(connectedCounts, activationProb, '.')
axs[1, 1].set_xlabel('connection #')
axs[1, 1].set_ylabel('activation freq')
plt.tight_layout()
if saveFigPrefix is not None:
plt.savefig('figures/{}_network_stats.pdf'.format(saveFigPrefix))
return fig |
def update(self, key=values.unset, value=values.unset):
"""
Update the VariableInstance
:param unicode key: The key
:param unicode value: The value
:returns: Updated VariableInstance
:rtype: twilio.rest.serverless.v1.service.environment.variable.VariableInstance
"""
data = values.of({'Key': key, 'Value': value, })
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return VariableInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
environment_sid=self._solution['environment_sid'],
sid=self._solution['sid'],
) | Update the VariableInstance
:param unicode key: The key
:param unicode value: The value
:returns: Updated VariableInstance
:rtype: twilio.rest.serverless.v1.service.environment.variable.VariableInstance | Below is the instruction that describes the task:
### Input:
Update the VariableInstance
:param unicode key: The key
:param unicode value: The value
:returns: Updated VariableInstance
:rtype: twilio.rest.serverless.v1.service.environment.variable.VariableInstance
### Response:
def update(self, key=values.unset, value=values.unset):
"""
Update the VariableInstance
:param unicode key: The key
:param unicode value: The value
:returns: Updated VariableInstance
:rtype: twilio.rest.serverless.v1.service.environment.variable.VariableInstance
"""
data = values.of({'Key': key, 'Value': value, })
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return VariableInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
environment_sid=self._solution['environment_sid'],
sid=self._solution['sid'],
) |
async def freeze(self, *args, **kwargs):
"""
Freeze users balance
Accepts:
- uid [integer] (users id from main server)
- coinid [string] (blockchain type in uppercase)
- amount [integer] (amount for freezing)
Returns:
- uid [integer] (users id from main server)
- coinid [string] (blockchain type in uppercase)
- amount_active [integer] (active users amount)
- amount_frozen [integer] (frozen users amount)
"""
# Get data from request
uid = kwargs.get("uid", 0)
coinid = kwargs.get("coinid")
amount = kwargs.get("amount")
address = kwargs.get("address")
try:
coinid = coinid.replace("TEST", "")
except Exception:
pass
try:
uid = int(uid)
except Exception:
return await self.error_400("User id must be integer. ")
try:
amount = int(amount)
except Exception:
return await self.error_400("Amount must be integer. ")
try:
assert amount > 0
except Exception:
return await self.error_400("Amount must be positive integer. ")
# Check if required fields exists
if not uid and address:
uid = await self.get_uid_by_address(address=address, coinid=coinid)
if isinstance(uid, dict):
return uid
# Connect to appropriate database
database = self.client[self.collection]
collection = database[coinid]
# Check if balance exists
balance = await collection.find_one({"uid":uid})
if not balance:
return await self.error_404(
"Freeze. Balance with uid:%s and type:%s not found." % (uid, coinid))
# Check if amount is enough
difference = int(balance["amount_active"]) - int(amount)
if difference < 0:
return await self.error_403("Freeze. Insufficient amount in the account")
# Decrement active amount and increment frozen amount
amount_frozen = int(balance["amount_frozen"]) + int(amount)
await collection.find_one_and_update({"uid":uid},
{"$set":{"amount_active":str(difference),
"amount_frozen":str(amount_frozen)}})
# Return updated balance with excluded mongo _id field
result = await collection.find_one({"uid":uid})
result["amount_frozen"] = int(result["amount_frozen"])
result["amount_active"] = int(result["amount_active"])
del result["_id"]
return result | Freeze users balance
Accepts:
- uid [integer] (users id from main server)
- coinid [string] (blockchain type in uppercase)
- amount [integer] (amount for freezing)
Returns:
- uid [integer] (users id from main server)
- coinid [string] (blockchain type in uppercase)
- amount_active [integer] (active users amount)
- amount_frozen [integer] (frozen users amount) | Below is the instruction that describes the task:
### Input:
Freeze users balance
Accepts:
- uid [integer] (users id from main server)
- coinid [string] (blockchain type in uppercase)
- amount [integer] (amount for freezing)
Returns:
- uid [integer] (users id from main server)
- coinid [string] (blockchain type in uppercase)
- amount_active [integer] (active users amount)
- amount_frozen [integer] (frozen users amount)
### Response:
async def freeze(self, *args, **kwargs):
"""
Freeze users balance
Accepts:
- uid [integer] (users id from main server)
- coinid [string] (blockchain type in uppercase)
- amount [integer] (amount for freezing)
Returns:
- uid [integer] (users id from main server)
- coinid [string] (blockchain type in uppercase)
- amount_active [integer] (active users amount)
- amount_frozen [integer] (frozen users amount)
"""
# Get data from request
uid = kwargs.get("uid", 0)
coinid = kwargs.get("coinid")
amount = kwargs.get("amount")
address = kwargs.get("address")
try:
coinid = coinid.replace("TEST", "")
except Exception:
pass
try:
uid = int(uid)
except Exception:
return await self.error_400("User id must be integer. ")
try:
amount = int(amount)
except Exception:
return await self.error_400("Amount must be integer. ")
try:
assert amount > 0
except Exception:
return await self.error_400("Amount must be positive integer. ")
# Check if required fields exists
if not uid and address:
uid = await self.get_uid_by_address(address=address, coinid=coinid)
if isinstance(uid, dict):
return uid
# Connect to appropriate database
database = self.client[self.collection]
collection = database[coinid]
# Check if balance exists
balance = await collection.find_one({"uid":uid})
if not balance:
return await self.error_404(
"Freeze. Balance with uid:%s and type:%s not found." % (uid, coinid))
# Check if amount is enough
difference = int(balance["amount_active"]) - int(amount)
if difference < 0:
return await self.error_403("Freeze. Insufficient amount in the account")
# Decrement active amount and increment frozen amount
amount_frozen = int(balance["amount_frozen"]) + int(amount)
await collection.find_one_and_update({"uid":uid},
{"$set":{"amount_active":str(difference),
"amount_frozen":str(amount_frozen)}})
# Return updated balance with excluded mongo _id field
result = await collection.find_one({"uid":uid})
result["amount_frozen"] = int(result["amount_frozen"])
result["amount_active"] = int(result["amount_active"])
del result["_id"]
return result |
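A minimal sketch of an atomic variant of the freeze step above, assuming a Motor collection like `collection` but with integer-typed amounts (the record above stores them as strings); `freeze_atomic` is a hypothetical helper, not part of the original API:
from pymongo import ReturnDocument

async def freeze_atomic(collection, uid, amount):
    # The filter only matches when the active amount covers the freeze,
    # so two concurrent freezes cannot both draw on the same funds.
    return await collection.find_one_and_update(
        {"uid": uid, "amount_active": {"$gte": amount}},
        {"$inc": {"amount_active": -amount, "amount_frozen": amount}},
        return_document=ReturnDocument.AFTER,
        projection={"_id": False},
    )  # None when the balance is missing or the amount is insufficient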
def _check_err(data):
"""
    :param data: response json data object (must not be None).
    Check possible error code returned in the response body
    and raise the corresponding exceptions
"""
if data['d'] is None:
raise NoDataReturned()
if data['d']['Messages'] is None:
return
msg = data['d']['Messages']
if len(msg) == 0 or msg[0]['Code'] is None:
raise UnknownError()
code = int(msg[0]['Code'])
if code == 3006:
raise CalendarNotExist()
elif code == 3007:
raise CalendarOwnByDiffAccount()
else:
logger.warn(
"Unexpected Error Code: %s %s" % (
code, msg[0]['Description']))
        raise UnexpectedError() | :param data: response json data object (must not be None).
Check possible error code returned in the response body
and raise the corresponding exceptions | Below is the instruction that describes the task:
### Input:
:param data: response json data object (must not be None).
Check possible error code returned in the response body
and raise the corresponding exceptions
### Response:
def _check_err(data):
"""
    :param data: response json data object (must not be None).
    Check possible error code returned in the response body
    and raise the corresponding exceptions
"""
if data['d'] is None:
raise NoDataReturned()
if data['d']['Messages'] is None:
return
msg = data['d']['Messages']
if len(msg) == 0 or msg[0]['Code'] is None:
raise UnknownError()
code = int(msg[0]['Code'])
if code == 3006:
raise CalendarNotExist()
elif code == 3007:
raise CalendarOwnByDiffAccount()
else:
logger.warn(
"Unexpected Error Code: %s %s" % (
code, msg[0]['Description']))
raise UnexpectedError() |
def load_json_fixture(fixture_path: str) -> Dict[str, Any]:
"""
Loads a fixture file, caching the most recent files it loaded.
"""
with open(fixture_path) as fixture_file:
file_fixtures = json.load(fixture_file)
return file_fixtures | Loads a fixture file, caching the most recent files it loaded. | Below is the instruction that describes the task:
### Input:
Loads a fixture file, caching the most recent files it loaded.
### Response:
def load_json_fixture(fixture_path: str) -> Dict[str, Any]:
"""
Loads a fixture file, caching the most recent files it loaded.
"""
with open(fixture_path) as fixture_file:
file_fixtures = json.load(fixture_file)
return file_fixtures |
def _logsumexp(ary, *, b=None, b_inv=None, axis=None, keepdims=False, out=None, copy=True):
"""Stable logsumexp when b >= 0 and b is scalar.
b_inv overwrites b unless b_inv is None.
"""
# check dimensions for result arrays
ary = np.asarray(ary)
if ary.dtype.kind == "i":
ary = ary.astype(np.float64)
dtype = ary.dtype.type
shape = ary.shape
shape_len = len(shape)
if isinstance(axis, Sequence):
axis = tuple(axis_i if axis_i >= 0 else shape_len + axis_i for axis_i in axis)
agroup = axis
else:
axis = axis if (axis is None) or (axis >= 0) else shape_len + axis
agroup = (axis,)
shape_max = (
tuple(1 for _ in shape)
if axis is None
else tuple(1 if i in agroup else d for i, d in enumerate(shape))
)
# create result arrays
if out is None:
if not keepdims:
out_shape = (
tuple()
if axis is None
else tuple(d for i, d in enumerate(shape) if i not in agroup)
)
else:
out_shape = shape_max
out = np.empty(out_shape, dtype=dtype)
if b_inv == 0:
return np.full_like(out, np.inf, dtype=dtype) if out.shape else np.inf
if b_inv is None and b == 0:
return np.full_like(out, -np.inf) if out.shape else -np.inf
ary_max = np.empty(shape_max, dtype=dtype)
# calculations
ary.max(axis=axis, keepdims=True, out=ary_max)
if copy:
ary = ary.copy()
ary -= ary_max
np.exp(ary, out=ary)
ary.sum(axis=axis, keepdims=keepdims, out=out)
np.log(out, out=out)
if b_inv is not None:
ary_max -= np.log(b_inv)
elif b:
ary_max += np.log(b)
out += ary_max.squeeze() if not keepdims else ary_max
# transform to scalar if possible
return out if out.shape else dtype(out) | Stable logsumexp when b >= 0 and b is scalar.
b_inv overwrites b unless b_inv is None. | Below is the instruction that describes the task:
### Input:
Stable logsumexp when b >= 0 and b is scalar.
b_inv overwrites b unless b_inv is None.
### Response:
def _logsumexp(ary, *, b=None, b_inv=None, axis=None, keepdims=False, out=None, copy=True):
"""Stable logsumexp when b >= 0 and b is scalar.
b_inv overwrites b unless b_inv is None.
"""
# check dimensions for result arrays
ary = np.asarray(ary)
if ary.dtype.kind == "i":
ary = ary.astype(np.float64)
dtype = ary.dtype.type
shape = ary.shape
shape_len = len(shape)
if isinstance(axis, Sequence):
axis = tuple(axis_i if axis_i >= 0 else shape_len + axis_i for axis_i in axis)
agroup = axis
else:
axis = axis if (axis is None) or (axis >= 0) else shape_len + axis
agroup = (axis,)
shape_max = (
tuple(1 for _ in shape)
if axis is None
else tuple(1 if i in agroup else d for i, d in enumerate(shape))
)
# create result arrays
if out is None:
if not keepdims:
out_shape = (
tuple()
if axis is None
else tuple(d for i, d in enumerate(shape) if i not in agroup)
)
else:
out_shape = shape_max
out = np.empty(out_shape, dtype=dtype)
if b_inv == 0:
return np.full_like(out, np.inf, dtype=dtype) if out.shape else np.inf
if b_inv is None and b == 0:
return np.full_like(out, -np.inf) if out.shape else -np.inf
ary_max = np.empty(shape_max, dtype=dtype)
# calculations
ary.max(axis=axis, keepdims=True, out=ary_max)
if copy:
ary = ary.copy()
ary -= ary_max
np.exp(ary, out=ary)
ary.sum(axis=axis, keepdims=keepdims, out=out)
np.log(out, out=out)
if b_inv is not None:
ary_max -= np.log(b_inv)
elif b:
ary_max += np.log(b)
out += ary_max.squeeze() if not keepdims else ary_max
# transform to scalar if possible
return out if out.shape else dtype(out) |
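A quick numerical check of _logsumexp against the hand-shifted formula, on values large enough that np.exp alone would overflow:
import numpy as np

x = np.array([1000.0, 1000.0])
print(_logsumexp(x))                                  # ~1000.6931 = 1000 + log(2)
print(x.max() + np.log(np.sum(np.exp(x - x.max()))))  # same value, shifted by hand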
def sessions(status, access_key, id_only, all):
'''
List and manage compute sessions.
'''
fields = [
('Session ID', 'sess_id'),
]
with Session() as session:
if is_admin(session):
fields.append(('Owner', 'access_key'))
if not id_only:
fields.extend([
('Image', 'image'),
('Tag', 'tag'),
('Created At', 'created_at',),
('Terminated At', 'terminated_at'),
('Status', 'status'),
('Occupied Resource', 'occupied_slots'),
('Used Memory (MiB)', 'mem_cur_bytes'),
('Max Used Memory (MiB)', 'mem_max_bytes'),
('CPU Using (%)', 'cpu_using'),
])
if is_legacy_server():
del fields[2]
def execute_paginated_query(limit, offset):
q = '''
query($limit:Int!, $offset:Int!, $ak:String, $status:String) {
compute_session_list(
limit:$limit, offset:$offset, access_key:$ak, status:$status) {
items { $fields }
total_count
}
}'''
q = textwrap.dedent(q).strip()
q = q.replace('$fields', ' '.join(item[1] for item in fields))
v = {
'limit': limit,
'offset': offset,
'status': status if status != 'ALL' else None,
'ak': access_key,
}
try:
resp = session.Admin.query(q, v)
except Exception as e:
print_error(e)
sys.exit(1)
return resp['compute_session_list']
def round_mem(items):
for item in items:
if 'mem_cur_bytes' in item:
item['mem_cur_bytes'] = round(item['mem_cur_bytes'] / 2 ** 20, 1)
if 'mem_max_bytes' in item:
item['mem_max_bytes'] = round(item['mem_max_bytes'] / 2 ** 20, 1)
return items
def _generate_paginated_results(interval):
offset = 0
is_first = True
total_count = -1
while True:
limit = (interval if is_first else
min(interval, total_count - offset))
try:
result = execute_paginated_query(limit, offset)
except Exception as e:
print_error(e)
sys.exit(1)
offset += interval
total_count = result['total_count']
items = result['items']
items = round_mem(items)
if id_only:
yield '\n'.join([item['sess_id'] for item in items]) + '\n'
else:
table = tabulate([item.values() for item in items],
headers=(item[0] for item in fields))
if not is_first:
table_rows = table.split('\n')
table = '\n'.join(table_rows[2:])
yield table + '\n'
if is_first:
is_first = False
if not offset < total_count:
break
with Session() as session:
paginating_interval = 10
if all:
click.echo_via_pager(_generate_paginated_results(paginating_interval))
else:
result = execute_paginated_query(paginating_interval, offset=0)
total_count = result['total_count']
if total_count == 0:
print('There are no compute sessions currently {0}.'
.format(status.lower()))
return
items = result['items']
items = round_mem(items)
if id_only:
for item in items:
print(item['sess_id'])
else:
print(tabulate([item.values() for item in items],
headers=(item[0] for item in fields)))
if total_count > paginating_interval:
print("More sessions can be displayed by using --all option.") | List and manage compute sessions. | Below is the the instruction that describes the task:
### Input:
List and manage compute sessions.
### Response:
def sessions(status, access_key, id_only, all):
'''
List and manage compute sessions.
'''
fields = [
('Session ID', 'sess_id'),
]
with Session() as session:
if is_admin(session):
fields.append(('Owner', 'access_key'))
if not id_only:
fields.extend([
('Image', 'image'),
('Tag', 'tag'),
('Created At', 'created_at',),
('Terminated At', 'terminated_at'),
('Status', 'status'),
('Occupied Resource', 'occupied_slots'),
('Used Memory (MiB)', 'mem_cur_bytes'),
('Max Used Memory (MiB)', 'mem_max_bytes'),
('CPU Using (%)', 'cpu_using'),
])
if is_legacy_server():
del fields[2]
def execute_paginated_query(limit, offset):
q = '''
query($limit:Int!, $offset:Int!, $ak:String, $status:String) {
compute_session_list(
limit:$limit, offset:$offset, access_key:$ak, status:$status) {
items { $fields }
total_count
}
}'''
q = textwrap.dedent(q).strip()
q = q.replace('$fields', ' '.join(item[1] for item in fields))
v = {
'limit': limit,
'offset': offset,
'status': status if status != 'ALL' else None,
'ak': access_key,
}
try:
resp = session.Admin.query(q, v)
except Exception as e:
print_error(e)
sys.exit(1)
return resp['compute_session_list']
def round_mem(items):
for item in items:
if 'mem_cur_bytes' in item:
item['mem_cur_bytes'] = round(item['mem_cur_bytes'] / 2 ** 20, 1)
if 'mem_max_bytes' in item:
item['mem_max_bytes'] = round(item['mem_max_bytes'] / 2 ** 20, 1)
return items
def _generate_paginated_results(interval):
offset = 0
is_first = True
total_count = -1
while True:
limit = (interval if is_first else
min(interval, total_count - offset))
try:
result = execute_paginated_query(limit, offset)
except Exception as e:
print_error(e)
sys.exit(1)
offset += interval
total_count = result['total_count']
items = result['items']
items = round_mem(items)
if id_only:
yield '\n'.join([item['sess_id'] for item in items]) + '\n'
else:
table = tabulate([item.values() for item in items],
headers=(item[0] for item in fields))
if not is_first:
table_rows = table.split('\n')
table = '\n'.join(table_rows[2:])
yield table + '\n'
if is_first:
is_first = False
if not offset < total_count:
break
with Session() as session:
paginating_interval = 10
if all:
click.echo_via_pager(_generate_paginated_results(paginating_interval))
else:
result = execute_paginated_query(paginating_interval, offset=0)
total_count = result['total_count']
if total_count == 0:
print('There are no compute sessions currently {0}.'
.format(status.lower()))
return
items = result['items']
items = round_mem(items)
if id_only:
for item in items:
print(item['sess_id'])
else:
print(tabulate([item.values() for item in items],
headers=(item[0] for item in fields)))
if total_count > paginating_interval:
print("More sessions can be displayed by using --all option.") |
def register_magics(store_name='_ampl_cells', ampl_object=None):
"""
Register jupyter notebook magics ``%%ampl`` and ``%%ampl_eval``.
Args:
store_name: Name of the store where ``%%ampl cells`` will be stored.
ampl_object: Object used to evaluate ``%%ampl_eval`` cells.
"""
from IPython.core.magic import (
Magics, magics_class, cell_magic, line_magic
)
@magics_class
class StoreAMPL(Magics):
def __init__(self, shell=None, **kwargs):
Magics.__init__(self, shell=shell, **kwargs)
self._store = []
shell.user_ns[store_name] = self._store
@cell_magic
def ampl(self, line, cell):
"""Store the cell in the store"""
self._store.append(cell)
@cell_magic
def ampl_eval(self, line, cell):
"""Evaluate the cell"""
ampl_object.eval(cell)
@line_magic
def get_ampl(self, line):
"""Retrieve the store"""
return self._store
get_ipython().register_magics(StoreAMPL) | Register jupyter notebook magics ``%%ampl`` and ``%%ampl_eval``.
Args:
store_name: Name of the store where ``%%ampl cells`` will be stored.
ampl_object: Object used to evaluate ``%%ampl_eval`` cells. | Below is the instruction that describes the task:
### Input:
Register jupyter notebook magics ``%%ampl`` and ``%%ampl_eval``.
Args:
store_name: Name of the store where ``%%ampl cells`` will be stored.
ampl_object: Object used to evaluate ``%%ampl_eval`` cells.
### Response:
def register_magics(store_name='_ampl_cells', ampl_object=None):
"""
Register jupyter notebook magics ``%%ampl`` and ``%%ampl_eval``.
Args:
store_name: Name of the store where ``%%ampl cells`` will be stored.
ampl_object: Object used to evaluate ``%%ampl_eval`` cells.
"""
from IPython.core.magic import (
Magics, magics_class, cell_magic, line_magic
)
@magics_class
class StoreAMPL(Magics):
def __init__(self, shell=None, **kwargs):
Magics.__init__(self, shell=shell, **kwargs)
self._store = []
shell.user_ns[store_name] = self._store
@cell_magic
def ampl(self, line, cell):
"""Store the cell in the store"""
self._store.append(cell)
@cell_magic
def ampl_eval(self, line, cell):
"""Evaluate the cell"""
ampl_object.eval(cell)
@line_magic
def get_ampl(self, line):
"""Retrieve the store"""
return self._store
get_ipython().register_magics(StoreAMPL) |
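A hypothetical notebook session using the magics above, assuming `ampl` is any object exposing eval(str):
# register_magics(ampl_object=ampl)   # run once in the notebook
#
# %%ampl          -> the cell body is appended to the _ampl_cells list
# %%ampl_eval     -> the cell body is passed to ampl.eval()
# %get_ampl       -> line magic returning the stored cells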
def send_text(self, txt, status=200):
"""
Sends plaintext response to client. Automatically sets the content-type
header to text/plain. If txt is not a string, it will be formatted as
one.
Parameters
----------
txt : str
The plaintext string to be sent back to the client
status : int, optional
The HTTP status code, defaults to 200 (OK)
"""
self.headers.setdefault('content-type', 'text/plain')
if not isinstance(txt, bytes):
txt = str(txt).encode()
self.message = txt
self.status_code = status
self.end() | Sends plaintext response to client. Automatically sets the content-type
header to text/plain. If txt is not a string, it will be formatted as
one.
Parameters
----------
txt : str
The plaintext string to be sent back to the client
status : int, optional
The HTTP status code, defaults to 200 (OK) | Below is the instruction that describes the task:
### Input:
Sends plaintext response to client. Automatically sets the content-type
header to text/plain. If txt is not a string, it will be formatted as
one.
Parameters
----------
txt : str
The plaintext string to be sent back to the client
status : int, optional
The HTTP status code, defaults to 200 (OK)
### Response:
def send_text(self, txt, status=200):
"""
Sends plaintext response to client. Automatically sets the content-type
header to text/plain. If txt is not a string, it will be formatted as
one.
Parameters
----------
txt : str
The plaintext string to be sent back to the client
status : int, optional
The HTTP status code, defaults to 200 (OK)
"""
self.headers.setdefault('content-type', 'text/plain')
if not isinstance(txt, bytes):
txt = str(txt).encode()
self.message = txt
self.status_code = status
self.end() |
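Hypothetical calls on a response object `res` of the class above, showing the coercion rules:
res.send_text('created', status=201)  # strings are encoded as-is
res.send_text(42)                     # non-strings pass through str() first
res.send_text(b'raw bytes')           # bytes are sent unchanged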
def close(self):
"""
Closes the job manager. No more jobs will be assigned, no more job sets
will be added, and any queued or active job sets will be cancelled.
"""
if self._closed:
return
self._closed = True
if self._active_js is not None:
self._active_js.cancel()
for js in self._js_queue:
js.cancel() | Closes the job manager. No more jobs will be assigned, no more job sets
will be added, and any queued or active job sets will be cancelled. | Below is the instruction that describes the task:
### Input:
Closes the job manager. No more jobs will be assigned, no more job sets
will be added, and any queued or active job sets will be cancelled.
### Response:
def close(self):
"""
Closes the job manager. No more jobs will be assigned, no more job sets
will be added, and any queued or active job sets will be cancelled.
"""
if self._closed:
return
self._closed = True
if self._active_js is not None:
self._active_js.cancel()
for js in self._js_queue:
js.cancel() |
def add_view(self, *args, **kwargs):
"""
Redirect to the change view if the singleton instance exists.
"""
try:
singleton = self.model.objects.get()
except (self.model.DoesNotExist, self.model.MultipleObjectsReturned):
kwargs.setdefault("extra_context", {})
kwargs["extra_context"]["singleton"] = True
response = super(SingletonAdmin, self).add_view(*args, **kwargs)
return self.handle_save(args[0], response)
return redirect(admin_url(self.model, "change", singleton.id)) | Redirect to the change view if the singleton instance exists. | Below is the instruction that describes the task:
### Input:
Redirect to the change view if the singleton instance exists.
### Response:
def add_view(self, *args, **kwargs):
"""
Redirect to the change view if the singleton instance exists.
"""
try:
singleton = self.model.objects.get()
except (self.model.DoesNotExist, self.model.MultipleObjectsReturned):
kwargs.setdefault("extra_context", {})
kwargs["extra_context"]["singleton"] = True
response = super(SingletonAdmin, self).add_view(*args, **kwargs)
return self.handle_save(args[0], response)
return redirect(admin_url(self.model, "change", singleton.id)) |
def next_date(self):
"""
Date when this event is next scheduled to occur in the local time zone
(Does not include postponements, but does exclude cancellations)
"""
nextDt = self.__localAfter(timezone.localtime(), dt.time.min)
if nextDt is not None:
return nextDt.date() | Date when this event is next scheduled to occur in the local time zone
(Does not include postponements, but does exclude cancellations) | Below is the instruction that describes the task:
### Input:
Date when this event is next scheduled to occur in the local time zone
(Does not include postponements, but does exclude cancellations)
### Response:
def next_date(self):
"""
Date when this event is next scheduled to occur in the local time zone
(Does not include postponements, but does exclude cancellations)
"""
nextDt = self.__localAfter(timezone.localtime(), dt.time.min)
if nextDt is not None:
return nextDt.date() |
def get_current(self):
"""Get current forecast."""
now = dt.now().timestamp()
url = build_url(self.api_key, self.spot_id, self.fields,
self.unit, now, now)
return get_msw(url) | Get current forecast. | Below is the instruction that describes the task:
### Input:
Get current forecast.
### Response:
def get_current(self):
"""Get current forecast."""
now = dt.now().timestamp()
url = build_url(self.api_key, self.spot_id, self.fields,
self.unit, now, now)
return get_msw(url) |
def get_template(
self,
url,
dest,
template='jinja',
makedirs=False,
saltenv='base',
cachedir=None,
**kwargs):
'''
Cache a file then process it as a template
'''
if 'env' in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop('env')
kwargs['saltenv'] = saltenv
url_data = urlparse(url)
sfn = self.cache_file(url, saltenv, cachedir=cachedir)
if not sfn or not os.path.exists(sfn):
return ''
if template in salt.utils.templates.TEMPLATE_REGISTRY:
data = salt.utils.templates.TEMPLATE_REGISTRY[template](
sfn,
**kwargs
)
else:
log.error(
'Attempted to render template with unavailable engine %s',
template
)
return ''
if not data['result']:
# Failed to render the template
log.error('Failed to render template with error: %s', data['data'])
return ''
if not dest:
# No destination passed, set the dest as an extrn_files cache
dest = self._extrn_path(url, saltenv, cachedir=cachedir)
# If Salt generated the dest name, create any required dirs
makedirs = True
destdir = os.path.dirname(dest)
if not os.path.isdir(destdir):
if makedirs:
os.makedirs(destdir)
else:
salt.utils.files.safe_rm(data['data'])
return ''
shutil.move(data['data'], dest)
return dest | Cache a file then process it as a template | Below is the instruction that describes the task:
### Input:
Cache a file then process it as a template
### Response:
def get_template(
self,
url,
dest,
template='jinja',
makedirs=False,
saltenv='base',
cachedir=None,
**kwargs):
'''
Cache a file then process it as a template
'''
if 'env' in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop('env')
kwargs['saltenv'] = saltenv
url_data = urlparse(url)
sfn = self.cache_file(url, saltenv, cachedir=cachedir)
if not sfn or not os.path.exists(sfn):
return ''
if template in salt.utils.templates.TEMPLATE_REGISTRY:
data = salt.utils.templates.TEMPLATE_REGISTRY[template](
sfn,
**kwargs
)
else:
log.error(
'Attempted to render template with unavailable engine %s',
template
)
return ''
if not data['result']:
# Failed to render the template
log.error('Failed to render template with error: %s', data['data'])
return ''
if not dest:
# No destination passed, set the dest as an extrn_files cache
dest = self._extrn_path(url, saltenv, cachedir=cachedir)
# If Salt generated the dest name, create any required dirs
makedirs = True
destdir = os.path.dirname(dest)
if not os.path.isdir(destdir):
if makedirs:
os.makedirs(destdir)
else:
salt.utils.files.safe_rm(data['data'])
return ''
shutil.move(data['data'], dest)
return dest |
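A hypothetical call, assuming `client` is an instance of the file-client class above; the source path and destination are invented:
dest = client.get_template(
    'salt://motd/motd.jinja',  # hypothetical salt:// source
    '/etc/motd',
    template='jinja',
    saltenv='base',
)  # returns '' on any failure, otherwise the destination path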
def mpr(truth, recommend):
"""Mean Percentile Rank (MPR).
Args:
truth (numpy 1d array): Set of truth samples.
recommend (numpy 1d array): Ordered set of recommended samples.
Returns:
float: MPR.
"""
if len(recommend) == 0 and len(truth) == 0:
return 0. # best
    elif len(recommend) == 0 or len(truth) == 0:
return 100. # worst
accum = 0.
n_recommend = recommend.size
for t in truth:
r = np.where(recommend == t)[0][0] / float(n_recommend)
accum += r
return accum * 100. / truth.size | Mean Percentile Rank (MPR).
Args:
truth (numpy 1d array): Set of truth samples.
recommend (numpy 1d array): Ordered set of recommended samples.
Returns:
float: MPR. | Below is the instruction that describes the task:
### Input:
Mean Percentile Rank (MPR).
Args:
truth (numpy 1d array): Set of truth samples.
recommend (numpy 1d array): Ordered set of recommended samples.
Returns:
float: MPR.
### Response:
def mpr(truth, recommend):
"""Mean Percentile Rank (MPR).
Args:
truth (numpy 1d array): Set of truth samples.
recommend (numpy 1d array): Ordered set of recommended samples.
Returns:
float: MPR.
"""
if len(recommend) == 0 and len(truth) == 0:
return 0. # best
    elif len(recommend) == 0 or len(truth) == 0:
return 100. # worst
accum = 0.
n_recommend = recommend.size
for t in truth:
r = np.where(recommend == t)[0][0] / float(n_recommend)
accum += r
return accum * 100. / truth.size |
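A worked example: the single truth item sits at index 1 of 4 recommendations, so its percentile rank is 1/4 and the mean is 25.0:
import numpy as np

truth = np.array([7])
recommend = np.array([3, 7, 1, 9])
print(mpr(truth, recommend))  # 25.0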
def _prompt_wrapper(message, default=None, validator=None):
""" Handle references piped from file
"""
class MockDocument:
def __init__(self, text):
self.text = text
if HAS_INPUT:
ret = prompt(message, default=default, validator=validator)
else:
ret = sys.stdin.readline().strip()
print(message, ret)
if validator:
validator.validate(MockDocument(ret))
if "q" in ret:
if not HAS_OUTPUT:
print("User exit")
sys.exit("User exit")
return ret | Handle references piped from file | Below is the instruction that describes the task:
### Input:
Handle references piped from file
### Response:
def _prompt_wrapper(message, default=None, validator=None):
""" Handle references piped from file
"""
class MockDocument:
def __init__(self, text):
self.text = text
if HAS_INPUT:
ret = prompt(message, default=default, validator=validator)
else:
ret = sys.stdin.readline().strip()
print(message, ret)
if validator:
validator.validate(MockDocument(ret))
if "q" in ret:
if not HAS_OUTPUT:
print("User exit")
sys.exit("User exit")
return ret |
def get_iterator_from_config(config: dict, data: dict):
"""Create iterator (from config) for specified data."""
iterator_config = config['dataset_iterator']
iterator: Union[DataLearningIterator, DataFittingIterator] = from_params(iterator_config,
data=data)
return iterator | Create iterator (from config) for specified data. | Below is the instruction that describes the task:
### Input:
Create iterator (from config) for specified data.
### Response:
def get_iterator_from_config(config: dict, data: dict):
"""Create iterator (from config) for specified data."""
iterator_config = config['dataset_iterator']
iterator: Union[DataLearningIterator, DataFittingIterator] = from_params(iterator_config,
data=data)
return iterator |
def _adjust(a, a_offset, b):
"""
a = bytearray
a_offset = int
b = bytearray
"""
x = (b[-1] & 0xFF) + (a[a_offset + len(b) - 1] & 0xFF) + 1
a[a_offset + len(b) - 1] = ctypes.c_ubyte(x).value
x >>= 8
for i in range(len(b)-2, -1, -1):
x += (b[i] & 0xFF) + (a[a_offset + i] & 0xFF)
a[a_offset + i] = ctypes.c_ubyte(x).value
x >>= 8 | a = bytearray
a_offset = int
b = bytearray | Below is the instruction that describes the task:
### Input:
a = bytearray
a_offset = int
b = bytearray
### Response:
def _adjust(a, a_offset, b):
"""
a = bytearray
a_offset = int
b = bytearray
"""
x = (b[-1] & 0xFF) + (a[a_offset + len(b) - 1] & 0xFF) + 1
a[a_offset + len(b) - 1] = ctypes.c_ubyte(x).value
x >>= 8
for i in range(len(b)-2, -1, -1):
x += (b[i] & 0xFF) + (a[a_offset + i] & 0xFF)
a[a_offset + i] = ctypes.c_ubyte(x).value
x >>= 8 |
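A worked example of the carry propagation: b plus one is added into the two bytes of a starting at offset 1, both slices treated as big-endian integers:
a = bytearray([0x00, 0x01, 0xFF])
b = bytearray([0x00, 0x01])
_adjust(a, 1, b)
print(a.hex())  # '000201', since 0x01FF + 0x0001 + 1 == 0x0201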
def cron(name, timespec, user, command, environ=None, disable=False):
"""Create entry in /etc/cron.d"""
path = '/etc/cron.d/{}'.format(name)
if disable:
sudo('rm ' + path)
return
entry = '{}\t{}\t{}\n'.format(timespec, user, command)
if environ:
envstr = '\n'.join('{}={}'.format(k, v)
for k, v in environ.iteritems())
entry = '{}\n{}'.format(envstr, entry)
chput(StringIO(entry), path, use_sudo=True,
mode=0o644, user='root', group='root') | Create entry in /etc/cron.d | Below is the instruction that describes the task:
### Input:
Create entry in /etc/cron.d
### Response:
def cron(name, timespec, user, command, environ=None, disable=False):
"""Create entry in /etc/cron.d"""
path = '/etc/cron.d/{}'.format(name)
if disable:
sudo('rm ' + path)
return
entry = '{}\t{}\t{}\n'.format(timespec, user, command)
if environ:
envstr = '\n'.join('{}={}'.format(k, v)
for k, v in environ.iteritems())
entry = '{}\n{}'.format(envstr, entry)
chput(StringIO(entry), path, use_sudo=True,
mode=0o644, user='root', group='root') |
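A hypothetical call and the file it would write; the job name, schedule, and command are invented:
cron('backup', '0 3 * * *', 'root', '/usr/local/bin/backup.sh',
     environ={'MAILTO': 'ops@example.com'})
# /etc/cron.d/backup then contains (columns are tab-separated):
#   MAILTO=ops@example.com
#   0 3 * * *   root    /usr/local/bin/backup.sh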
def delete_publisher_asset(self, publisher_name, asset_type=None):
"""DeletePublisherAsset.
[Preview API] Delete publisher asset like logo
:param str publisher_name: Internal name of the publisher
:param str asset_type: Type of asset. Default value is 'logo'.
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
query_parameters = {}
if asset_type is not None:
query_parameters['assetType'] = self._serialize.query('asset_type', asset_type, 'str')
self._send(http_method='DELETE',
location_id='21143299-34f9-4c62-8ca8-53da691192f9',
version='5.1-preview.1',
route_values=route_values,
query_parameters=query_parameters) | DeletePublisherAsset.
[Preview API] Delete publisher asset like logo
:param str publisher_name: Internal name of the publisher
:param str asset_type: Type of asset. Default value is 'logo'. | Below is the instruction that describes the task:
### Input:
DeletePublisherAsset.
[Preview API] Delete publisher asset like logo
:param str publisher_name: Internal name of the publisher
:param str asset_type: Type of asset. Default value is 'logo'.
### Response:
def delete_publisher_asset(self, publisher_name, asset_type=None):
"""DeletePublisherAsset.
[Preview API] Delete publisher asset like logo
:param str publisher_name: Internal name of the publisher
:param str asset_type: Type of asset. Default value is 'logo'.
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
query_parameters = {}
if asset_type is not None:
query_parameters['assetType'] = self._serialize.query('asset_type', asset_type, 'str')
self._send(http_method='DELETE',
location_id='21143299-34f9-4c62-8ca8-53da691192f9',
version='5.1-preview.1',
route_values=route_values,
query_parameters=query_parameters) |
def _archive_single_dir(archive):
"""
Check if all members of the archive are in a single top-level directory
:param archive:
An archive from _open_archive()
:return:
None if not a single top level directory in archive, otherwise a
unicode string of the top level directory name
"""
common_root = None
for info in _list_archive_members(archive):
fn = _info_name(info)
if fn in set(['.', '/']):
continue
sep = None
if '/' in fn:
sep = '/'
elif '\\' in fn:
sep = '\\'
if sep is None:
root_dir = fn
else:
root_dir, _ = fn.split(sep, 1)
if common_root is None:
common_root = root_dir
else:
if common_root != root_dir:
return None
return common_root | Check if all members of the archive are in a single top-level directory
:param archive:
An archive from _open_archive()
:return:
None if not a single top level directory in archive, otherwise a
unicode string of the top level directory name | Below is the instruction that describes the task:
### Input:
Check if all members of the archive are in a single top-level directory
:param archive:
An archive from _open_archive()
:return:
None if not a single top level directory in archive, otherwise a
unicode string of the top level directory name
### Response:
def _archive_single_dir(archive):
"""
Check if all members of the archive are in a single top-level directory
:param archive:
An archive from _open_archive()
:return:
None if not a single top level directory in archive, otherwise a
unicode string of the top level directory name
"""
common_root = None
for info in _list_archive_members(archive):
fn = _info_name(info)
if fn in set(['.', '/']):
continue
sep = None
if '/' in fn:
sep = '/'
elif '\\' in fn:
sep = '\\'
if sep is None:
root_dir = fn
else:
root_dir, _ = fn.split(sep, 1)
if common_root is None:
common_root = root_dir
else:
if common_root != root_dir:
return None
return common_root |
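Hypothetical member listings illustrating the two outcomes of _archive_single_dir:
# ['pkg/', 'pkg/a.py', 'pkg/sub/b.py']  ->  'pkg'  (one shared root)
# ['a.py', 'pkg/b.py']                  ->  None   (no single root)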
def send_xapi_statements(self, lrs_configuration, days):
"""
Send xAPI analytics data of the enterprise learners to the given LRS.
Arguments:
lrs_configuration (XAPILRSConfiguration): Configuration object containing LRS configurations
of the LRS where to send xAPI learner analytics.
days (int): Include course enrollment of this number of days.
"""
persistent_course_grades = self.get_course_completions(lrs_configuration.enterprise_customer, days)
users = self.prefetch_users(persistent_course_grades)
course_overviews = self.prefetch_courses(persistent_course_grades)
for persistent_course_grade in persistent_course_grades:
try:
user = users.get(persistent_course_grade.user_id)
course_overview = course_overviews.get(persistent_course_grade.course_id)
course_grade = CourseGradeFactory().read(user, course_key=persistent_course_grade.course_id)
send_course_completion_statement(lrs_configuration, user, course_overview, course_grade)
except ClientError:
LOGGER.exception(
'Client error while sending course completion to xAPI for'
' enterprise customer {enterprise_customer}.'.format(
enterprise_customer=lrs_configuration.enterprise_customer.name
)
) | Send xAPI analytics data of the enterprise learners to the given LRS.
Arguments:
lrs_configuration (XAPILRSConfiguration): Configuration object containing LRS configurations
of the LRS where to send xAPI learner analytics.
days (int): Include course enrollment of this number of days. | Below is the instruction that describes the task:
### Input:
Send xAPI analytics data of the enterprise learners to the given LRS.
Arguments:
lrs_configuration (XAPILRSConfiguration): Configuration object containing LRS configurations
of the LRS where to send xAPI learner analytics.
days (int): Include course enrollment of this number of days.
### Response:
def send_xapi_statements(self, lrs_configuration, days):
"""
Send xAPI analytics data of the enterprise learners to the given LRS.
Arguments:
lrs_configuration (XAPILRSConfiguration): Configuration object containing LRS configurations
of the LRS where to send xAPI learner analytics.
days (int): Include course enrollment of this number of days.
"""
persistent_course_grades = self.get_course_completions(lrs_configuration.enterprise_customer, days)
users = self.prefetch_users(persistent_course_grades)
course_overviews = self.prefetch_courses(persistent_course_grades)
for persistent_course_grade in persistent_course_grades:
try:
user = users.get(persistent_course_grade.user_id)
course_overview = course_overviews.get(persistent_course_grade.course_id)
course_grade = CourseGradeFactory().read(user, course_key=persistent_course_grade.course_id)
send_course_completion_statement(lrs_configuration, user, course_overview, course_grade)
except ClientError:
LOGGER.exception(
'Client error while sending course completion to xAPI for'
' enterprise customer {enterprise_customer}.'.format(
enterprise_customer=lrs_configuration.enterprise_customer.name
)
) |
def specimens_results_magic(infile='pmag_specimens.txt', measfile='magic_measurements.txt', sampfile='er_samples.txt', sitefile='er_sites.txt', agefile='er_ages.txt', specout='er_specimens.txt', sampout='pmag_samples.txt', siteout='pmag_sites.txt', resout='pmag_results.txt', critout='pmag_criteria.txt', instout='magic_instruments.txt', plotsites=False, fmt='svg', dir_path='.', cors=[], priorities=['DA-AC-ARM', 'DA-AC-TRM'], coord='g', user='', vgps_level='site', do_site_intensity=True, DefaultAge=["none"], avg_directions_by_sample=False, avg_intensities_by_sample=False, avg_all_components=False, avg_by_polarity=False, skip_directions=False, skip_intensities=False, use_sample_latitude=False, use_paleolatitude=False, use_criteria='default'):
"""
    Writes magic_instruments, er_specimens, pmag_samples, pmag_sites, pmag_criteria, and pmag_results. The data used to write this is obtained by reading a pmag_specimens, a magic_measurements, an er_samples, an er_sites, and an er_ages file.
    @param -> infile: path from the WD to the pmag specimens table
@param -> measfile: path from the WD to the magic measurement file
@param -> sampfile: path from the WD to the er sample file
@param -> sitefile: path from the WD to the er sites data file
@param -> agefile: path from the WD to the er ages data file
@param -> specout: path from the WD to the place to write the er specimens data file
@param -> sampout: path from the WD to the place to write the pmag samples data file
@param -> siteout: path from the WD to the place to write the pmag sites data file
@param -> resout: path from the WD to the place to write the pmag results data file
@param -> critout: path from the WD to the place to write the pmag criteria file
    @param -> instout: path from the WD to the place to write the magic instruments file
    @param -> documentation incomplete; if you know more about the purpose of the parameters in this function and its side effects, please extend and complete this string
"""
# initialize some variables
plotsites = False # cannot use draw_figs from within ipmag
Comps = [] # list of components
version_num = pmag.get_version()
args = sys.argv
model_lat_file = ""
Dcrit, Icrit, nocrit = 0, 0, 0
corrections = []
nocorrection = ['DA-NL', 'DA-AC', 'DA-CR']
# do some data adjustments
for cor in cors:
nocorrection.remove('DA-' + cor)
corrections.append('DA-' + cor)
    # ensure every priority code carries the DA-AC- prefix
    priorities = ['DA-AC-' + p if not p.startswith('DA-AC-') else p
                  for p in priorities]
# translate coord into coords
if coord == 's':
coords = ['-1']
if coord == 'g':
coords = ['0']
if coord == 't':
coords = ['100']
if coord == 'b':
coords = ['0', '100']
if vgps_level == 'sample':
vgps = 1 # save sample level VGPS/VADMs
else:
vgps = 0 # site level
if do_site_intensity:
nositeints = 0
else:
nositeints = 1
    # change these all to True/False instead of 1/0
if not skip_intensities:
# set model lat and
if use_sample_latitude and use_paleolatitude:
print("you should set a paleolatitude file OR use present day lat - not both")
return False
elif use_sample_latitude:
get_model_lat = 1
elif use_paleolatitude:
get_model_lat = 2
try:
model_lat_file = dir_path + '/' + args[ind + 1]
get_model_lat = 2
mlat = open(model_lat_file, 'r')
ModelLats = []
for line in mlat.readlines():
ModelLat = {}
tmp = line.split()
ModelLat["er_site_name"] = tmp[0]
ModelLat["site_model_lat"] = tmp[1]
ModelLat["er_sample_name"] = tmp[0]
ModelLat["sample_lat"] = tmp[1]
ModelLats.append(ModelLat)
            mlat.close()
except:
print("use_paleolatitude option requires a valid paleolatitude file")
else:
get_model_lat = 0 # skips VADM calculation entirely
if plotsites and not skip_directions: # plot by site - set up plot window
EQ = {}
EQ['eqarea'] = 1
# define figure 1 as equal area projection
pmagplotlib.plot_init(EQ['eqarea'], 5, 5)
# I don't know why this has to be here, but otherwise the first plot
# never plots...
pmagplotlib.plot_net(EQ['eqarea'])
pmagplotlib.draw_figs(EQ)
infile = os.path.join(dir_path, infile)
measfile = os.path.join(dir_path, measfile)
instout = os.path.join(dir_path, instout)
sampfile = os.path.join(dir_path, sampfile)
sitefile = os.path.join(dir_path, sitefile)
agefile = os.path.join(dir_path, agefile)
specout = os.path.join(dir_path, specout)
sampout = os.path.join(dir_path, sampout)
siteout = os.path.join(dir_path, siteout)
resout = os.path.join(dir_path, resout)
critout = os.path.join(dir_path, critout)
if use_criteria == 'none':
Dcrit, Icrit, nocrit = 1, 1, 1 # no selection criteria
crit_data = pmag.default_criteria(nocrit)
elif use_criteria == 'default':
crit_data = pmag.default_criteria(nocrit) # use default criteria
elif use_criteria == 'existing':
crit_data, file_type = pmag.magic_read(
critout) # use pmag_criteria file
print("Acceptance criteria read in from ", critout)
accept = {}
for critrec in crit_data:
for key in list(critrec.keys()):
# need to migrate specimen_dang to specimen_int_dang for intensity
# data using old format
if 'IE-SPEC' in list(critrec.keys()) and 'specimen_dang' in list(critrec.keys()) and 'specimen_int_dang' not in list(critrec.keys()):
critrec['specimen_int_dang'] = critrec['specimen_dang']
del critrec['specimen_dang']
# need to get rid of ron shaars sample_int_sigma_uT
if 'sample_int_sigma_uT' in list(critrec.keys()):
critrec['sample_int_sigma'] = '%10.3e' % (
eval(critrec['sample_int_sigma_uT']) * 1e-6)
if key not in list(accept.keys()) and critrec[key] != '':
accept[key] = critrec[key]
if use_criteria == 'default':
pmag.magic_write(critout, [accept], 'pmag_criteria')
print("\n Pmag Criteria stored in ", critout, '\n')
# now we're done slow dancing
# read in site data - has the lats and lons
SiteNFO, file_type = pmag.magic_read(sitefile)
# read in site data - has the lats and lons
SampNFO, file_type = pmag.magic_read(sampfile)
# find all the sites with height info.
height_nfo = pmag.get_dictitem(SiteNFO, 'site_height', '', 'F')
if agefile:
AgeNFO, file_type = pmag.magic_read(
agefile) # read in the age information
# read in specimen interpretations
Data, file_type = pmag.magic_read(infile)
# retrieve specimens with intensity data
IntData = pmag.get_dictitem(Data, 'specimen_int', '', 'F')
comment, orient = "", []
samples, sites = [], []
for rec in Data: # run through the data filling in missing keys and finding all components, coordinates available
# fill in missing fields, collect unique sample and site names
if 'er_sample_name' not in list(rec.keys()):
rec['er_sample_name'] = ""
elif rec['er_sample_name'] not in samples:
samples.append(rec['er_sample_name'])
if 'er_site_name' not in list(rec.keys()):
rec['er_site_name'] = ""
elif rec['er_site_name'] not in sites:
sites.append(rec['er_site_name'])
if 'specimen_int' not in list(rec.keys()):
rec['specimen_int'] = ''
if 'specimen_comp_name' not in list(rec.keys()) or rec['specimen_comp_name'] == "":
rec['specimen_comp_name'] = 'A'
if rec['specimen_comp_name'] not in Comps:
Comps.append(rec['specimen_comp_name'])
        # set the default before stripping so a missing key cannot raise KeyError
        if "specimen_tilt_correction" not in list(rec.keys()):
            rec["specimen_tilt_correction"] = "-1"  # assume sample coordinates
        rec['specimen_tilt_correction'] = rec['specimen_tilt_correction'].strip('\n')
if rec["specimen_tilt_correction"] not in orient:
# collect available coordinate systems
orient.append(rec["specimen_tilt_correction"])
if "specimen_direction_type" not in list(rec.keys()):
# assume direction is line - not plane
rec["specimen_direction_type"] = 'l'
if "specimen_dec" not in list(rec.keys()):
# if no declination, set direction type to blank
rec["specimen_direction_type"] = ''
if "specimen_n" not in list(rec.keys()):
rec["specimen_n"] = '' # put in n
if "specimen_alpha95" not in list(rec.keys()):
rec["specimen_alpha95"] = '' # put in alpha95
if "magic_method_codes" not in list(rec.keys()):
rec["magic_method_codes"] = ''
# start parsing data into SpecDirs, SpecPlanes, SpecInts
SpecInts, SpecDirs, SpecPlanes = [], [], []
samples.sort() # get sorted list of samples and sites
sites.sort()
if not skip_intensities: # don't skip intensities
# retrieve specimens with intensity data
IntData = pmag.get_dictitem(Data, 'specimen_int', '', 'F')
if nocrit == 0: # use selection criteria
for rec in IntData: # do selection criteria
kill = pmag.grade(rec, accept, 'specimen_int')
if len(kill) == 0:
# intensity record to be included in sample, site
# calculations
SpecInts.append(rec)
else:
SpecInts = IntData[:] # take everything - no selection criteria
# check for required data adjustments
if len(corrections) > 0 and len(SpecInts) > 0:
for cor in corrections:
# only take specimens with the required corrections
SpecInts = pmag.get_dictitem(
SpecInts, 'magic_method_codes', cor, 'has')
if len(nocorrection) > 0 and len(SpecInts) > 0:
for cor in nocorrection:
# exclude the corrections not specified for inclusion
SpecInts = pmag.get_dictitem(
SpecInts, 'magic_method_codes', cor, 'not')
# take top priority specimen of its name in remaining specimens (only one
# per customer)
PrioritySpecInts = []
specimens = pmag.get_specs(SpecInts) # get list of uniq specimen names
for spec in specimens:
# all the records for this specimen
ThisSpecRecs = pmag.get_dictitem(
SpecInts, 'er_specimen_name', spec, 'T')
if len(ThisSpecRecs) == 1:
PrioritySpecInts.append(ThisSpecRecs[0])
elif len(ThisSpecRecs) > 1: # more than one
prec = []
for p in priorities:
# all the records for this specimen
ThisSpecRecs = pmag.get_dictitem(
SpecInts, 'magic_method_codes', p, 'has')
if len(ThisSpecRecs) > 0:
prec.append(ThisSpecRecs[0])
PrioritySpecInts.append(prec[0]) # take the best one
SpecInts = PrioritySpecInts # this has the first specimen record
if not skip_directions: # don't skip directions
# retrieve specimens with directed lines and planes
AllDirs = pmag.get_dictitem(Data, 'specimen_direction_type', '', 'F')
# get all specimens with specimen_n information
Ns = pmag.get_dictitem(AllDirs, 'specimen_n', '', 'F')
if nocrit != 1: # use selection criteria
for rec in Ns: # look through everything with specimen_n for "good" data
kill = pmag.grade(rec, accept, 'specimen_dir')
if len(kill) == 0: # nothing killed it
SpecDirs.append(rec)
else: # no criteria
SpecDirs = AllDirs[:] # take them all
# SpecDirs is now the list of all specimen directions (lines and planes)
# that pass muster
# list of all sample data and list of those that pass the DE-SAMP criteria
PmagSamps, SampDirs = [], []
PmagSites, PmagResults = [], [] # list of all site data and selected results
SampInts = []
for samp in samples: # run through the sample names
if avg_directions_by_sample: # average by sample if desired
# get all the directional data for this sample
SampDir = pmag.get_dictitem(SpecDirs, 'er_sample_name', samp, 'T')
if len(SampDir) > 0: # there are some directions
for coord in coords: # step through desired coordinate systems
# get all the directions for this sample
CoordDir = pmag.get_dictitem(
SampDir, 'specimen_tilt_correction', coord, 'T')
if len(CoordDir) > 0: # there are some with this coordinate system
if not avg_all_components: # look component by component
for comp in Comps:
# get all directions from this component
CompDir = pmag.get_dictitem(
CoordDir, 'specimen_comp_name', comp, 'T')
if len(CompDir) > 0: # there are some
# get a sample average from all specimens
PmagSampRec = pmag.lnpbykey(
CompDir, 'sample', 'specimen')
# decorate the sample record
PmagSampRec["er_location_name"] = CompDir[0]['er_location_name']
PmagSampRec["er_site_name"] = CompDir[0]['er_site_name']
PmagSampRec["er_sample_name"] = samp
PmagSampRec["er_citation_names"] = "This study"
PmagSampRec["er_analyst_mail_names"] = user
PmagSampRec['magic_software_packages'] = version_num
if CompDir[0]['specimen_flag'] == 'g':
PmagSampRec['sample_flag'] = 'g'
else:
PmagSampRec['sample_flag'] = 'b'
if nocrit != 1:
PmagSampRec['pmag_criteria_codes'] = "ACCEPT"
if agefile != "":
PmagSampRec = pmag.get_age(
PmagSampRec, "er_site_name", "sample_inferred_", AgeNFO, DefaultAge)
site_height = pmag.get_dictitem(
height_nfo, 'er_site_name', PmagSampRec['er_site_name'], 'T')
if len(site_height) > 0:
# add in height if available
PmagSampRec["sample_height"] = site_height[0]['site_height']
PmagSampRec['sample_comp_name'] = comp
PmagSampRec['sample_tilt_correction'] = coord
PmagSampRec['er_specimen_names'] = pmag.get_list(
CompDir, 'er_specimen_name') # get a list of the specimen names used
PmagSampRec['magic_method_codes'] = pmag.get_list(
CompDir, 'magic_method_codes') # get a list of the methods used
if nocrit != 1: # apply selection criteria
kill = pmag.grade(
PmagSampRec, accept, 'sample_dir')
else:
kill = []
if len(kill) == 0:
SampDirs.append(PmagSampRec)
if vgps == 1: # if sample level VGP info desired, do that now
PmagResRec = pmag.getsampVGP(
PmagSampRec, SiteNFO)
if PmagResRec != "":
PmagResults.append(PmagResRec)
# print(PmagSampRec)
PmagSamps.append(PmagSampRec)
if avg_all_components: # average all components together basically same as above
PmagSampRec = pmag.lnpbykey(
CoordDir, 'sample', 'specimen')
PmagSampRec["er_location_name"] = CoordDir[0]['er_location_name']
PmagSampRec["er_site_name"] = CoordDir[0]['er_site_name']
PmagSampRec["er_sample_name"] = samp
PmagSampRec["er_citation_names"] = "This study"
PmagSampRec["er_analyst_mail_names"] = user
PmagSampRec['magic_software_packages'] = version_num
if all(i['specimen_flag'] == 'g' for i in CoordDir):
PmagSampRec['sample_flag'] = 'g'
else:
PmagSampRec['sample_flag'] = 'b'
if nocrit != 1:
PmagSampRec['pmag_criteria_codes'] = ""
if agefile != "":
PmagSampRec = pmag.get_age(
PmagSampRec, "er_site_name", "sample_inferred_", AgeNFO, DefaultAge)
site_height = pmag.get_dictitem(
                            height_nfo, 'er_site_name', PmagSampRec['er_site_name'], 'T')
if len(site_height) > 0:
# add in height if available
PmagSampRec["sample_height"] = site_height[0]['site_height']
PmagSampRec['sample_tilt_correction'] = coord
PmagSampRec['sample_comp_name'] = pmag.get_list(
CoordDir, 'specimen_comp_name') # get components used
PmagSampRec['er_specimen_names'] = pmag.get_list(
                            CoordDir, 'er_specimen_name') # get specimen names averaged
PmagSampRec['magic_method_codes'] = pmag.get_list(
CoordDir, 'magic_method_codes') # assemble method codes
if nocrit != 1: # apply selection criteria
kill = pmag.grade(
PmagSampRec, accept, 'sample_dir')
if len(kill) == 0: # passes the mustard
SampDirs.append(PmagSampRec)
if vgps == 1:
PmagResRec = pmag.getsampVGP(
PmagSampRec, SiteNFO)
if PmagResRec != "":
PmagResults.append(PmagResRec)
else: # take everything
SampDirs.append(PmagSampRec)
if vgps == 1:
PmagResRec = pmag.getsampVGP(
PmagSampRec, SiteNFO)
if PmagResRec != "":
PmagResults.append(PmagResRec)
PmagSamps.append(PmagSampRec)
if avg_intensities_by_sample: # average by sample if desired
# get all the intensity data for this sample
SampI = pmag.get_dictitem(SpecInts, 'er_sample_name', samp, 'T')
if len(SampI) > 0: # there are some
# get average intensity stuff
PmagSampRec = pmag.average_int(SampI, 'specimen', 'sample')
# decorate sample record
PmagSampRec["sample_description"] = "sample intensity"
PmagSampRec["sample_direction_type"] = ""
PmagSampRec['er_site_name'] = SampI[0]["er_site_name"]
PmagSampRec['er_sample_name'] = samp
PmagSampRec['er_location_name'] = SampI[0]["er_location_name"]
PmagSampRec["er_citation_names"] = "This study"
PmagSampRec["er_analyst_mail_names"] = user
if agefile != "":
PmagSampRec = pmag.get_age(
PmagSampRec, "er_site_name", "sample_inferred_", AgeNFO, DefaultAge)
site_height = pmag.get_dictitem(
height_nfo, 'er_site_name', PmagSampRec['er_site_name'], 'T')
if len(site_height) > 0:
# add in height if available
PmagSampRec["sample_height"] = site_height[0]['site_height']
PmagSampRec['er_specimen_names'] = pmag.get_list(
SampI, 'er_specimen_name')
PmagSampRec['magic_method_codes'] = pmag.get_list(
SampI, 'magic_method_codes')
if nocrit != 1: # apply criteria!
kill = pmag.grade(PmagSampRec, accept, 'sample_int')
if len(kill) == 0:
PmagSampRec['pmag_criteria_codes'] = "ACCEPT"
SampInts.append(PmagSampRec)
PmagSamps.append(PmagSampRec)
else:
PmagSampRec = {} # sample rejected
else: # no criteria
SampInts.append(PmagSampRec)
PmagSamps.append(PmagSampRec)
PmagSampRec['pmag_criteria_codes'] = ""
if vgps == 1 and get_model_lat != 0 and PmagSampRec != {}:
if get_model_lat == 1: # use sample latitude
PmagResRec = pmag.getsampVDM(PmagSampRec, SampNFO)
# get rid of the model lat key
del(PmagResRec['model_lat'])
elif get_model_lat == 2: # use model latitude
PmagResRec = pmag.getsampVDM(PmagSampRec, ModelLats)
if PmagResRec != {}:
PmagResRec['magic_method_codes'] = PmagResRec['magic_method_codes'] + ":IE-MLAT"
if PmagResRec != {}:
PmagResRec['er_specimen_names'] = PmagSampRec['er_specimen_names']
PmagResRec['er_sample_names'] = PmagSampRec['er_sample_name']
PmagResRec['pmag_criteria_codes'] = 'ACCEPT'
PmagResRec['average_int_sigma_perc'] = PmagSampRec['sample_int_sigma_perc']
PmagResRec['average_int_sigma'] = PmagSampRec['sample_int_sigma']
PmagResRec['average_int_n'] = PmagSampRec['sample_int_n']
PmagResRec['vadm_n'] = PmagSampRec['sample_int_n']
PmagResRec['data_type'] = 'i'
PmagResults.append(PmagResRec)
if len(PmagSamps) > 0:
# fill in missing keys from different types of records
TmpSamps, keylist = pmag.fillkeys(PmagSamps)
# save in sample output file
pmag.magic_write(sampout, TmpSamps, 'pmag_samples')
print(' sample averages written to ', sampout)
#
# create site averages from specimens or samples as specified
#
for site in sites:
for coord in coords:
if not avg_directions_by_sample:
key, dirlist = 'specimen', SpecDirs # if specimen averages at site level desired
if avg_directions_by_sample:
key, dirlist = 'sample', SampDirs # if sample averages at site level desired
# get all the sites with directions
tmp = pmag.get_dictitem(dirlist, 'er_site_name', site, 'T')
# use only the last coordinate if avg_all_components==False
tmp1 = pmag.get_dictitem(tmp, key + '_tilt_correction', coord, 'T')
# fish out site information (lat/lon, etc.)
sd = pmag.get_dictitem(SiteNFO, 'er_site_name', site, 'T')
if len(sd) > 0:
sitedat = sd[0]
if not avg_all_components: # do component wise averaging
for comp in Comps:
# get all components comp
siteD = pmag.get_dictitem(
tmp1, key + '_comp_name', comp, 'T')
# remove bad data from means
quality_siteD = []
# remove any records for which specimen_flag or sample_flag are 'b'
# assume 'g' if flag is not provided
for rec in siteD:
spec_quality = rec.get('specimen_flag', 'g')
samp_quality = rec.get('sample_flag', 'g')
if (spec_quality == 'g') and (samp_quality == 'g'):
quality_siteD.append(rec)
siteD = quality_siteD
if len(siteD) > 0: # there are some for this site and component name
# get an average for this site
PmagSiteRec = pmag.lnpbykey(siteD, 'site', key)
# decorate the site record
PmagSiteRec['site_comp_name'] = comp
PmagSiteRec["er_location_name"] = siteD[0]['er_location_name']
PmagSiteRec["er_site_name"] = siteD[0]['er_site_name']
PmagSiteRec['site_tilt_correction'] = coord
PmagSiteRec['site_comp_name'] = pmag.get_list(
siteD, key + '_comp_name')
if avg_directions_by_sample:
PmagSiteRec['er_sample_names'] = pmag.get_list(
siteD, 'er_sample_name')
else:
PmagSiteRec['er_specimen_names'] = pmag.get_list(
siteD, 'er_specimen_name')
# determine the demagnetization code (DC3,4 or 5) for this site
AFnum = len(pmag.get_dictitem(
siteD, 'magic_method_codes', 'LP-DIR-AF', 'has'))
Tnum = len(pmag.get_dictitem(
siteD, 'magic_method_codes', 'LP-DIR-T', 'has'))
DC = 3
if AFnum > 0:
DC += 1
if Tnum > 0:
DC += 1
PmagSiteRec['magic_method_codes'] = pmag.get_list(
siteD, 'magic_method_codes') + ':' + 'LP-DC' + str(DC)
PmagSiteRec['magic_method_codes'].strip(":")
if plotsites:
print(PmagSiteRec['er_site_name'])
# plot and list the data
pmagplotlib.plot_site(
EQ['eqarea'], PmagSiteRec, siteD, key)
pmagplotlib.draw_figs(EQ)
PmagSites.append(PmagSiteRec)
else: # last component only
# get the last orientation system specified
siteD = tmp1[:]
if len(siteD) > 0: # there are some
# get the average for this site
PmagSiteRec = pmag.lnpbykey(siteD, 'site', key)
# decorate the record
PmagSiteRec["er_location_name"] = siteD[0]['er_location_name']
PmagSiteRec["er_site_name"] = siteD[0]['er_site_name']
PmagSiteRec['site_comp_name'] = comp
PmagSiteRec['site_tilt_correction'] = coord
PmagSiteRec['site_comp_name'] = pmag.get_list(
siteD, key + '_comp_name')
PmagSiteRec['er_specimen_names'] = pmag.get_list(
siteD, 'er_specimen_name')
PmagSiteRec['er_sample_names'] = pmag.get_list(
siteD, 'er_sample_name')
AFnum = len(pmag.get_dictitem(
siteD, 'magic_method_codes', 'LP-DIR-AF', 'has'))
Tnum = len(pmag.get_dictitem(
siteD, 'magic_method_codes', 'LP-DIR-T', 'has'))
DC = 3
if AFnum > 0:
DC += 1
if Tnum > 0:
DC += 1
PmagSiteRec['magic_method_codes'] = pmag.get_list(
siteD, 'magic_method_codes') + ':' + 'LP-DC' + str(DC)
PmagSiteRec['magic_method_codes'].strip(":")
if not avg_directions_by_sample:
PmagSiteRec['site_comp_name'] = pmag.get_list(
siteD, key + '_comp_name')
if plotsites:
pmagplotlib.plot_site(
EQ['eqarea'], PmagSiteRec, siteD, key)
pmagplotlib.draw_figs(EQ)
PmagSites.append(PmagSiteRec)
else:
print('site information not found in er_sites for site, ',
site, ' site will be skipped')
for PmagSiteRec in PmagSites: # now decorate each dictionary some more, and calculate VGPs etc. for results table
PmagSiteRec["er_citation_names"] = "This study"
PmagSiteRec["er_analyst_mail_names"] = user
PmagSiteRec['magic_software_packages'] = version_num
if agefile != "":
PmagSiteRec = pmag.get_age(
PmagSiteRec, "er_site_name", "site_inferred_", AgeNFO, DefaultAge)
PmagSiteRec['pmag_criteria_codes'] = 'ACCEPT'
if 'site_n_lines' in list(PmagSiteRec.keys()) and 'site_n_planes' in list(PmagSiteRec.keys()) and PmagSiteRec['site_n_lines'] != "" and PmagSiteRec['site_n_planes'] != "":
if int(PmagSiteRec["site_n_planes"]) > 0:
PmagSiteRec["magic_method_codes"] = PmagSiteRec['magic_method_codes'] + ":DE-FM-LP"
elif int(PmagSiteRec["site_n_lines"]) > 2:
PmagSiteRec["magic_method_codes"] = PmagSiteRec['magic_method_codes'] + ":DE-FM"
kill = pmag.grade(PmagSiteRec, accept, 'site_dir')
if len(kill) == 0:
PmagResRec = {} # set up dictionary for the pmag_results table entry
PmagResRec['data_type'] = 'i' # decorate it a bit
PmagResRec['magic_software_packages'] = version_num
PmagSiteRec['site_description'] = 'Site direction included in results table'
PmagResRec['pmag_criteria_codes'] = 'ACCEPT'
dec = float(PmagSiteRec["site_dec"])
inc = float(PmagSiteRec["site_inc"])
if 'site_alpha95' in list(PmagSiteRec.keys()) and PmagSiteRec['site_alpha95'] != "":
a95 = float(PmagSiteRec["site_alpha95"])
else:
a95 = 180.
sitedat = pmag.get_dictitem(SiteNFO, 'er_site_name', PmagSiteRec['er_site_name'], 'T')[
0] # fish out site information (lat/lon, etc.)
lat = float(sitedat['site_lat'])
lon = float(sitedat['site_lon'])
plon, plat, dp, dm = pmag.dia_vgp(
dec, inc, a95, lat, lon) # get the VGP for this site
if PmagSiteRec['site_tilt_correction'] == '-1':
C = ' (spec coord) '
if PmagSiteRec['site_tilt_correction'] == '0':
C = ' (geog. coord) '
if PmagSiteRec['site_tilt_correction'] == '100':
C = ' (strat. coord) '
PmagResRec["pmag_result_name"] = "VGP Site: " + \
PmagSiteRec["er_site_name"] # decorate some more
PmagResRec["result_description"] = "Site VGP, coord system = " + \
str(coord) + ' component: ' + comp
PmagResRec['er_site_names'] = PmagSiteRec['er_site_name']
PmagResRec['pmag_criteria_codes'] = 'ACCEPT'
PmagResRec['er_citation_names'] = 'This study'
PmagResRec['er_analyst_mail_names'] = user
PmagResRec["er_location_names"] = PmagSiteRec["er_location_name"]
if avg_directions_by_sample:
PmagResRec["er_sample_names"] = PmagSiteRec["er_sample_names"]
else:
PmagResRec["er_specimen_names"] = PmagSiteRec["er_specimen_names"]
PmagResRec["tilt_correction"] = PmagSiteRec['site_tilt_correction']
PmagResRec["pole_comp_name"] = PmagSiteRec['site_comp_name']
PmagResRec["average_dec"] = PmagSiteRec["site_dec"]
PmagResRec["average_inc"] = PmagSiteRec["site_inc"]
PmagResRec["average_alpha95"] = PmagSiteRec["site_alpha95"]
PmagResRec["average_n"] = PmagSiteRec["site_n"]
PmagResRec["average_n_lines"] = PmagSiteRec["site_n_lines"]
PmagResRec["average_n_planes"] = PmagSiteRec["site_n_planes"]
PmagResRec["vgp_n"] = PmagSiteRec["site_n"]
PmagResRec["average_k"] = PmagSiteRec["site_k"]
PmagResRec["average_r"] = PmagSiteRec["site_r"]
PmagResRec["average_lat"] = '%10.4f ' % (lat)
PmagResRec["average_lon"] = '%10.4f ' % (lon)
if agefile != "":
PmagResRec = pmag.get_age(
PmagResRec, "er_site_names", "average_", AgeNFO, DefaultAge)
site_height = pmag.get_dictitem(
height_nfo, 'er_site_name', site, 'T')
if len(site_height) > 0:
PmagResRec["average_height"] = site_height[0]['site_height']
PmagResRec["vgp_lat"] = '%7.1f ' % (plat)
PmagResRec["vgp_lon"] = '%7.1f ' % (plon)
PmagResRec["vgp_dp"] = '%7.1f ' % (dp)
PmagResRec["vgp_dm"] = '%7.1f ' % (dm)
PmagResRec["magic_method_codes"] = PmagSiteRec["magic_method_codes"]
if '0' in PmagSiteRec['site_tilt_correction'] and "DA-DIR-GEO" not in PmagSiteRec['magic_method_codes']:
PmagSiteRec['magic_method_codes'] = PmagSiteRec['magic_method_codes'] + ":DA-DIR-GEO"
if '100' in PmagSiteRec['site_tilt_correction'] and "DA-DIR-TILT" not in PmagSiteRec['magic_method_codes']:
PmagSiteRec['magic_method_codes'] = PmagSiteRec['magic_method_codes'] + ":DA-DIR-TILT"
PmagSiteRec['site_polarity'] = ""
if avg_by_polarity: # assign polarity based on angle of pole lat to spin axis - may want to re-think this sometime
angle = pmag.angle([0, 0], [0, (90 - plat)])
if angle <= 55.:
PmagSiteRec["site_polarity"] = 'n'
if angle > 55. and angle < 125.:
PmagSiteRec["site_polarity"] = 't'
if angle >= 125.:
PmagSiteRec["site_polarity"] = 'r'
PmagResults.append(PmagResRec)
if avg_by_polarity:
# find the tilt corrected data
crecs = pmag.get_dictitem(
PmagSites, 'site_tilt_correction', '100', 'T')
if len(crecs) < 2:
# if there aren't any, find the geographic corrected data
crecs = pmag.get_dictitem(
PmagSites, 'site_tilt_correction', '0', 'T')
if len(crecs) > 2: # if there are some,
comp = pmag.get_list(crecs, 'site_comp_name').split(':')[
0] # find the first component
# fish out all of the first component
crecs = pmag.get_dictitem(crecs, 'site_comp_name', comp, 'T')
precs = []
for rec in crecs:
precs.append({'dec': rec['site_dec'], 'inc': rec['site_inc'],
'name': rec['er_site_name'], 'loc': rec['er_location_name']})
# calculate average by polarity
polpars = pmag.fisher_by_pol(precs)
# hunt through all the modes (normal=A, reverse=B, all=ALL)
for mode in list(polpars.keys()):
PolRes = {}
PolRes['er_citation_names'] = 'This study'
PolRes["pmag_result_name"] = "Polarity Average: Polarity " + mode
PolRes["data_type"] = "a"
PolRes["average_dec"] = '%7.1f' % (polpars[mode]['dec'])
PolRes["average_inc"] = '%7.1f' % (polpars[mode]['inc'])
PolRes["average_n"] = '%i' % (polpars[mode]['n'])
PolRes["average_r"] = '%5.4f' % (polpars[mode]['r'])
PolRes["average_k"] = '%6.0f' % (polpars[mode]['k'])
PolRes["average_alpha95"] = '%7.1f' % (
polpars[mode]['alpha95'])
PolRes['er_site_names'] = polpars[mode]['sites']
PolRes['er_location_names'] = polpars[mode]['locs']
PolRes['magic_software_packages'] = version_num
PmagResults.append(PolRes)
if not skip_intensities and nositeints != 1:
for site in sites: # now do intensities for each site
if plotsites:
print(site)
if not avg_intensities_by_sample:
key, intlist = 'specimen', SpecInts # if using specimen level data
if avg_intensities_by_sample:
key, intlist = 'sample', PmagSamps # if using sample level data
# get all the intensities for this site
Ints = pmag.get_dictitem(intlist, 'er_site_name', site, 'T')
if len(Ints) > 0: # there are some
# get average intensity stuff for site table
PmagSiteRec = pmag.average_int(Ints, key, 'site')
# get average intensity stuff for results table
PmagResRec = pmag.average_int(Ints, key, 'average')
if plotsites: # if site by site examination requested - print this site out to the screen
for rec in Ints:
print(rec['er_' + key + '_name'], ' %7.1f' %
(1e6 * float(rec[key + '_int'])))
if len(Ints) > 1:
print('Average: ', '%7.1f' % (
1e6 * float(PmagResRec['average_int'])), 'N: ', len(Ints))
print('Sigma: ', '%7.1f' % (
1e6 * float(PmagResRec['average_int_sigma'])), 'Sigma %: ', PmagResRec['average_int_sigma_perc'])
input('Press any key to continue\n')
er_location_name = Ints[0]["er_location_name"]
# decorate the records
PmagSiteRec["er_location_name"] = er_location_name
PmagSiteRec["er_citation_names"] = "This study"
PmagResRec["er_location_names"] = er_location_name
PmagResRec["er_citation_names"] = "This study"
PmagSiteRec["er_analyst_mail_names"] = user
PmagResRec["er_analyst_mail_names"] = user
PmagResRec["data_type"] = 'i'
if not avg_intensities_by_sample:
PmagSiteRec['er_specimen_names'] = pmag.get_list(
Ints, 'er_specimen_name') # list of all specimens used
PmagResRec['er_specimen_names'] = pmag.get_list(
Ints, 'er_specimen_name')
PmagSiteRec['er_sample_names'] = pmag.get_list(
Ints, 'er_sample_name') # list of all samples used
PmagResRec['er_sample_names'] = pmag.get_list(
Ints, 'er_sample_name')
PmagSiteRec['er_site_name'] = site
PmagResRec['er_site_names'] = site
PmagSiteRec['magic_method_codes'] = pmag.get_list(
Ints, 'magic_method_codes')
PmagResRec['magic_method_codes'] = pmag.get_list(
Ints, 'magic_method_codes')
kill = pmag.grade(PmagSiteRec, accept, 'site_int')
if nocrit == 1 or len(kill) == 0:
b, sig = float(PmagResRec['average_int']), ""
if PmagResRec['average_int_sigma'] != "":
sig = float(PmagResRec['average_int_sigma'])
# fish out site direction
sdir = pmag.get_dictitem(
PmagResults, 'er_site_names', site, 'T')
# get the VDM for this record using last average
# inclination (hope it is the right one!)
if len(sdir) > 0 and sdir[-1]['average_inc'] != "":
inc = float(sdir[-1]['average_inc'])
# get magnetic latitude using dipole formula
mlat = pmag.magnetic_lat(inc)
# get VDM with magnetic latitude
PmagResRec["vdm"] = '%8.3e ' % (pmag.b_vdm(b, mlat))
PmagResRec["vdm_n"] = PmagResRec['average_int_n']
if 'average_int_sigma' in list(PmagResRec.keys()) and PmagResRec['average_int_sigma'] != "":
vdm_sig = pmag.b_vdm(
float(PmagResRec['average_int_sigma']), mlat)
PmagResRec["vdm_sigma"] = '%8.3e ' % (vdm_sig)
else:
PmagResRec["vdm_sigma"] = ""
mlat = "" # define a model latitude
if get_model_lat == 1: # use present site latitude
mlats = pmag.get_dictitem(
SiteNFO, 'er_site_name', site, 'T')
if len(mlats) > 0:
mlat = mlats[0]['site_lat']
# use a model latitude from some plate reconstruction model
# (or something)
elif get_model_lat == 2:
mlats = pmag.get_dictitem(
ModelLats, 'er_site_name', site, 'T')
if len(mlats) > 0:
PmagResRec['model_lat'] = mlats[0]['site_model_lat']
mlat = PmagResRec['model_lat']
if mlat != "":
# get the VADM using the desired latitude
PmagResRec["vadm"] = '%8.3e ' % (
pmag.b_vdm(b, float(mlat)))
if sig != "":
vdm_sig = pmag.b_vdm(
float(PmagResRec['average_int_sigma']), float(mlat))
PmagResRec["vadm_sigma"] = '%8.3e ' % (vdm_sig)
PmagResRec["vadm_n"] = PmagResRec['average_int_n']
else:
PmagResRec["vadm_sigma"] = ""
# fish out site information (lat/lon, etc.)
sitedat = pmag.get_dictitem(
SiteNFO, 'er_site_name', PmagSiteRec['er_site_name'], 'T')
if len(sitedat) > 0:
sitedat = sitedat[0]
PmagResRec['average_lat'] = sitedat['site_lat']
PmagResRec['average_lon'] = sitedat['site_lon']
else:
PmagResRec['average_lat'] = 'UNKNOWN'
PmagResRec['average_lon'] = 'UNKNOWN'
PmagResRec['magic_software_packages'] = version_num
PmagResRec["pmag_result_name"] = "V[A]DM: Site " + site
PmagResRec["result_description"] = "V[A]DM of site"
PmagResRec["pmag_criteria_codes"] = "ACCEPT"
if agefile != "":
PmagResRec = pmag.get_age(
PmagResRec, "er_site_names", "average_", AgeNFO, DefaultAge)
site_height = pmag.get_dictitem(
height_nfo, 'er_site_name', site, 'T')
if len(site_height) > 0:
PmagResRec["average_height"] = site_height[0]['site_height']
PmagSites.append(PmagSiteRec)
PmagResults.append(PmagResRec)
if len(PmagSites) > 0:
Tmp, keylist = pmag.fillkeys(PmagSites)
pmag.magic_write(siteout, Tmp, 'pmag_sites')
print(' sites written to ', siteout)
else:
print("No Site level table")
if len(PmagResults) > 0:
TmpRes, keylist = pmag.fillkeys(PmagResults)
pmag.magic_write(resout, TmpRes, 'pmag_results')
print(' results written to ', resout)
else:
print("No Results level table") | Writes magic_instruments, er_specimens, pmag_samples, pmag_sites, pmag_criteria, and pmag_results. The data used to write this is obtained by reading a pmag_speciemns, a magic_measurements, a er_samples, a er_sites, a er_ages.
@param -> infile: path from the WD to the pmag speciemns table
@param -> measfile: path from the WD to the magic measurement file
@param -> sampfile: path from the WD to the er sample file
@param -> sitefile: path from the WD to the er sites data file
@param -> agefile: path from the WD to the er ages data file
@param -> specout: path from the WD to the place to write the er specimens data file
@param -> sampout: path from the WD to the place to write the pmag samples data file
@param -> siteout: path from the WD to the place to write the pmag sites data file
@param -> resout: path from the WD to the place to write the pmag results data file
@param -> critout: path from the WD to the place to write the pmag criteria file
@param -> instout: path from th WD to the place to write the magic instruments file
@param -> documentation incomplete if you know more about the purpose of the parameters in this function and it's side effects please extend and complete this string | Below is the the instruction that describes the task:
### Input:
Writes magic_instruments, er_specimens, pmag_samples, pmag_sites, pmag_criteria, and pmag_results. The data used to write this is obtained by reading a pmag_specimens, a magic_measurements, an er_samples, an er_sites, and an er_ages file.
@param -> infile: path from the WD to the pmag specimens table
@param -> measfile: path from the WD to the magic measurement file
@param -> sampfile: path from the WD to the er sample file
@param -> sitefile: path from the WD to the er sites data file
@param -> agefile: path from the WD to the er ages data file
@param -> specout: path from the WD to the place to write the er specimens data file
@param -> sampout: path from the WD to the place to write the pmag samples data file
@param -> siteout: path from the WD to the place to write the pmag sites data file
@param -> resout: path from the WD to the place to write the pmag results data file
@param -> critout: path from the WD to the place to write the pmag criteria file
@param -> instout: path from the WD to the place to write the magic instruments file
@param -> documentation incomplete; if you know more about the purpose of the parameters in this function and its side effects, please extend and complete this string
### Response:
def specimens_results_magic(infile='pmag_specimens.txt', measfile='magic_measurements.txt', sampfile='er_samples.txt', sitefile='er_sites.txt', agefile='er_ages.txt', specout='er_specimens.txt', sampout='pmag_samples.txt', siteout='pmag_sites.txt', resout='pmag_results.txt', critout='pmag_criteria.txt', instout='magic_instruments.txt', plotsites=False, fmt='svg', dir_path='.', cors=[], priorities=['DA-AC-ARM', 'DA-AC-TRM'], coord='g', user='', vgps_level='site', do_site_intensity=True, DefaultAge=["none"], avg_directions_by_sample=False, avg_intensities_by_sample=False, avg_all_components=False, avg_by_polarity=False, skip_directions=False, skip_intensities=False, use_sample_latitude=False, use_paleolatitude=False, use_criteria='default'):
"""
Writes magic_instruments, er_specimens, pmag_samples, pmag_sites, pmag_criteria, and pmag_results. The data used to write this is obtained by reading a pmag_specimens, a magic_measurements, an er_samples, an er_sites, and an er_ages file.
@param -> infile: path from the WD to the pmag specimens table
@param -> measfile: path from the WD to the magic measurement file
@param -> sampfile: path from the WD to the er sample file
@param -> sitefile: path from the WD to the er sites data file
@param -> agefile: path from the WD to the er ages data file
@param -> specout: path from the WD to the place to write the er specimens data file
@param -> sampout: path from the WD to the place to write the pmag samples data file
@param -> siteout: path from the WD to the place to write the pmag sites data file
@param -> resout: path from the WD to the place to write the pmag results data file
@param -> critout: path from the WD to the place to write the pmag criteria file
@param -> instout: path from the WD to the place to write the magic instruments file
@param -> documentation incomplete; if you know more about the purpose of the parameters in this function and its side effects, please extend and complete this string
"""
# initialize some variables
plotsites = False # cannot use draw_figs from within ipmag
Comps = [] # list of components
version_num = pmag.get_version()
args = sys.argv
model_lat_file = ""
Dcrit, Icrit, nocrit = 0, 0, 0
corrections = []
nocorrection = ['DA-NL', 'DA-AC', 'DA-CR']
# do some data adjustments
for cor in cors:
nocorrection.remove('DA-' + cor)
corrections.append('DA-' + cor)
# make sure every priority code carries the 'DA-AC-' prefix
# (reassigning the loop variable, as the original did, had no effect)
priorities = [p if p.startswith('DA-AC-') else 'DA-AC-' + p for p in priorities]
# translate coord into coords
if coord == 's':
coords = ['-1']
if coord == 'g':
coords = ['0']
if coord == 't':
coords = ['100']
if coord == 'b':
coords = ['0', '100']
if vgps_level == 'sample':
vgps = 1 # save sample level VGPS/VADMs
else:
vgps = 0 # site level
if do_site_intensity:
nositeints = 0
else:
nositeints = 1
# change these all to True/False instead of 1/0
if not skip_intensities:
# set model lat and
if use_sample_latitude and use_paleolatitude:
print("you should set a paleolatitude file OR use present day lat - not both")
return False
elif use_sample_latitude:
get_model_lat = 1
elif use_paleolatitude:
get_model_lat = 2
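# read the paleolatitude file: one "site_name model_lat" pair per line is expected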
try:
model_lat_file = dir_path + '/' + args[ind + 1]
get_model_lat = 2
mlat = open(model_lat_file, 'r')
ModelLats = []
for line in mlat.readlines():
ModelLat = {}
tmp = line.split()
ModelLat["er_site_name"] = tmp[0]
ModelLat["site_model_lat"] = tmp[1]
ModelLat["er_sample_name"] = tmp[0]
ModelLat["sample_lat"] = tmp[1]
ModelLats.append(ModelLat)
mlat.close()
except:
print("use_paleolatitude option requires a valid paleolatitude file")
else:
get_model_lat = 0 # skips VADM calculation entirely
if plotsites and not skip_directions: # plot by site - set up plot window
EQ = {}
EQ['eqarea'] = 1
# define figure 1 as equal area projection
pmagplotlib.plot_init(EQ['eqarea'], 5, 5)
# I don't know why this has to be here, but otherwise the first plot
# never plots...
pmagplotlib.plot_net(EQ['eqarea'])
pmagplotlib.draw_figs(EQ)
infile = os.path.join(dir_path, infile)
measfile = os.path.join(dir_path, measfile)
instout = os.path.join(dir_path, instout)
sampfile = os.path.join(dir_path, sampfile)
sitefile = os.path.join(dir_path, sitefile)
agefile = os.path.join(dir_path, agefile)
specout = os.path.join(dir_path, specout)
sampout = os.path.join(dir_path, sampout)
siteout = os.path.join(dir_path, siteout)
resout = os.path.join(dir_path, resout)
critout = os.path.join(dir_path, critout)
if use_criteria == 'none':
Dcrit, Icrit, nocrit = 1, 1, 1 # no selection criteria
crit_data = pmag.default_criteria(nocrit)
elif use_criteria == 'default':
crit_data = pmag.default_criteria(nocrit) # use default criteria
elif use_criteria == 'existing':
crit_data, file_type = pmag.magic_read(
critout) # use pmag_criteria file
print("Acceptance criteria read in from ", critout)
accept = {}
for critrec in crit_data:
for key in list(critrec.keys()):
# need to migrate specimen_dang to specimen_int_dang for intensity
# data using old format
if 'IE-SPEC' in list(critrec.keys()) and 'specimen_dang' in list(critrec.keys()) and 'specimen_int_dang' not in list(critrec.keys()):
critrec['specimen_int_dang'] = critrec['specimen_dang']
del critrec['specimen_dang']
# need to get rid of ron shaars sample_int_sigma_uT
if 'sample_int_sigma_uT' in list(critrec.keys()):
critrec['sample_int_sigma'] = '%10.3e' % (
float(critrec['sample_int_sigma_uT']) * 1e-6)
if key not in list(accept.keys()) and critrec[key] != '':
accept[key] = critrec[key]
if use_criteria == 'default':
pmag.magic_write(critout, [accept], 'pmag_criteria')
print("\n Pmag Criteria stored in ", critout, '\n')
# now we're done slow dancing
# read in site data - has the lats and lons
SiteNFO, file_type = pmag.magic_read(sitefile)
# read in sample data
SampNFO, file_type = pmag.magic_read(sampfile)
# find all the sites with height info.
height_nfo = pmag.get_dictitem(SiteNFO, 'site_height', '', 'F')
if agefile:
AgeNFO, file_type = pmag.magic_read(
agefile) # read in the age information
# read in specimen interpretations
Data, file_type = pmag.magic_read(infile)
# retrieve specimens with intensity data
IntData = pmag.get_dictitem(Data, 'specimen_int', '', 'F')
comment, orient = "", []
samples, sites = [], []
for rec in Data: # run through the data filling in missing keys and finding all components, coordinates available
# fill in missing fields, collect unique sample and site names
if 'er_sample_name' not in list(rec.keys()):
rec['er_sample_name'] = ""
elif rec['er_sample_name'] not in samples:
samples.append(rec['er_sample_name'])
if 'er_site_name' not in list(rec.keys()):
rec['er_site_name'] = ""
elif rec['er_site_name'] not in sites:
sites.append(rec['er_site_name'])
if 'specimen_int' not in list(rec.keys()):
rec['specimen_int'] = ''
if 'specimen_comp_name' not in list(rec.keys()) or rec['specimen_comp_name'] == "":
rec['specimen_comp_name'] = 'A'
if rec['specimen_comp_name'] not in Comps:
Comps.append(rec['specimen_comp_name'])
if "specimen_tilt_correction" not in list(rec.keys()):
rec["specimen_tilt_correction"] = "-1" # assume sample coordinates
rec['specimen_tilt_correction'] = rec['specimen_tilt_correction'].strip('\n')
if rec["specimen_tilt_correction"] not in orient:
# collect available coordinate systems
orient.append(rec["specimen_tilt_correction"])
if "specimen_direction_type" not in list(rec.keys()):
# assume direction is line - not plane
rec["specimen_direction_type"] = 'l'
if "specimen_dec" not in list(rec.keys()):
# if no declination, set direction type to blank
rec["specimen_direction_type"] = ''
if "specimen_n" not in list(rec.keys()):
rec["specimen_n"] = '' # put in n
if "specimen_alpha95" not in list(rec.keys()):
rec["specimen_alpha95"] = '' # put in alpha95
if "magic_method_codes" not in list(rec.keys()):
rec["magic_method_codes"] = ''
# start parsing data into SpecDirs, SpecPlanes, SpecInts
SpecInts, SpecDirs, SpecPlanes = [], [], []
samples.sort() # get sorted list of samples and sites
sites.sort()
if not skip_intensities: # don't skip intensities
# retrieve specimens with intensity data
IntData = pmag.get_dictitem(Data, 'specimen_int', '', 'F')
if nocrit == 0: # use selection criteria
for rec in IntData: # do selection criteria
kill = pmag.grade(rec, accept, 'specimen_int')
if len(kill) == 0:
# intensity record to be included in sample, site
# calculations
SpecInts.append(rec)
else:
SpecInts = IntData[:] # take everything - no selection criteria
# check for required data adjustments
if len(corrections) > 0 and len(SpecInts) > 0:
for cor in corrections:
# only take specimens with the required corrections
SpecInts = pmag.get_dictitem(
SpecInts, 'magic_method_codes', cor, 'has')
if len(nocorrection) > 0 and len(SpecInts) > 0:
for cor in nocorrection:
# exclude the corrections not specified for inclusion
SpecInts = pmag.get_dictitem(
SpecInts, 'magic_method_codes', cor, 'not')
# take top priority specimen of its name in remaining specimens (only one
# per customer)
PrioritySpecInts = []
specimens = pmag.get_specs(SpecInts) # get list of uniq specimen names
for spec in specimens:
# all the records for this specimen
ThisSpecRecs = pmag.get_dictitem(
SpecInts, 'er_specimen_name', spec, 'T')
if len(ThisSpecRecs) == 1:
PrioritySpecInts.append(ThisSpecRecs[0])
elif len(ThisSpecRecs) > 1: # more than one
prec = []
for p in priorities:
# all the records for this specimen
ThisSpecRecs = pmag.get_dictitem(
SpecInts, 'magic_method_codes', p, 'has')
if len(ThisSpecRecs) > 0:
prec.append(ThisSpecRecs[0])
PrioritySpecInts.append(prec[0]) # take the best one
SpecInts = PrioritySpecInts # this has the first specimen record
if not skip_directions: # don't skip directions
# retrieve specimens with directed lines and planes
AllDirs = pmag.get_dictitem(Data, 'specimen_direction_type', '', 'F')
# get all specimens with specimen_n information
Ns = pmag.get_dictitem(AllDirs, 'specimen_n', '', 'F')
if nocrit != 1: # use selection criteria
for rec in Ns: # look through everything with specimen_n for "good" data
kill = pmag.grade(rec, accept, 'specimen_dir')
if len(kill) == 0: # nothing killed it
SpecDirs.append(rec)
else: # no criteria
SpecDirs = AllDirs[:] # take them all
# SpecDirs is now the list of all specimen directions (lines and planes)
# that pass muster
# list of all sample data and list of those that pass the DE-SAMP criteria
PmagSamps, SampDirs = [], []
PmagSites, PmagResults = [], [] # list of all site data and selected results
SampInts = []
for samp in samples: # run through the sample names
if avg_directions_by_sample: # average by sample if desired
# get all the directional data for this sample
SampDir = pmag.get_dictitem(SpecDirs, 'er_sample_name', samp, 'T')
if len(SampDir) > 0: # there are some directions
for coord in coords: # step through desired coordinate systems
# get all the directions for this sample
CoordDir = pmag.get_dictitem(
SampDir, 'specimen_tilt_correction', coord, 'T')
if len(CoordDir) > 0: # there are some with this coordinate system
if not avg_all_components: # look component by component
for comp in Comps:
# get all directions from this component
CompDir = pmag.get_dictitem(
CoordDir, 'specimen_comp_name', comp, 'T')
if len(CompDir) > 0: # there are some
# get a sample average from all specimens
PmagSampRec = pmag.lnpbykey(
CompDir, 'sample', 'specimen')
# decorate the sample record
PmagSampRec["er_location_name"] = CompDir[0]['er_location_name']
PmagSampRec["er_site_name"] = CompDir[0]['er_site_name']
PmagSampRec["er_sample_name"] = samp
PmagSampRec["er_citation_names"] = "This study"
PmagSampRec["er_analyst_mail_names"] = user
PmagSampRec['magic_software_packages'] = version_num
if CompDir[0]['specimen_flag'] == 'g':
PmagSampRec['sample_flag'] = 'g'
else:
PmagSampRec['sample_flag'] = 'b'
if nocrit != 1:
PmagSampRec['pmag_criteria_codes'] = "ACCEPT"
if agefile != "":
PmagSampRec = pmag.get_age(
PmagSampRec, "er_site_name", "sample_inferred_", AgeNFO, DefaultAge)
site_height = pmag.get_dictitem(
height_nfo, 'er_site_name', PmagSampRec['er_site_name'], 'T')
if len(site_height) > 0:
# add in height if available
PmagSampRec["sample_height"] = site_height[0]['site_height']
PmagSampRec['sample_comp_name'] = comp
PmagSampRec['sample_tilt_correction'] = coord
PmagSampRec['er_specimen_names'] = pmag.get_list(
CompDir, 'er_specimen_name') # get a list of the specimen names used
PmagSampRec['magic_method_codes'] = pmag.get_list(
CompDir, 'magic_method_codes') # get a list of the methods used
if nocrit != 1: # apply selection criteria
kill = pmag.grade(
PmagSampRec, accept, 'sample_dir')
else:
kill = []
if len(kill) == 0:
SampDirs.append(PmagSampRec)
if vgps == 1: # if sample level VGP info desired, do that now
PmagResRec = pmag.getsampVGP(
PmagSampRec, SiteNFO)
if PmagResRec != "":
PmagResults.append(PmagResRec)
# print(PmagSampRec)
PmagSamps.append(PmagSampRec)
if avg_all_components: # average all components together basically same as above
PmagSampRec = pmag.lnpbykey(
CoordDir, 'sample', 'specimen')
PmagSampRec["er_location_name"] = CoordDir[0]['er_location_name']
PmagSampRec["er_site_name"] = CoordDir[0]['er_site_name']
PmagSampRec["er_sample_name"] = samp
PmagSampRec["er_citation_names"] = "This study"
PmagSampRec["er_analyst_mail_names"] = user
PmagSampRec['magic_software_packages'] = version_num
if all(i['specimen_flag'] == 'g' for i in CoordDir):
PmagSampRec['sample_flag'] = 'g'
else:
PmagSampRec['sample_flag'] = 'b'
if nocrit != 1:
PmagSampRec['pmag_criteria_codes'] = ""
if agefile != "":
PmagSampRec = pmag.get_age(
PmagSampRec, "er_site_name", "sample_inferred_", AgeNFO, DefaultAge)
site_height = pmag.get_dictitem(
height_nfo, 'er_site_name', site, 'T')
if len(site_height) > 0:
# add in height if available
PmagSampRec["sample_height"] = site_height[0]['site_height']
PmagSampRec['sample_tilt_correction'] = coord
PmagSampRec['sample_comp_name'] = pmag.get_list(
CoordDir, 'specimen_comp_name') # get components used
PmagSampRec['er_specimen_names'] = pmag.get_list(
CoordDir, 'er_specimen_name') # get specimen names averaged
PmagSampRec['magic_method_codes'] = pmag.get_list(
CoordDir, 'magic_method_codes') # assemble method codes
if nocrit != 1: # apply selection criteria
kill = pmag.grade(
PmagSampRec, accept, 'sample_dir')
if len(kill) == 0: # passes muster
SampDirs.append(PmagSampRec)
if vgps == 1:
PmagResRec = pmag.getsampVGP(
PmagSampRec, SiteNFO)
if PmagResRec != "":
PmagResults.append(PmagResRec)
else: # take everything
SampDirs.append(PmagSampRec)
if vgps == 1:
PmagResRec = pmag.getsampVGP(
PmagSampRec, SiteNFO)
if PmagResRec != "":
PmagResults.append(PmagResRec)
PmagSamps.append(PmagSampRec)
if avg_intensities_by_sample: # average by sample if desired
# get all the intensity data for this sample
SampI = pmag.get_dictitem(SpecInts, 'er_sample_name', samp, 'T')
if len(SampI) > 0: # there are some
# get average intensity stuff
PmagSampRec = pmag.average_int(SampI, 'specimen', 'sample')
# decorate sample record
PmagSampRec["sample_description"] = "sample intensity"
PmagSampRec["sample_direction_type"] = ""
PmagSampRec['er_site_name'] = SampI[0]["er_site_name"]
PmagSampRec['er_sample_name'] = samp
PmagSampRec['er_location_name'] = SampI[0]["er_location_name"]
PmagSampRec["er_citation_names"] = "This study"
PmagSampRec["er_analyst_mail_names"] = user
if agefile != "":
PmagSampRec = pmag.get_age(
PmagSampRec, "er_site_name", "sample_inferred_", AgeNFO, DefaultAge)
site_height = pmag.get_dictitem(
height_nfo, 'er_site_name', PmagSampRec['er_site_name'], 'T')
if len(site_height) > 0:
# add in height if available
PmagSampRec["sample_height"] = site_height[0]['site_height']
PmagSampRec['er_specimen_names'] = pmag.get_list(
SampI, 'er_specimen_name')
PmagSampRec['magic_method_codes'] = pmag.get_list(
SampI, 'magic_method_codes')
if nocrit != 1: # apply criteria!
kill = pmag.grade(PmagSampRec, accept, 'sample_int')
if len(kill) == 0:
PmagSampRec['pmag_criteria_codes'] = "ACCEPT"
SampInts.append(PmagSampRec)
PmagSamps.append(PmagSampRec)
else:
PmagSampRec = {} # sample rejected
else: # no criteria
SampInts.append(PmagSampRec)
PmagSamps.append(PmagSampRec)
PmagSampRec['pmag_criteria_codes'] = ""
if vgps == 1 and get_model_lat != 0 and PmagSampRec != {}:
if get_model_lat == 1: # use sample latitude
PmagResRec = pmag.getsampVDM(PmagSampRec, SampNFO)
# get rid of the model lat key
del(PmagResRec['model_lat'])
elif get_model_lat == 2: # use model latitude
PmagResRec = pmag.getsampVDM(PmagSampRec, ModelLats)
if PmagResRec != {}:
PmagResRec['magic_method_codes'] = PmagResRec['magic_method_codes'] + ":IE-MLAT"
if PmagResRec != {}:
PmagResRec['er_specimen_names'] = PmagSampRec['er_specimen_names']
PmagResRec['er_sample_names'] = PmagSampRec['er_sample_name']
PmagResRec['pmag_criteria_codes'] = 'ACCEPT'
PmagResRec['average_int_sigma_perc'] = PmagSampRec['sample_int_sigma_perc']
PmagResRec['average_int_sigma'] = PmagSampRec['sample_int_sigma']
PmagResRec['average_int_n'] = PmagSampRec['sample_int_n']
PmagResRec['vadm_n'] = PmagSampRec['sample_int_n']
PmagResRec['data_type'] = 'i'
PmagResults.append(PmagResRec)
if len(PmagSamps) > 0:
# fill in missing keys from different types of records
TmpSamps, keylist = pmag.fillkeys(PmagSamps)
# save in sample output file
pmag.magic_write(sampout, TmpSamps, 'pmag_samples')
print(' sample averages written to ', sampout)
#
# create site averages from specimens or samples as specified
#
for site in sites:
for coord in coords:
if not avg_directions_by_sample:
key, dirlist = 'specimen', SpecDirs # if specimen averages at site level desired
if avg_directions_by_sample:
key, dirlist = 'sample', SampDirs # if sample averages at site level desired
# get all the sites with directions
tmp = pmag.get_dictitem(dirlist, 'er_site_name', site, 'T')
# use only the last coordinate if avg_all_components==False
tmp1 = pmag.get_dictitem(tmp, key + '_tilt_correction', coord, 'T')
# fish out site information (lat/lon, etc.)
sd = pmag.get_dictitem(SiteNFO, 'er_site_name', site, 'T')
if len(sd) > 0:
sitedat = sd[0]
if not avg_all_components: # do component wise averaging
for comp in Comps:
# get all components comp
siteD = pmag.get_dictitem(
tmp1, key + '_comp_name', comp, 'T')
# remove bad data from means
quality_siteD = []
# remove any records for which specimen_flag or sample_flag are 'b'
# assume 'g' if flag is not provided
for rec in siteD:
spec_quality = rec.get('specimen_flag', 'g')
samp_quality = rec.get('sample_flag', 'g')
if (spec_quality == 'g') and (samp_quality == 'g'):
quality_siteD.append(rec)
siteD = quality_siteD
if len(siteD) > 0: # there are some for this site and component name
# get an average for this site
PmagSiteRec = pmag.lnpbykey(siteD, 'site', key)
# decorate the site record
PmagSiteRec['site_comp_name'] = comp
PmagSiteRec["er_location_name"] = siteD[0]['er_location_name']
PmagSiteRec["er_site_name"] = siteD[0]['er_site_name']
PmagSiteRec['site_tilt_correction'] = coord
PmagSiteRec['site_comp_name'] = pmag.get_list(
siteD, key + '_comp_name')
if avg_directions_by_sample:
PmagSiteRec['er_sample_names'] = pmag.get_list(
siteD, 'er_sample_name')
else:
PmagSiteRec['er_specimen_names'] = pmag.get_list(
siteD, 'er_specimen_name')
# determine the demagnetization code (DC3,4 or 5) for this site
AFnum = len(pmag.get_dictitem(
siteD, 'magic_method_codes', 'LP-DIR-AF', 'has'))
Tnum = len(pmag.get_dictitem(
siteD, 'magic_method_codes', 'LP-DIR-T', 'has'))
DC = 3
if AFnum > 0:
DC += 1
if Tnum > 0:
DC += 1
PmagSiteRec['magic_method_codes'] = pmag.get_list(
siteD, 'magic_method_codes') + ':' + 'LP-DC' + str(DC)
PmagSiteRec['magic_method_codes'] = PmagSiteRec['magic_method_codes'].strip(":")
if plotsites:
print(PmagSiteRec['er_site_name'])
# plot and list the data
pmagplotlib.plot_site(
EQ['eqarea'], PmagSiteRec, siteD, key)
pmagplotlib.draw_figs(EQ)
PmagSites.append(PmagSiteRec)
else: # last component only
# get the last orientation system specified
siteD = tmp1[:]
if len(siteD) > 0: # there are some
# get the average for this site
PmagSiteRec = pmag.lnpbykey(siteD, 'site', key)
# decorate the record
PmagSiteRec["er_location_name"] = siteD[0]['er_location_name']
PmagSiteRec["er_site_name"] = siteD[0]['er_site_name']
PmagSiteRec['site_comp_name'] = comp
PmagSiteRec['site_tilt_correction'] = coord
PmagSiteRec['site_comp_name'] = pmag.get_list(
siteD, key + '_comp_name')
PmagSiteRec['er_specimen_names'] = pmag.get_list(
siteD, 'er_specimen_name')
PmagSiteRec['er_sample_names'] = pmag.get_list(
siteD, 'er_sample_name')
AFnum = len(pmag.get_dictitem(
siteD, 'magic_method_codes', 'LP-DIR-AF', 'has'))
Tnum = len(pmag.get_dictitem(
siteD, 'magic_method_codes', 'LP-DIR-T', 'has'))
DC = 3
if AFnum > 0:
DC += 1
if Tnum > 0:
DC += 1
PmagSiteRec['magic_method_codes'] = pmag.get_list(
siteD, 'magic_method_codes') + ':' + 'LP-DC' + str(DC)
PmagSiteRec['magic_method_codes'] = PmagSiteRec['magic_method_codes'].strip(":")
if not avg_directions_by_sample:
PmagSiteRec['site_comp_name'] = pmag.get_list(
siteD, key + '_comp_name')
if plotsites:
pmagplotlib.plot_site(
EQ['eqarea'], PmagSiteRec, siteD, key)
pmagplotlib.draw_figs(EQ)
PmagSites.append(PmagSiteRec)
else:
print('site information not found in er_sites for site', site, '- site will be skipped')
for PmagSiteRec in PmagSites: # now decorate each dictionary some more, and calculate VGPs etc. for results table
PmagSiteRec["er_citation_names"] = "This study"
PmagSiteRec["er_analyst_mail_names"] = user
PmagSiteRec['magic_software_packages'] = version_num
if agefile != "":
PmagSiteRec = pmag.get_age(
PmagSiteRec, "er_site_name", "site_inferred_", AgeNFO, DefaultAge)
PmagSiteRec['pmag_criteria_codes'] = 'ACCEPT'
if 'site_n_lines' in list(PmagSiteRec.keys()) and 'site_n_planes' in list(PmagSiteRec.keys()) and PmagSiteRec['site_n_lines'] != "" and PmagSiteRec['site_n_planes'] != "":
if int(PmagSiteRec["site_n_planes"]) > 0:
PmagSiteRec["magic_method_codes"] = PmagSiteRec['magic_method_codes'] + ":DE-FM-LP"
elif int(PmagSiteRec["site_n_lines"]) > 2:
PmagSiteRec["magic_method_codes"] = PmagSiteRec['magic_method_codes'] + ":DE-FM"
kill = pmag.grade(PmagSiteRec, accept, 'site_dir')
if len(kill) == 0:
PmagResRec = {} # set up dictionary for the pmag_results table entry
PmagResRec['data_type'] = 'i' # decorate it a bit
PmagResRec['magic_software_packages'] = version_num
PmagSiteRec['site_description'] = 'Site direction included in results table'
PmagResRec['pmag_criteria_codes'] = 'ACCEPT'
dec = float(PmagSiteRec["site_dec"])
inc = float(PmagSiteRec["site_inc"])
if 'site_alpha95' in list(PmagSiteRec.keys()) and PmagSiteRec['site_alpha95'] != "":
a95 = float(PmagSiteRec["site_alpha95"])
else:
a95 = 180.
sitedat = pmag.get_dictitem(SiteNFO, 'er_site_name', PmagSiteRec['er_site_name'], 'T')[
0] # fish out site information (lat/lon, etc.)
lat = float(sitedat['site_lat'])
lon = float(sitedat['site_lon'])
plon, plat, dp, dm = pmag.dia_vgp(
dec, inc, a95, lat, lon) # get the VGP for this site
if PmagSiteRec['site_tilt_correction'] == '-1':
C = ' (spec coord) '
if PmagSiteRec['site_tilt_correction'] == '0':
C = ' (geog. coord) '
if PmagSiteRec['site_tilt_correction'] == '100':
C = ' (strat. coord) '
PmagResRec["pmag_result_name"] = "VGP Site: " + \
PmagSiteRec["er_site_name"] # decorate some more
PmagResRec["result_description"] = "Site VGP, coord system = " + \
str(coord) + ' component: ' + comp
PmagResRec['er_site_names'] = PmagSiteRec['er_site_name']
PmagResRec['pmag_criteria_codes'] = 'ACCEPT'
PmagResRec['er_citation_names'] = 'This study'
PmagResRec['er_analyst_mail_names'] = user
PmagResRec["er_location_names"] = PmagSiteRec["er_location_name"]
if avg_directions_by_sample:
PmagResRec["er_sample_names"] = PmagSiteRec["er_sample_names"]
else:
PmagResRec["er_specimen_names"] = PmagSiteRec["er_specimen_names"]
PmagResRec["tilt_correction"] = PmagSiteRec['site_tilt_correction']
PmagResRec["pole_comp_name"] = PmagSiteRec['site_comp_name']
PmagResRec["average_dec"] = PmagSiteRec["site_dec"]
PmagResRec["average_inc"] = PmagSiteRec["site_inc"]
PmagResRec["average_alpha95"] = PmagSiteRec["site_alpha95"]
PmagResRec["average_n"] = PmagSiteRec["site_n"]
PmagResRec["average_n_lines"] = PmagSiteRec["site_n_lines"]
PmagResRec["average_n_planes"] = PmagSiteRec["site_n_planes"]
PmagResRec["vgp_n"] = PmagSiteRec["site_n"]
PmagResRec["average_k"] = PmagSiteRec["site_k"]
PmagResRec["average_r"] = PmagSiteRec["site_r"]
PmagResRec["average_lat"] = '%10.4f ' % (lat)
PmagResRec["average_lon"] = '%10.4f ' % (lon)
if agefile != "":
PmagResRec = pmag.get_age(
PmagResRec, "er_site_names", "average_", AgeNFO, DefaultAge)
site_height = pmag.get_dictitem(
height_nfo, 'er_site_name', site, 'T')
if len(site_height) > 0:
PmagResRec["average_height"] = site_height[0]['site_height']
PmagResRec["vgp_lat"] = '%7.1f ' % (plat)
PmagResRec["vgp_lon"] = '%7.1f ' % (plon)
PmagResRec["vgp_dp"] = '%7.1f ' % (dp)
PmagResRec["vgp_dm"] = '%7.1f ' % (dm)
PmagResRec["magic_method_codes"] = PmagSiteRec["magic_method_codes"]
if '0' in PmagSiteRec['site_tilt_correction'] and "DA-DIR-GEO" not in PmagSiteRec['magic_method_codes']:
PmagSiteRec['magic_method_codes'] = PmagSiteRec['magic_method_codes'] + ":DA-DIR-GEO"
if '100' in PmagSiteRec['site_tilt_correction'] and "DA-DIR-TILT" not in PmagSiteRec['magic_method_codes']:
PmagSiteRec['magic_method_codes'] = PmagSiteRec['magic_method_codes'] + ":DA-DIR-TILT"
PmagSiteRec['site_polarity'] = ""
if avg_by_polarity: # assign polarity based on angle of pole lat to spin axis - may want to re-think this sometime
angle = pmag.angle([0, 0], [0, (90 - plat)])
if angle <= 55.:
PmagSiteRec["site_polarity"] = 'n'
if angle > 55. and angle < 125.:
PmagSiteRec["site_polarity"] = 't'
if angle >= 125.:
PmagSiteRec["site_polarity"] = 'r'
PmagResults.append(PmagResRec)
if avg_by_polarity:
# find the tilt corrected data
crecs = pmag.get_dictitem(
PmagSites, 'site_tilt_correction', '100', 'T')
if len(crecs) < 2:
# if there aren't any, find the geographic corrected data
crecs = pmag.get_dictitem(
PmagSites, 'site_tilt_correction', '0', 'T')
if len(crecs) > 2: # if there are some,
comp = pmag.get_list(crecs, 'site_comp_name').split(':')[
0] # find the first component
# fish out all of the first component
crecs = pmag.get_dictitem(crecs, 'site_comp_name', comp, 'T')
precs = []
for rec in crecs:
precs.append({'dec': rec['site_dec'], 'inc': rec['site_inc'],
'name': rec['er_site_name'], 'loc': rec['er_location_name']})
# calculate average by polarity
polpars = pmag.fisher_by_pol(precs)
# hunt through all the modes (normal=A, reverse=B, all=ALL)
for mode in list(polpars.keys()):
PolRes = {}
PolRes['er_citation_names'] = 'This study'
PolRes["pmag_result_name"] = "Polarity Average: Polarity " + mode
PolRes["data_type"] = "a"
PolRes["average_dec"] = '%7.1f' % (polpars[mode]['dec'])
PolRes["average_inc"] = '%7.1f' % (polpars[mode]['inc'])
PolRes["average_n"] = '%i' % (polpars[mode]['n'])
PolRes["average_r"] = '%5.4f' % (polpars[mode]['r'])
PolRes["average_k"] = '%6.0f' % (polpars[mode]['k'])
PolRes["average_alpha95"] = '%7.1f' % (
polpars[mode]['alpha95'])
PolRes['er_site_names'] = polpars[mode]['sites']
PolRes['er_location_names'] = polpars[mode]['locs']
PolRes['magic_software_packages'] = version_num
PmagResults.append(PolRes)
if not skip_intensities and nositeints != 1:
for site in sites: # now do intensities for each site
if plotsites:
print(site)
if not avg_intensities_by_sample:
key, intlist = 'specimen', SpecInts # if using specimen level data
if avg_intensities_by_sample:
key, intlist = 'sample', PmagSamps # if using sample level data
# get all the intensities for this site
Ints = pmag.get_dictitem(intlist, 'er_site_name', site, 'T')
if len(Ints) > 0: # there are some
# get average intensity stuff for site table
PmagSiteRec = pmag.average_int(Ints, key, 'site')
# get average intensity stuff for results table
PmagResRec = pmag.average_int(Ints, key, 'average')
if plotsites: # if site by site examination requested - print this site out to the screen
for rec in Ints:
print(rec['er_' + key + '_name'], ' %7.1f' %
(1e6 * float(rec[key + '_int'])))
if len(Ints) > 1:
print('Average: ', '%7.1f' % (
1e6 * float(PmagResRec['average_int'])), 'N: ', len(Ints))
print('Sigma: ', '%7.1f' % (
1e6 * float(PmagResRec['average_int_sigma'])), 'Sigma %: ', PmagResRec['average_int_sigma_perc'])
input('Press any key to continue\n')
er_location_name = Ints[0]["er_location_name"]
# decorate the records
PmagSiteRec["er_location_name"] = er_location_name
PmagSiteRec["er_citation_names"] = "This study"
PmagResRec["er_location_names"] = er_location_name
PmagResRec["er_citation_names"] = "This study"
PmagSiteRec["er_analyst_mail_names"] = user
PmagResRec["er_analyst_mail_names"] = user
PmagResRec["data_type"] = 'i'
if not avg_intensities_by_sample:
PmagSiteRec['er_specimen_names'] = pmag.get_list(
Ints, 'er_specimen_name') # list of all specimens used
PmagResRec['er_specimen_names'] = pmag.get_list(
Ints, 'er_specimen_name')
PmagSiteRec['er_sample_names'] = pmag.get_list(
Ints, 'er_sample_name') # list of all samples used
PmagResRec['er_sample_names'] = pmag.get_list(
Ints, 'er_sample_name')
PmagSiteRec['er_site_name'] = site
PmagResRec['er_site_names'] = site
PmagSiteRec['magic_method_codes'] = pmag.get_list(
Ints, 'magic_method_codes')
PmagResRec['magic_method_codes'] = pmag.get_list(
Ints, 'magic_method_codes')
kill = pmag.grade(PmagSiteRec, accept, 'site_int')
if nocrit == 1 or len(kill) == 0:
b, sig = float(PmagResRec['average_int']), ""
if PmagResRec['average_int_sigma'] != "":
sig = float(PmagResRec['average_int_sigma'])
# fish out site direction
sdir = pmag.get_dictitem(
PmagResults, 'er_site_names', site, 'T')
# get the VDM for this record using last average
# inclination (hope it is the right one!)
if len(sdir) > 0 and sdir[-1]['average_inc'] != "":
inc = float(sdir[-1]['average_inc'])
# get magnetic latitude using dipole formula
mlat = pmag.magnetic_lat(inc)
# get VDM with magnetic latitude
PmagResRec["vdm"] = '%8.3e ' % (pmag.b_vdm(b, mlat))
PmagResRec["vdm_n"] = PmagResRec['average_int_n']
if 'average_int_sigma' in list(PmagResRec.keys()) and PmagResRec['average_int_sigma'] != "":
vdm_sig = pmag.b_vdm(
float(PmagResRec['average_int_sigma']), mlat)
PmagResRec["vdm_sigma"] = '%8.3e ' % (vdm_sig)
else:
PmagResRec["vdm_sigma"] = ""
mlat = "" # define a model latitude
if get_model_lat == 1: # use present site latitude
mlats = pmag.get_dictitem(
SiteNFO, 'er_site_name', site, 'T')
if len(mlats) > 0:
mlat = mlats[0]['site_lat']
# use a model latitude from some plate reconstruction model
# (or something)
elif get_model_lat == 2:
mlats = pmag.get_dictitem(
ModelLats, 'er_site_name', site, 'T')
if len(mlats) > 0:
PmagResRec['model_lat'] = mlats[0]['site_model_lat']
mlat = PmagResRec['model_lat']
if mlat != "":
# get the VADM using the desired latitude
PmagResRec["vadm"] = '%8.3e ' % (
pmag.b_vdm(b, float(mlat)))
if sig != "":
vdm_sig = pmag.b_vdm(
float(PmagResRec['average_int_sigma']), float(mlat))
PmagResRec["vadm_sigma"] = '%8.3e ' % (vdm_sig)
PmagResRec["vadm_n"] = PmagResRec['average_int_n']
else:
PmagResRec["vadm_sigma"] = ""
# fish out site information (lat/lon, etc.)
sitedat = pmag.get_dictitem(
SiteNFO, 'er_site_name', PmagSiteRec['er_site_name'], 'T')
if len(sitedat) > 0:
sitedat = sitedat[0]
PmagResRec['average_lat'] = sitedat['site_lat']
PmagResRec['average_lon'] = sitedat['site_lon']
else:
PmagResRec['average_lat'] = 'UNKNOWN'
PmagResRec['average_lon'] = 'UNKNOWN'
PmagResRec['magic_software_packages'] = version_num
PmagResRec["pmag_result_name"] = "V[A]DM: Site " + site
PmagResRec["result_description"] = "V[A]DM of site"
PmagResRec["pmag_criteria_codes"] = "ACCEPT"
if agefile != "":
PmagResRec = pmag.get_age(
PmagResRec, "er_site_names", "average_", AgeNFO, DefaultAge)
site_height = pmag.get_dictitem(
height_nfo, 'er_site_name', site, 'T')
if len(site_height) > 0:
PmagResRec["average_height"] = site_height[0]['site_height']
PmagSites.append(PmagSiteRec)
PmagResults.append(PmagResRec)
if len(PmagSites) > 0:
Tmp, keylist = pmag.fillkeys(PmagSites)
pmag.magic_write(siteout, Tmp, 'pmag_sites')
print(' sites written to ', siteout)
else:
print("No Site level table")
if len(PmagResults) > 0:
TmpRes, keylist = pmag.fillkeys(PmagResults)
pmag.magic_write(resout, TmpRes, 'pmag_results')
print(' results written to ', resout)
else:
print("No Results level table") |
def sort_subtrees(self, *subtrees):
"""sort the given subtrees (of type DGParentedTree) based on their
linear position in this RSTTree. If two subtrees have the same
linear position in the RSTTree (i.e. one is a child of the other),
they are sorted by their height in reverse order (i.e. the child
appears before its parent).
"""
subtrees_desc_height = sorted(subtrees,
key=methodcaller('node_height', self),
reverse=True)
return sorted(subtrees_desc_height,
key=methodcaller('get_position', self)) | sort the given subtrees (of type DGParentedTree) based on their
linear position in this RSTTree. If two subtrees have the same
linear position in the RSTTree (i.e. one is a child of the other),
they are sorted by their height in reverse order (i.e. the child
appears before its parent). | Below is the the instruction that describes the task:
### Input:
sort the given subtrees (of type DGParentedTree) based on their
linear position in this RSTTree. If two subtrees have the same
linear position in the RSTTree (i.e. one is a child of the other),
they are sorted by their height in reverse order (i.e. the child
appears before its parent).
### Response:
def sort_subtrees(self, *subtrees):
"""sort the given subtrees (of type DGParentedTree) based on their
linear position in this RSTTree. If two subtrees have the same
linear position in the RSTTree (i.e. one is a child of the other),
they are sorted by their height in reverse order (i.e. the child
appears before its parent).
"""
subtrees_desc_height = sorted(subtrees,
key=methodcaller('node_height', self),
reverse=True)
return sorted(subtrees_desc_height,
key=methodcaller('get_position', self)) |
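Since Python's sorted() is stable, the two-pass sort above keeps the height-descending order among subtrees that share a linear position, so a child lands before its parent (assuming, as the docstring implies, that a child reports a greater node_height than its parent). A self-contained illustration with a hypothetical Subtree stand-in for DGParentedTree:

from operator import methodcaller

class Subtree:  # hypothetical stand-in exposing the two methods the sort relies on
    def __init__(self, name, position, height):
        self.name, self._position, self._height = name, position, height
    def get_position(self, tree):  # linear position within the given tree
        return self._position
    def node_height(self, tree):   # height of this node in the given tree
        return self._height

subtrees = [Subtree('parent', 2, 1), Subtree('child', 2, 3), Subtree('leaf', 1, 0)]
by_height = sorted(subtrees, key=methodcaller('node_height', None), reverse=True)
print([t.name for t in sorted(by_height, key=methodcaller('get_position', None))])
# -> ['leaf', 'child', 'parent']: equal positions keep the height order, child first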
def legal_node_coords():
"""
Return all legal node coordinates on the grid
"""
nodes = set()
for tile_id in legal_tile_ids():
for node in nodes_touching_tile(tile_id):
nodes.add(node)
logging.debug('Legal node coords({})={}'.format(len(nodes), nodes))
return nodes | Return all legal node coordinates on the grid | Below is the instruction that describes the task:
### Input:
Return all legal node coordinates on the grid
### Response:
def legal_node_coords():
"""
Return all legal node coordinates on the grid
"""
nodes = set()
for tile_id in legal_tile_ids():
for node in nodes_touching_tile(tile_id):
nodes.add(node)
logging.debug('Legal node coords({})={}'.format(len(nodes), nodes))
return nodes |
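The same accumulation can be written as a set comprehension; this compact variant assumes the same legal_tile_ids() and nodes_touching_tile() helpers are in scope:

def legal_node_coords_compact():
    # set-comprehension form of the nested loop above
    return {node
            for tile_id in legal_tile_ids()
            for node in nodes_touching_tile(tile_id)}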