| content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
|---|---|---|
from hmmlearn.hmm import GaussianHMM
def getHiddenStatus(data):
"""
Model the data with a Gaussian HMM and return the predicted hidden states.
"""
cols = ["r_5", "r_20", "a_5", "a_20"]
model = GaussianHMM(n_components=3, covariance_type="full", n_iter=1000,
random_state=2010)
model.fit(data[cols])
hiddenStatus = model.predict(data[cols])
return hiddenStatus
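A minimal usage sketch (the column values are synthetic; assumes hmmlearn and pandas are installed):
import numpy as np
import pandas as pd
rng = np.random.default_rng(0)
demo = pd.DataFrame(rng.normal(size=(200, 4)), columns=["r_5", "r_20", "a_5", "a_20"])
states = getHiddenStatus(demo)  # one hidden-state label (0, 1 or 2) per row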
|
4a613e426b8a4f16e02f535aebc2752d4a99ae25
| 3,640,524
|
def format_time(data, year):
"""Format any time variables in US.
Parameters
----------
data : pd.DataFrame
Data without time formatting.
year : int
The `year` of the wave being processed.
Returns
-------
data : pd.DataFrame
Data with time formatting.
"""
# See to do messages at the top of the file.
# There's some weird overlap in the pidp data. There's essentially a gap in September 2008 with no one in it from
# BHPS, which makes transition models fail.
# The following two lines are a crude workaround.
# if self.year <= 2008:
# self.year += 1
data["time"] = year
return data
|
858d7e48143a16e644d4f1241cd8918385dc7c5f
| 3,640,525
|
def get_connection(sid):
"""
Attempts to connect to the given server and
returns a connection.
"""
server = get_server(sid)
try:
shell = spur.SshShell(
hostname=server["host"],
username=server["username"],
password=server["password"],
port=server["port"],
missing_host_key=spur.ssh.MissingHostKey.accept,
connect_timeout=10)
shell.run(["echo", "connected"])
except spur.ssh.ConnectionError as e:
raise WebException(
"Cannot connect to {}@{}:{} with the specified password".format(
server["username"], server["host"], server["port"]))
return shell
|
933aa768640455ed21b914c4cb432436a7225e4e
| 3,640,526
|
from typing import List
def tail(filename: str, nlines: int = 20, bsz: int = 4096) -> List[str]:
"""
Pure python equivalent of the UNIX ``tail`` command. Simply pass a filename and the number of lines you want to load
from the end of the file, and a ``List[str]`` of lines (in forward order) will be returned.
This function is simply a wrapper for the highly efficient :func:`.io_tail`, designed for usage with a small (<10,000) amount
of lines to be tailed. To allow for the lines to be returned in the correct order, it must load all ``nlines`` lines into memory
before it can return the data.
If you need to ``tail`` a large amount of data, e.g. 10,000+ lines of a logfile, you should consider using the lower level
function :func:`.io_tail` - which acts as a generator, only loading a certain amount of bytes into memory per iteration.
Example file ``/tmp/testing``::
this is an example 1
this is an example 2
this is an example 3
this is an example 4
this is an example 5
this is an example 6
Example usage::
>>> from privex.helpers import tail
>>> lines = tail('/tmp/testing', nlines=3)
>>> print("\\n".join(lines))
this is an example 4
this is an example 5
this is an example 6
:param str filename: Path to file to tail. Relative or absolute path. Absolute path is recommended for safety.
:param int nlines: Total number of lines to retrieve from the end of the file
:param int bsz: Block size (in bytes) to load with each iteration (default: 4096 bytes). DON'T CHANGE UNLESS YOU
UNDERSTAND WHAT THIS MEANS.
:return List[str] lines: The last 'nlines' lines of the file 'filename' - in forward order.
"""
res = []
with open(filename, 'rb') as fp:
for chunk in io_tail(f=fp, nlines=nlines, bsz=bsz):
res = chunk + res
return res
|
e5f94cdc349610189c85c82c66589c243063f5a6
| 3,640,528
|
def initialized(name, secret_shares=5, secret_threshold=3, pgp_keys=None,
keybase_users=None, unseal=True):
"""
Ensure that the vault instance has been initialized and run the
initialization if it has not.
:param name: The id used for the state definition
:param secret_shares: The number of secret shares to use for the
initialization key
:param secret_threshold: The number of keys required to unseal the vault
:param pgp_keys: List of PGP public key strings to use for encrypting
the sealing keys
:param keybase_users: List of Keybase users to retrieve public PGP keys
for to use in encrypting the sealing keys
:param unseal: Whether to unseal the vault during initialization
:returns: Result of the execution
:rtype: dict
"""
ret = {'name': name,
'comment': '',
'result': '',
'changes': {}}
initialized = __salt__['vault.is_initialized']()
if initialized:
ret['result'] = True
ret['comment'] = 'Vault is already initialized'
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Vault will be initialized.'
else:
success, sealing_keys, root_token = __salt__['vault.initialize'](
secret_shares, secret_threshold, pgp_keys, keybase_users, unseal
) if not initialized else (True, {}, '')
ret['result'] = success
ret['changes'] = {
'root_credentials': {
'new': {
'sealing_keys': sealing_keys,
'root_token': root_token
},
'old': {}
}
}
ret['comment'] = 'Vault has {}initialized'.format(
'been ' if success else 'failed to be ')
return ret
|
c2b88bb8875ded7c7274b0695a9de9fb287b0b57
| 3,640,529
|
def plot(plot, x, y, **kwargs):
"""
Adds series to plot. By default this is displayed as continuous line.
Refer to matplotlib.pyplot.plot() help for more info. X and y coordinates
are expected to be in user's data units.
Args:
plot: matplotlib.pyplot
Plot to which series should be added.
x: (float,)
Collection of x-coordinates in user units.
y: (float,)
Collection of y-coordinates in user units.
title: str
Series legend.
"""
# add series
return plot.plot(x, y, **kwargs)
|
1e861243a87b61461fb49dcadf19ec9099fa5a1f
| 3,640,530
|
def glyph_by_hershey_code(hershey_code):
"""
Returns the Hershey glyph corresponding to `hershey_code`.
"""
glyph = glyphs_by_hershey_code.get(hershey_code)
if glyph is None:
raise ValueError("No glyph for hershey code %d" % hershey_code)
return glyph
|
54a8c9657466f2348e93667e8a638c3e44681adb
| 3,640,531
|
def _get_prefab_from_address(address):
"""
Parses an address of the format ip[:port] and returns a prefab object connected to the remote node
"""
try:
if ':' in address:
ip, port = address.split(':')
port = int(port)
else:
ip, port = address, 22
except Exception:
raise ValueError("Invalid node address")
return j.tools.prefab.getFromSSH(addr=ip, port=port)
|
3520dcca249073433ece88a4d9b31e8c2d73eb86
| 3,640,532
|
def interval_to_errors(value, low_bound, hi_bound):
"""
Convert error intervals to errors
:param value: central value
:param low_bound: interval low bound
:param hi_bound: interval high bound
:return: (error minus, error plus)
"""
error_plus = hi_bound - value
error_minus = value - low_bound
return error_minus, error_plus
|
ffee403968ddf5fd976df79a90bdbb62474ede11
| 3,640,533
|
from typing import Any
from typing import cast
def log_enabled_arg(request: Any) -> bool:
"""Using different log messages.
Args:
request: special fixture that returns the fixture params
Returns:
The params values are returned one at a time
"""
return cast(bool, request.param)
|
9ff97ab8f5cc8e3a0c548e613b75b5da050eb53d
| 3,640,534
|
def expsign(sign, exp):
"""
optimization of sign ** exp
"""
if sign == 1:
return 1
assert sign == -1
return -1 if exp % 2 else 1
|
d770aaa2a4d20c9530a213631047d1d0f9cca3f7
| 3,640,535
|
def convert_format(tensors, kind, target_kind):
"""Converts data from format 'kind' to one of the formats specified in 'target_kind'
This allows us to convert data to/from dataframe representations for operators that
only support certain reprentations
"""
# this is all much more difficult because of multihot columns, which don't have
# great representations in dicts of cpu/gpu arrays. we're representing multihots
# as tuples of (values, offsets) tensors in this case - but have to do work at
# each step in terms of converting.
if kind & target_kind:
return tensors, kind
elif target_kind & Supports.GPU_DICT_ARRAY:
if kind == Supports.CPU_DICT_ARRAY:
return _convert_array(tensors, cp.array), Supports.GPU_DICT_ARRAY
elif kind == Supports.CPU_DATAFRAME:
return _pandas_to_array(tensors, False), Supports.GPU_DICT_ARRAY
elif kind == Supports.GPU_DATAFRAME:
return _cudf_to_array(tensors, False), Supports.GPU_DICT_ARRAY
elif target_kind & Supports.CPU_DICT_ARRAY:
if kind == Supports.GPU_DICT_ARRAY:
return _convert_array(tensors, cp.asnumpy), Supports.CPU_DICT_ARRAY
elif kind == Supports.CPU_DATAFRAME:
return _pandas_to_array(tensors, True), Supports.CPU_DICT_ARRAY
elif kind == Supports.GPU_DATAFRAME:
return _cudf_to_array(tensors, True), Supports.CPU_DICT_ARRAY
elif target_kind & Supports.GPU_DATAFRAME:
if kind == Supports.CPU_DATAFRAME:
return cudf.DataFrame(tensors), Supports.GPU_DATAFRAME
return _array_to_cudf(tensors), Supports.GPU_DATAFRAME
elif target_kind & Supports.CPU_DATAFRAME:
if kind == Supports.GPU_DATAFRAME:
return tensors.to_pandas(), Supports.CPU_DATAFRAME
elif kind == Supports.CPU_DICT_ARRAY:
return _array_to_pandas(tensors), Supports.CPU_DATAFRAME
elif kind == Supports.GPU_DICT_ARRAY:
return _array_to_pandas(_convert_array(tensors, cp.asnumpy)), Supports.CPU_DATAFRAME
raise ValueError("unsupported target for converting tensors", target_kind)
|
8925d002395da05c6b5a7374a7288cc0511df1cb
| 3,640,536
|
import urllib
import re
def template2path(template, params, ranges=None):
"""Converts a template and a dict of parameters to a path fragment.
Converts a template, such as /{name}/ and a dictionary of parameter
values to a URL path (string).
Parameter values that are used for buildig the path are converted to
strings using `str()` and URI-escaped, then validated against the their
range. Unused parameters are ignored.
Any optional ([]) blocks in the template are skipped unless they contain at
least one parameter and all parameters needed to fill the block (including
nested blocks) are present in `params`.
Example:
>>> import rhino.mapper
>>> rhino.mapper.template2path("/{name}", {'name': 'fred'})
'/fred'
"""
if len(template) and -1 < template.find('|') < len(template) - 1:
raise InvalidTemplateError("'|' may only appear at the end, found at position %d in %s" % (template.find('|'), template))
if ranges is None:
ranges = DEFAULT_RANGES
# Stack for path components. A new list is added for each '[]' block
# encountered. When the closing ']' is reached, the last element is
# removed and either merged into the previous one (we keep the
# block) or discarded (we skip the block). At the end, this should
# contain a flat list of strings as its single element.
stack = [[]]
pattern = "[^/]+" # default range
name = "" # name of the current parameter
bracketdepth = 0 # current level of nested brackets
skip_to_depth = 0 # if > 1, skip until we're back at this bracket level
state = S_PATH
rangename = None # range name for the current parameter
seen_name = [False] # have we seen a named param in bracket level (index)?
for c in template_splitter.split(template):
if state == S_PATH:
if len(c) > 1:
stack[-1].append(c)
elif c == '[':
bracketdepth += 1
stack.append([])
seen_name.append(False)
elif c == ']':
bracketdepth -= 1
if bracketdepth < 0:
raise InvalidTemplateError("Mismatched brackets in %s" % template)
last_elem = stack.pop()
if seen_name.pop():
stack[-1].extend(last_elem)
seen_name[-1] = True
elif c == '{':
name = ""
state = S_TEMPLATE
elif c == '}':
raise InvalidTemplateError("Mismatched braces in %s" % template)
elif c == '|':
pass
else:
stack[-1].append(c)
elif state == S_SKIP:
if c == '[':
bracketdepth += 1
seen_name.append(False)
elif c == ']':
if bracketdepth == skip_to_depth:
stack.pop()
skip_to_depth = 0
state = S_PATH
bracketdepth -= 1
seen_name.pop()
else: # state == S_TEMPLATE
if c == '}':
if name not in params:
if bracketdepth:
# We're missing a parameter, but it's ok since
# we're inside a '[]' block. Skip everything
# until we reach the end of the current block.
skip_to_depth = bracketdepth
state = S_SKIP
else:
raise InvalidArgumentError("Missing parameter '%s' in %s" % (name, template))
else:
if rangename and rangename in ranges:
regex = ranges[rangename]
else:
regex = pattern
value_bytes = unicode(params[name]).encode('utf-8')
value = urllib.quote(value_bytes, safe='/:;')
if not re.match('^' + regex + '$', value):
raise InvalidArgumentError("Value '%s' for parameter '%s' does not match '^%s$' in %s" % (value, name, regex, template))
stack[-1].append(value)
state = S_PATH
rangename = None
else:
name = c
if name.find(":") > -1:
name, rangename = name.split(":")
seen_name[bracketdepth] = True
if bracketdepth != 0:
raise InvalidTemplateError("Mismatched brackets in %s" % template)
if state == S_TEMPLATE:
raise InvalidTemplateError("Mismatched braces in %s" % template)
# None of these Should Ever Happen [TM]
if state == S_SKIP: # pragma: no cover
raise MapperException("Internal error: end state is S_SKIP")
if len(stack) > 1: # pragma: no cover
raise MapperException("Internal error: stack not empty")
if len(seen_name) != 1: # pragma: no cover
raise MapperException("Internal error: seen_name not empty")
return "".join(stack[0])
|
daf628ab6ef1a6fddb612c0f4c817085ac23ce2c
| 3,640,537
|
from typing import Union
from typing import Dict
from typing import Any
def calculate_total_matched(
market_book: Union[Dict[str, Any], MarketBook]
) -> Union[int, float]:
"""
Calculate the total matched on this market from the amounts matched on each runner at each price point. Useful for historic data where this field is not populated
:param market_book: A market book either as a dictionary or betfairlightweight MarketBook object
:return: The total matched on this market
"""
if type(market_book) is MarketBook:
market_book = market_book._data
return sum(
ps["size"]
for r in market_book.get("runners", [])
for ps in r.get("ex", {}).get("tradedVolume", [])
)
|
7bc3d4680e5507d1400e94ab30213c0cc6d817bb
| 3,640,538
|
import re
def _newline_to_ret_token(instring):
"""Replaces newlines with the !RET token.
"""
return re.sub(r'\n', '!RET', instring)
|
4fcf60025f79811e99151019a479da04f25ba47c
| 3,640,540
|
def _ComputeLineCounts(old_lines, chunks):
"""Compute the length of the old and new sides of a diff.
Args:
old_lines: List of lines representing the original file.
chunks: List of chunks as returned by patching.ParsePatchToChunks().
Returns:
A tuple (old_len, new_len) representing len(old_lines) and
len(new_lines), where new_lines is the list representing the
result of applying the patch chunks to old_lines, however, without
actually computing new_lines.
"""
old_len = len(old_lines)
new_len = old_len
if chunks:
(_, old_b), (_, new_b), old_lines, _ = chunks[-1]
new_len += new_b - old_b
return old_len, new_len
|
ba99714016b69d87f260c8e7b8793468a2f7b04d
| 3,640,541
|
def _read_int(file_handle, data_size):
"""
Read a signed integer of defined data_size from file.
:param file_handle: The file handle to read from at current position
:param data_size: The data size in bytes of the integer to read
:returns: The integer read and decoded
"""
return int.from_bytes(file_handle.read(data_size), byteorder="little", signed=True)
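A usage sketch reading a 4-byte little-endian signed integer from an in-memory stream:
import io
buf = io.BytesIO((-42).to_bytes(4, byteorder="little", signed=True))
assert _read_int(buf, 4) == -42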
|
4d2a7e82e9daa828c0e5b180250834f2fa9977d5
| 3,640,542
|
import numpy
def quaternion_to_matrix(quat):
"""OI
"""
qw = quat[0][0]
qx = quat[1][0]
qy = quat[2][0]
qz = quat[3][0]
rot = numpy.array([[1 - 2*qy*qy - 2*qz*qz, 2*qx*qy - 2*qz*qw, 2*qx*qz + 2*qy*qw],
[2*qx*qy + 2*qz*qw, 1 - 2*qx*qx - 2*qz*qz, 2*qy*qz - 2*qx*qw],
[2*qx*qz - 2*qy*qw, 2*qy*qz + 2*qx*qw, 1 - 2*qx*qx - 2*qy*qy]])
return rot
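A usage sketch: the identity quaternion (w=1, x=y=z=0), given as a column vector, yields the identity matrix:
quat = numpy.array([[1.0], [0.0], [0.0], [0.0]])  # [w, x, y, z] as a 4x1 column vector
rot = quaternion_to_matrix(quat)  # 3x3 identity matrix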
|
67f02ea97db1af4a763c3a97957f36de29da0157
| 3,640,543
|
def get_cart_from_request(request, create=False):
"""Returns Cart object for current user. If create option is True,
new cart will be saved to db"""
cookie_token = request.get_signed_cookie(
Cart.COOKIE_NAME, default=None)
if request.user.is_authenticated():
user = request.user
queryset = user.carts
token = get_user_open_cart_token(request.user)
else:
user = None
queryset = Cart.objects.anonymous()
token = cookie_token
try:
cart = queryset.open().get(token=token)
except Cart.DoesNotExist:
if create:
cart = Cart.objects.create(
user=user,
token=cookie_token)
else:
cart = Cart()
cart.discounts = request.discounts
return cart
|
d22c2587a20c12bac1fe713d40ddf069bfc5f40e
| 3,640,544
|
def make_concrete_rule(rule_no, zone_map, direction, zone, rule, concrete_port):
"""Take a rule and create a corresponding concrete rule."""
def make_rule(target_zone, port):
return ConcreteRule(source_rules=[rule], rule_no=rule_no, target_zone=target_zone,
direction=direction, port=port, action="allow")
target_zone = zone_map[rule.target_zone]
# Rule level ephemerality overrides zone level
if '+ephemeral_strict' in rule.tags:
ephem_start = 32768
elif '+ephemeral_loose' in rule.tags:
ephem_start = 1024
elif rule.direction == '>' and '+ephemeral_strict' in zone.tags and direction == 'ingress':
# An internal network with systems that use a tight ephemeral port range
ephem_start = 32768
else:
ephem_start = 1024
if concrete_port.proto == 'all':
# ISSUE: We should *maybe* prevent rules with the "all" protocol from being
# concretized. Because of the nature of "all" rules you can't restrict the
# return traffic at all. Really, this should be a policy level error?
return_port = ConcretePort(proto=concrete_port.proto, from_port=0, to_port=0)
else:
return_port = ConcretePort(proto=concrete_port.proto, from_port=ephem_start, to_port=65535)
if direction == 'ingress':
if rule.direction == '>':
if rule.zone == zone.name or rule.zone == 'all': # a > b (return traffic)
return make_rule(target_zone=rule.target_zone, port=return_port)
elif rule.target_zone == zone.name: # b > a (forward traffic)
return make_rule(target_zone=rule.zone, port=concrete_port)
else: # '<'
if rule.zone == zone.name: # a < b (forward traffic)
return make_rule(target_zone=rule.target_zone, port=concrete_port)
elif rule.target_zone == zone.name: # b < a
raise NotImplementedError("Receiving traffic from internal zone?")
else: # egress
if rule.direction == '>':
if rule.zone == zone.name or rule.zone == 'all': # a > b (forward traffic)
return make_rule(target_zone=rule.target_zone, port=concrete_port)
elif rule.target_zone == zone.name: # b > a (return traffic)
return make_rule(target_zone=rule.zone, port=return_port)
else: # '<'
if rule.zone == zone.name: # a < b (return traffic)
return make_rule(target_zone=rule.target_zone, port=return_port)
elif rule.target_zone == zone.name: # b < a
raise NotImplementedError("Receiving traffic from internal zone?")
raise AssertionError("should not reach here")
|
b7b1babc32c2d81193e62e90b5fd751ad8575ff1
| 3,640,545
|
from typing import List
def downcast(df: pd.DataFrame, signed_columns: List[str] = None) -> pd.DataFrame:
"""
Automatically check for signed/unsigned columns and downcast.
A column that is allowed to be signed may happen to contain only non-negative values; to avoid
downcasting such a column to an unsigned type, pass it explicitly via `signed_columns`.
:arg df: Data as Pandas DataFrame
:arg signed_columns: List of signed columns (signed = positive and negative values, unsigned = only positive values).
"""
logger.info(f'Size before downcasting: {df.memory_size} KB')
for column in df.columns:
if df[column].dtype in [np.int8, np.int16, np.int32, np.int64]:
if (df[column] < 0).any() or (signed_columns is not None and df[column].name in signed_columns):
df[column] = pd.to_numeric(df[column], downcast='signed')
else:
df[column] = pd.to_numeric(df[column], downcast='unsigned')
elif df[column].dtype in [np.float16, np.float32, np.float64]:
df[column] = pd.to_numeric(df[column], downcast='float')
logger.info(f'Size after downcasting: {df.memory_size} KB')
return df
|
2eb2494e5a59630c4e20d114aac076c971f287a6
| 3,640,546
|
from typing import Optional
def entity_type(entity: dict) -> Optional[str]:
"""
Safely get the NGSI type of the given entity.
The type, if present, is expected to be a string, so we convert it if it
isn't.
:param entity: the entity.
:return: the type string if there's a type, `None` otherwise.
"""
return maybe_map(str, safe_get_value(entity, NGSI_TYPE))
|
e4d27b7499710951959cfef5c1191c6744bd02ce
| 3,640,548
|
def read_private_key_data(bio):
"""
Read enough data from bio to fully read a private key.
(The data read is thrown away, though.)
This is required since the format does not contain the actual length
of the privately-serialized private key data. The knowledge of what
to read for each key type is known by OpenSSH itself; see
https://github.com/openssh/openssh-portable/blob/c7670b091a7174760d619ef6738b4f26b2093301/sshkey.c#L2767
for the details.
:param bio: Seekable binary IO object to read from
:return: Tuple of (key format, private key data).
"""
key_format = read_openssh_string(bio)
start_idx = bio.tell()
reader = _readers.get(key_format.decode())
if not reader:
raise NotImplementedError('Unknown key format %r' % key_format)
reader(bio)
end_idx = bio.tell()
bytes_read = end_idx - start_idx
bio.seek(start_idx)
private_key_bytes = bio.read(bytes_read)
return (key_format, private_key_bytes)
|
de1e38c49fe81449b90b14ccab0b2aaf7de121bc
| 3,640,549
|
def list_check(lst):
"""Are all items in lst a list?
>>> list_check([[1], [2, 3]])
True
>>> list_check([[1], "nope"])
False
"""
t = [1 if isinstance(x, list) else 0 for x in lst]
return len(lst) == sum(t)
|
9e2c55cb6e15f89ff2b73a78d5f15310d3cac672
| 3,640,550
|
from typing import Dict
def build_encoded_manifest_from_nested_directory(
data_directory_path: str,
) -> Dict[str, EncodedVideoInfo]:
"""
Creates a dictionary from video_id to EncodedVideoInfo for
encoded videos in the given directory.
Args:
data_directory_path (str): The folder to ls to find encoded
video files.
Returns:
Dict[str, EncodedVideoInfo] mapping video_id to EncodedVideoInfo
for each file in 'data_directory_path'
"""
encoded_video_infos = {}
for participant_id in g_pathmgr.ls(data_directory_path):
participant_folder_path = f"{data_directory_path}/{participant_id}"
for video_file_name in g_pathmgr.ls(participant_folder_path):
video_id = video_file_name[:6]
video_full_path = f"{participant_folder_path}/{video_file_name}"
encoded_video_infos[video_id] = EncodedVideoInfo(video_id, video_full_path)
return encoded_video_infos
|
2a908eb33b140e73d27bca02da449d09e4ac4c5d
| 3,640,552
|
def derive_question(doc):
"""
Return a string that rephrases an action in the
doc in the form of a question.
'doc' is expected to be a spaCy doc.
"""
verb_chunk = find_verb_chunk(doc)
if not verb_chunk:
return None
subj = verb_chunk['subject'].text
obj = verb_chunk['object'].text
if verb_chunk['verb'].tag_ != 'VB':
# If the verb is not in its base form ("to ____" form),
# use the spaCy lemmatizer to convert it to such
verb = verb_chunk['verb'].lemma_
else:
verb = verb_chunk['verb'].text
question = "Why did {} {} {}?".format(subj, verb, obj)
return question
|
876e6733f8cf3d9accf3af1af89241ded4a02481
| 3,640,553
|
def recover_label(pred_variable, gold_variable, mask_variable, label_alphabet, word_recover, sentence_classification=False):
"""
input:
pred_variable (batch_size, sent_len): pred tag result
gold_variable (batch_size, sent_len): gold result variable
mask_variable (batch_size, sent_len): mask variable
"""
pred_variable = pred_variable[word_recover]
# print("reordered labels: {}".format(pred_variable))
gold_variable = gold_variable[word_recover]
mask_variable = mask_variable[word_recover]
batch_size = gold_variable.size(0)
if sentence_classification:
pred_tag = pred_variable.cpu().data.numpy().tolist()
gold_tag = gold_variable.cpu().data.numpy().tolist()
pred_label = [label_alphabet.get_instance(pred) for pred in pred_tag]
gold_label = [label_alphabet.get_instance(gold) for gold in gold_tag]
else:
seq_len = gold_variable.size(1)
mask = mask_variable.cpu().data.numpy()
pred_tag = pred_variable.cpu().data.numpy()
gold_tag = gold_variable.cpu().data.numpy()
batch_size = mask.shape[0]
pred_label = []
gold_label = []
for idx in range(batch_size):
pred = [label_alphabet.get_instance(pred_tag[idx][idy]) for idy in range(seq_len) if mask[idx][idy] != 0]
gold = [label_alphabet.get_instance(gold_tag[idx][idy]) for idy in range(seq_len) if mask[idx][idy] != 0]
assert(len(pred)==len(gold))
pred_label.append(pred)
gold_label.append(gold)
return pred_label, gold_label
|
7f3efef4a0e9041e329c8d1c0c5641bf0c79ff58
| 3,640,554
|
def RegenerateOverview(*args, **kwargs):
"""
RegenerateOverview(Band srcBand, Band overviewBand, char const * resampling="average", GDALProgressFunc callback=0,
void * callback_data=None) -> int
"""
return _gdal.RegenerateOverview(*args, **kwargs)
|
8f05fcb7a12bf09d432b65b9cf049d2ff5cf23b1
| 3,640,555
|
import imp
def import_code(code, name):
""" code can be any object containing code -- string, file object, or
compiled code object. Returns a new module object initialized
by dynamically importing the given code. If the module has already
been imported - then it is returned and not imported a second time.
"""
# Check if 'code' has already been loaded
if (name in config.g_utils_import_dictionary):
return config.g_utils_import_dictionary[name]
# Load the 'code' into the memory
try:
module = imp.new_module(name)
config.g_utils_import_dictionary[name] = module
exec(code, module.__dict__)
return module
except Exception as e:
print("Error={}".format( str(e) ))
return None
|
309fb1e214225dcdf742bc5ea7d21cb502b05ae9
| 3,640,556
|
def two(data: np.ndarray) -> int:
"""
Use the binary numbers in your diagnostic report to calculate the oxygen generator rating and CO2 scrubber rating,
then multiply them together. What is the life support rating of the submarine? (Be sure to represent your answer in
decimal, not binary.)
"""
def loop(most_common: bool) -> int:
"""
Loop through each bit for both the Oxygen generator rating (True) and CO2 scrubber rating (False).
"""
n_bits = len(data[0])
rating_list = np.copy(data)
for pos in range(n_bits):
if len(rating_list) <= 1:
break
pos_data = rating_list[:, pos]
n_0, n_1 = (pos_data == 0).sum(), (pos_data == 1).sum()
if most_common:
bit = 1 if n_1 >= n_0 else 0
else:
bit = 0 if n_1 >= n_0 else 1
rating_list = rating_list[rating_list[:, pos] == bit]
return binary_to_int(rating_list[0])
return loop(most_common=True) * loop(most_common=False)
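binary_to_int is not shown in this snippet; a minimal compatible helper might look like this (an assumption, not the original implementation):
import numpy as np

def binary_to_int(bits: np.ndarray) -> int:
    """Interpret a 1-d array of 0/1 values, most significant bit first, as an integer."""
    return int("".join(str(int(b)) for b in bits), 2)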
|
723984bf673ab23697ccff69e0c7e2529cce2e81
| 3,640,557
|
import six
def get_lr_fit(sess, model, x_train, y_train, x_test, num_steps=100):
"""Fit a multi-class logistic regression classifier.
Args:
x_train: [N, D]. Training data.
y_train: [N]. Training label, integer classes.
x_test: [M, D]. Test data.
Returns:
y_pred: [M]. Integer class prediction of test data.
"""
nbatches = x_train.shape[0]
y_pred = np.zeros([x_test.shape[0], x_test.shape[1]])
for ii in six.moves.xrange(nbatches):
x_train_ = x_train[ii].reshape([x_train[ii].shape[0], -1])
x_test_ = x_test[ii].reshape([x_test[ii].shape[0], -1])
y_train_ = y_train[ii]
# Reinitialize variables for a new episode.
var_to_init = list(
filter(lambda x: 'LRModel' in x.name, tf.global_variables()))
sess.run(tf.variables_initializer(var_to_init))
# Run LR training.
for step in six.moves.xrange(num_steps):
cost, acc, _ = sess.run(
[model.cost, model.acc, model.train_op],
feed_dict={
model.inputs: x_train_,
model.labels: y_train_
})
y_pred[ii] = np.argmax(
sess.run(model.prediction, feed_dict={
model.inputs: x_test_
}), axis=-1)
return y_pred
|
a60654d15e8f0f1c5e7ab11bc9c3e17f3440d286
| 3,640,558
|
import random
def make_block_trials(ntrials_block):
"""Creates a matrix of pseudo-random balanced trial parameters for a block of trials.
Parameters
----------
ntrials_block : int
Number of trials in the block.
Returns
-------
block : 2d array
Matrix of trial parameters (this is NOT random).
order : 1d array
Randomized order to run the trials in.
"""
## CREATE VECTORS OF TRIAL PARAMETER SETTINGS FOR A BLOCK OF TRIALS
# FOR EXAMPLE: COND_VEC = NP.APPEND(NP.ZEROS(NTRIAL_BLOCK/2), NP.ONES(NTRIAL_BLOCK/2))
# ^ CREATES A VECTOR TO HAVE 50% OF EACH OF TWO TRIAL CONDITIONS
# Collect run details into block object
block = Block()
# ADD BLOCK RUN
# EXAMPLE: block.CONDITION = COND_VEC
# Set up array for run order
order = list(range(ntrials_block))
random.shuffle(order)
return block, order
|
ed504af676a660befd3b548e9148e4a6cbc93183
| 3,640,559
|
def view_user(user_id: int):
"""Return the given user's history."""
return render_user(manager.get_user_by_id(user_id))
|
70b88f25b63697682650ae60591e4eee16253433
| 3,640,560
|
def first(c) -> col:
"""
In contrast to pyspark.sql.functions.first this function uses column name as alias
without prefixing it with the aggregation function name.
"""
if isinstance(c, str):
return F.first(c).alias(c)
columnName = c._jc.toString()
return F.first(c).alias(columnName)
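A usage sketch (assumes a local SparkSession and the `from pyspark.sql import functions as F` import that the snippet relies on; the DataFrame is synthetic):
from pyspark.sql import SparkSession, functions as F
spark = SparkSession.builder.master("local[1]").getOrCreate()
df = spark.createDataFrame([(1, "a"), (1, "b"), (2, "c")], ["id", "val"])
df.groupBy("id").agg(first("val")).show()  # aggregated column keeps the plain name "val"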
|
0b7b0bb0d3e2f56c400f3a026f39cb2459b0e54f
| 3,640,561
|
def translate(root_list, use_bag_semantics=False):
"""
Translate a list of relational algebra trees into SQL statements.
:param root_list: a list of tree roots
:param use_bag_semantics: flag for using relational algebra bag semantics
:return: a list of SQL statements
"""
translator = (Translator() if use_bag_semantics else SetTranslator())
return [translator.translate(root).to_sql() for root in root_list]
|
b7a25d8af2e47ba134a6dbf490a0255391b330c1
| 3,640,562
|
import jsonschema
def replace_aliases(record):
"""
Replace all aliases associated with this DID / GUID
"""
# we set force=True so that if MIME type of request is not application/JSON,
# get_json will still throw a UserError.
aliases_json = flask.request.get_json(force=True)
try:
jsonschema.validate(aliases_json, RECORD_ALIAS_SCHEMA)
except jsonschema.ValidationError as err:
logger.warning(f"Bad request body:\n{err}")
raise UserError(err)
aliases = [record["value"] for record in aliases_json["aliases"]]
# authorization and error handling done in driver
blueprint.index_driver.replace_aliases_for_did(aliases, record)
aliases_payload = {"aliases": [{"value": alias} for alias in aliases]}
return flask.jsonify(aliases_payload), 200
|
a19335af1836f1899565b874640cdd0858247bcc
| 3,640,563
|
def pos_tag(docs, language=None, tagger_instance=None, doc_meta_key=None):
"""
Apply Part-of-Speech (POS) tagging to list of documents `docs`. Either load a tagger based on supplied `language`
or use the tagger instance `tagger` which must have a method ``tag()``. A tagger can be loaded via
:func:`~tmtoolkit.preprocess.load_pos_tagger_for_language`.
POS tagging so far only works for English and German. The English tagger uses the Penn Treebank tagset
(https://ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html), the
German tagger uses STTS (http://www.ims.uni-stuttgart.de/forschung/ressourcen/lexika/TagSets/stts-table.html).
:param docs: list of tokenized documents
:param language: the language for the POS tagger (currently only "english" and "german" are supported) if no
`tagger` is given
:param tagger_instance: a tagger instance to use for tagging if no `language` is given
:param doc_meta_key: if this is not None, it must be a string that specifies the key that is used for the
resulting dicts
:return: if `doc_meta_key` is None, return a list of N lists, where N is the number of documents; each of these
lists contains the POS tags for the respective tokens from `docs`, hence each POS list has the same length
as the respective token list of the corresponding document; if `doc_meta_key` is not None, the result list
contains dicts with the only key `doc_meta_key` that maps to the list of POS tags for the corresponding
document
"""
require_listlike(docs)
if tagger_instance is None:
tagger_instance, _ = load_pos_tagger_for_language(language or defaults.language)
docs_meta = []
for dtok in docs:
if len(dtok) > 0:
tokens_and_tags = tagger_instance.tag(dtok)
tags = list(list(zip(*tokens_and_tags))[1])
else:
tags = []
if doc_meta_key:
docs_meta.append({doc_meta_key: tags})
else:
docs_meta.append(tags)
return docs_meta
|
a990acc4caa33c7615c961593557b43ef6d5a6d0
| 3,640,564
|
def NOBE_GA_SH(G,K,topk):
"""detect SH spanners via NOBE-GA[1].
Parameters
----------
G : easygraph.Graph
An unweighted and undirected graph.
K : int
Embedding dimension k
topk : int
top - k structural hole spanners
Returns
-------
SHS : list
The top-k structural hole spanners.
Examples
--------
>>> NOBE_GA_SH(G,K=8,topk=5)
References
----------
.. [1] https://www.researchgate.net/publication/325004496_On_Spectral_Graph_Embedding_A_Non-Backtracking_Perspective_and_Graph_Approximation
"""
Y=eg.NOBE_GA(G,K)
if(isinstance(Y[0,0],complex)):
Y = abs(Y)
kmeans = KMeans(n_clusters=K, random_state=0).fit(Y)
com={}
cluster={}
a=0
for i in G.nodes:
com[i]=kmeans.labels_[a]
a+=1
for i in com:
if com[i] in cluster:
cluster[com[i]].append(i)
else:
cluster[com[i]]=[]
cluster[com[i]].append(i)
vector={}
a=0
for i in G.nodes:
vector[i]=Y[a]
a+=1
rds=RDS(com,cluster,vector,K)
rds_sort=sorted(rds.items(), key=lambda d: d[1],reverse=True)
SHS=list()
a=0
for i in rds_sort:
SHS.append(i[0])
a+=1
if a==topk:
break
return SHS
|
a1f3f8f041e4a89b9d09037479574c27505dd7fa
| 3,640,565
|
import torch
def calculate_correct_answers(model, dataloader, epoch):
"""Calculate correct over total answers"""
forward_backward_func = get_forward_backward_func()
for m in model:
m.eval()
def loss_func(labels, output_tensor):
logits = output_tensor
loss_dict = {}
# Compute the correct answers.
predicted = torch.argmax(logits, dim=-1)
corrects = (predicted == labels).float()
# Add to the counters.
loss_dict['total'] = labels.size(0)
loss_dict['correct'] = corrects.sum().item()
return 0, loss_dict
#defined inside to capture output_predictions
def correct_answers_forward_step(batch, model):
try:
batch_ = next(batch)
except BaseException:
batch_ = batch
images, labels = process_batch(batch_)
# Forward model.
output_tensor = model(images)
return output_tensor, partial(loss_func, labels)
with torch.no_grad():
# For all the batches in the dataset.
total = 0
correct = 0
for _, batch in enumerate(dataloader):
loss_dicts = forward_backward_func(correct_answers_forward_step, batch, model,
optimizer=None, timers=None, forward_only=True)
for loss_dict in loss_dicts:
total += loss_dict['total']
correct += loss_dict['correct']
for m in model:
m.train()
# Reduce.
if mpu.is_pipeline_last_stage():
unreduced = torch.cuda.LongTensor([correct, total])
torch.distributed.all_reduce(unreduced,
group=mpu.get_data_parallel_group())
# Print on screen.
correct_ans = unreduced[0].item()
total_count = unreduced[1].item()
return correct_ans, total_count
|
24e3196cd172719d16524b0bbd6c0848fec3c44e
| 3,640,566
|
from typing import Dict
from typing import Tuple
from typing import Any
import re
def set_template_parameters(
template: Template, template_metadata: TemplateMetadata, input_parameters: Dict[str, str], interactive=False
):
"""Set and verify template parameters' values in the template_metadata."""
if interactive and not communication.has_prompt():
raise errors.ParameterError("Cannot use interactive mode with no prompt")
def validate(var: TemplateParameter, val) -> Tuple[bool, Any]:
try:
return True, var.convert(val)
except ValueError as e:
communication.info(str(e))
return False, val
def read_valid_value(var: TemplateParameter, default_value=None):
"""Prompt the user for a template variable and return a valid value."""
while True:
variable_type = f", type: {var.type}" if var.type else ""
enum_values = f", options: {var.possible_values}" if var.possible_values else ""
default_value = default_value or to_string(var.default)
val = communication.prompt(
f"Enter a value for '{var.name}' ({var.description}{variable_type}{enum_values})",
default=default_value,
show_default=var.has_default,
)
valid, val = validate(var, val)
if valid:
return val
missing_values = []
for parameter in sorted(template.parameters, key=lambda v: v.name):
name = parameter.name
is_valid = True
if name in input_parameters: # NOTE: Inputs override other values. No prompt for them in interactive mode
is_valid, value = validate(parameter, input_parameters[name])
elif interactive:
value = read_valid_value(parameter, default_value=template_metadata.metadata.get(name))
elif name in template_metadata.metadata:
is_valid, value = validate(parameter, template_metadata.metadata[name])
elif parameter.has_default: # Use default value if no value is available in the metadata
value = parameter.default
elif communication.has_prompt():
value = read_valid_value(parameter)
else:
missing_values.append(name)
continue
if not is_valid:
if not communication.has_prompt():
raise errors.TemplateUpdateError(f"Invalid value '{value}' for variable '{name}'")
template_metadata.metadata[name] = read_valid_value(parameter)
else:
template_metadata.metadata[name] = value
if missing_values:
missing_values_str = ", ".join(missing_values)
raise errors.TemplateUpdateError(f"Can't update template, it now requires variable(s): {missing_values_str}")
# NOTE: Ignore internal variables, i.e. __\w__
internal_keys = re.compile(r"^__\w+__$")
metadata_variables = {v for v in template_metadata.metadata if not internal_keys.match(v)} | set(
input_parameters.keys()
)
template_variables = {v.name for v in template.parameters}
unused_metadata_variables = metadata_variables - template_variables
if len(unused_metadata_variables) > 0:
unused_str = "\n\t".join(unused_metadata_variables)
communication.info(f"These parameters are not used by the template and were ignored:\n\t{unused_str}\n")
|
fb14c28f754305e6907cff40086b2ffe55a55526
| 3,640,567
|
def calc_roll_pitch_yaw(yag, zag, yag_obs, zag_obs, sigma=None):
"""Calc S/C delta roll, pitch, and yaw for observed star positions relative to reference.
This function computes a S/C delta roll/pitch/yaw that transforms the
reference star positions yag/zag into the observed positions
yag_obs/zag_obs. The units for these values must be in arcsec.
The ``yag`` and ``zag`` values correspond to the reference star catalog
positions. These must be a 1-d list or array of length M (number of
stars).
The ``yag_obs`` and ``zag_obs`` values must be either a 1-d or 2-d array
with shape M (single readout of M stars) or shape N x M (N rows of M
stars).
The ``sigma`` parameter can be None or a 1-d array of length M.
The algorithm is a simple but fast linear least-squares solution which uses
a small angle assumption to linearize the rotation matrix from
[[cos(th) -sin(th)], [sin(th), cos(th)]] to [[1, -th], [th, 1]].
In practice anything below 1.0 degree is fine.
:param yag: reference yag (list or array, arcsec)
:param zag: reference zag (list or array, arcsec)
:param yag_obs: observed yag (list or array, arcsec)
:param zag_obs: observed zag (list or array, arcsec)
:param sigma: centroid uncertainties (None or list or array, arcsec)
:returns: roll, pitch, yaw (degrees)
"""
yag = np.array(yag)
zag = np.array(zag)
yag_obs = np.array(yag_obs)
zag_obs = np.array(zag_obs)
if yag.ndim != 1 or zag.ndim != 1 or yag.shape != zag.shape:
raise ValueError('yag and zag must be 1-d and equal length')
if (yag_obs.ndim not in (1, 2) or zag.ndim not in (1, 2) or
yag_obs.shape != zag_obs.shape):
raise ValueError('yag_obs and zag_obs must be 1-d or 2-d and equal shape')
n_stars = len(yag)
if yag_obs.shape[-1] != n_stars or zag.shape[-1] != n_stars:
raise ValueError('inconsistent number of stars in yag_obs or zag_obs')
one_d = yag_obs.ndim == 1
if one_d:
yag_obs.shape = 1, n_stars
zag_obs.shape = 1, n_stars
outs = []
for yo, zo in zip(yag_obs, zag_obs):
out = _calc_roll_pitch_yaw(yag, zag, yo, zo, sigma=sigma)
outs.append(out)
if one_d:
roll, pitch, yaw = outs[0]
else:
vals = np.array(outs)
roll, pitch, yaw = vals[:, 0], vals[:, 1], vals[:, 2]
return roll, pitch, yaw
|
e1cf3c1377a3613b9ea1fc76e7c9eecac1a6e175
| 3,640,568
|
def make_query_abs(db, table, start_dt, end_dt, dscfg, mode, no_part=False, cols=None):
"""절대 시간으로 질의를 만듦.
Args:
db (str): DB명
table (str): table명
start_dt (date): 시작일
end_dt (date): 종료일
dscfg (ConfigParser): 데이터 스크립트 설정
mode: 쿼리 모드 ('count' - 행 수 구하기, 'preview' - 프리뷰)
no_part: 테이블에 파티션이 없음. 기본 False
cols: 명시적 선택 컬럼
"""
assert type(start_dt) is date and type(end_dt) is date
start_dt = start_dt.strftime('%Y%m%d')
end_dt = end_dt.strftime('%Y%m%d')
return _make_query(db, table, start_dt, end_dt, dscfg, mode, no_part, cols)
|
113049d37ceaf1cbf9b9149b1d3a4278dad96aa6
| 3,640,569
|
def validate_task_rel_proposal(header, propose, rel_address, state):
"""Validates that the User exists, the Task exists, and the User is not
in the Task's relationship specified by rel_address.
Args:
header (TransactionHeader): The transaction header.
propose (ProposeAddTask_____): The Task relationship proposal.
rel_address (str): The Task relationship address produced by the Task
and the User.
state (sawtooth_sdk.Context): The way to communicate to the validator
the state gets and sets.
Returns:
(dict of addresses)
"""
user_address = addresser.make_user_address(propose.user_id)
task_address = addresser.make_task_attributes_address(propose.task_id)
proposal_address = addresser.make_proposal_address(
object_id=propose.task_id,
related_id=propose.user_id)
state_entries = get_state(state, [user_address,
task_address,
proposal_address,
rel_address])
validate_identifier_is_user(state_entries,
identifier=propose.user_id,
address=user_address)
user_entry = get_state_entry(state_entries, user_address)
user = get_user_from_container(
return_user_container(user_entry),
propose.user_id)
if header.signer_public_key not in [user.user_id, user.manager_id]:
raise InvalidTransaction(
"Txn signer {} is not the user or the user's "
"manager {}".format(header.signer_public_key,
[user.user_id, user.manager_id]))
validate_identifier_is_task(state_entries,
identifier=propose.task_id,
address=task_address)
try:
task_admins_entry = get_state_entry(state_entries, rel_address)
task_rel_container = return_task_rel_container(task_admins_entry)
if is_in_task_rel_container(
task_rel_container,
propose.task_id,
propose.user_id):
raise InvalidTransaction(
"User {} is already in the Role {} "
"relationship".format(propose.user_id,
propose.task_id))
except KeyError:
# The task rel container doesn't exist so no task relationship exists
pass
return state_entries
|
d9511f0cad43cbb7a2bc9c08b9f1d112d2d4bf7b
| 3,640,571
|
import json
def all_cells_run(event_str: str, expected_count: int) -> bool:
"""Wait for an event signalling all cells have run.
`execution_count` should equal number of nonempty cells.
"""
try:
event = json.loads(event_str)
msg_type = event["msg_type"]
content = event["content"]
execution_count = content["execution_count"]
status = content["status"]
except (TypeError, KeyError):
return False
return all(
(
msg_type == "execute_reply",
execution_count == expected_count,
status == "ok",
)
)
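A usage sketch with hand-built events (the payloads are synthetic but mirror the fields the function reads):
reply = json.dumps({"msg_type": "execute_reply",
                    "content": {"execution_count": 3, "status": "ok"}})
assert all_cells_run(reply, expected_count=3)
assert not all_cells_run(json.dumps({"msg_type": "status"}), expected_count=3)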
|
c3e1bb23f38ffdd09d4cc2ea3326d40b7cf54034
| 3,640,572
|
from typing import Union
def to_forecasting(
timeseries: np.ndarray,
forecast: int = 1,
axis: Union[int, float] = 0,
test_size: int = None,
):
"""Split a timeseries for forecasting tasks.
Transform a timeseries :math:`X` into a series of
input values :math:`X_t` and a series of output values
:math:`X_{t+\\mathrm{forecast}}`.
It is also possible to split the timeseries between training
timesteps and testing timesteps.
Parameters
----------
timeseries : np.ndarray
Timeseries to split.
forecast : int, optional
Number of time lag steps between
the timeseries :math:`X_t` and the timeseries
:math:`X_{t+\\mathrm{forecast}}`, by default 1,
i.e. returns two timeseries with a time difference
of 1 timesteps.
axis : int, optional
Time axis of the timeseries, by default 0
test_size : int or float, optional
If set, will also split the timeseries
into a training phase and a testing phase of
``test_size`` timesteps. Can also be specified
as a float ratio, by default None
Returns
-------
tuple of numpy.ndarray
:math:`X_t` and :math:`X_{t+\\mathrm{forecast}}`.
If ``test_size`` is specified, will return:
:math:`X_t`, :math:`X_t^{test}`,
:math:`X_{t+\\mathrm{forecast}}`, :math:`X_{t+\\mathrm{forecast}}^{test}`.
The size of the returned timeseries is therefore the size of
:math:`X` minus the forecasting length ``forecast``.
Raises
------
ValueError
If ``test_size`` is a float, it must be in [0, 1[.
"""
series_ = np.moveaxis(timeseries.view(), axis, 0)
time_len = series_.shape[0]
if test_size is not None:
if isinstance(test_size, float) and test_size < 1 and test_size >= 0:
test_len = round(time_len * test_size)
elif isinstance(test_size, int):
test_len = test_size
else:
raise ValueError(
"invalid test_size argument: "
"test_size can be an integer or a float "
f"in [0, 1[, but is {test_size}."
)
else:
test_len = 0
X = series_[:-forecast]
y = series_[forecast:]
if test_len > 0:
X_t = X[-test_len:]
y_t = y[-test_len:]
X = X[:-test_len]
y = y[:-test_len]
X = np.moveaxis(X, 0, axis)
X_t = np.moveaxis(X_t, 0, axis)
y = np.moveaxis(y, 0, axis)
y_t = np.moveaxis(y_t, 0, axis)
return X, X_t, y, y_t
return np.moveaxis(X, 0, axis), np.moveaxis(y, 0, axis)
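A usage sketch on a toy univariate series:
import numpy as np
series = np.arange(100).reshape(-1, 1)
X, X_test, y, y_test = to_forecasting(series, forecast=1, test_size=10)
# X.shape == (89, 1), X_test.shape == (10, 1); y is X shifted forward by one step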
|
7d77df1f52ee5a499b635dd9575ab08afaa7dda2
| 3,640,573
|
def build_task_environment() -> dm_env.Environment:
"""Returns the environment."""
# We first build the base task that contains the simulation model as well
# as all the initialization logic, the sensors and the effectors.
task, components = task_builder.build_task()
del components
env_builder = subtask_env_builder.SubtaskEnvBuilder()
env_builder.set_task(task)
# Build a composer environment.
task_env = env_builder.build_base_env()
# Define the action space. This defines what action spec is exposed to the
# agent along with how to project the action received by the agent to the one
# exposed by the composer environment. Here the action space is a collection
# of actions spaces, one for the arm and one for the gripper.
parent_action_spec = task.effectors_action_spec(physics=task_env.physics)
robot_action_spaces = []
for rbt in task.robots:
# Joint space control of each individual robot.
joint_action_space = action_spaces.ArmJointActionSpace(
af.prefix_slicer(parent_action_spec, rbt.arm_effector.prefix))
gripper_action_space = action_spaces.GripperActionSpace(
af.prefix_slicer(parent_action_spec, rbt.gripper_effector.prefix))
# Gripper isn't controlled by the agent for this task.
gripper_action_space = af.FixedActionSpace(
gripper_action_space,
gripper_action_space.spec().minimum)
robot_action_spaces.extend([joint_action_space, gripper_action_space])
env_builder.set_action_space(
af.CompositeActionSpace(robot_action_spaces))
# We add a preprocessor that casts all the observations to float32
env_builder.add_preprocessor(observation_transforms.CastPreprocessor())
env_builder.add_preprocessor(
rewards.L2Reward(obs0='robot0_tcp_pos', obs1='robot1_tcp_pos'))
# End episodes after 100 steps.
env_builder.add_preprocessor(subtask_termination.MaxStepsTermination(100))
return env_builder.build()
|
91618e066ef92a396ea2dc8f6ff36c9a98356e29
| 3,640,574
|
def searchInsert(nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
try:
return nums.index(target)
except ValueError:
nums.append(target)
nums.sort()
return nums.index(target)
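The append-and-sort approach above is O(n log n); for the usual assumption of an already-sorted nums, the standard library's bisect module gives an O(log n) answer (an alternative sketch, not the original author's code):
import bisect

def search_insert(nums, target):
    # Index of the first element >= target, i.e. where target is or would be inserted.
    return bisect.bisect_left(nums, target)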
|
56a719b1595502a773c108d26c597fb5ac0201bb
| 3,640,575
|
def resource(author, tag) -> Resource:
"""Resource fixture"""
return Resource(
name="Sentiment Algorithm",
url="https://raw.githubusercontent.com/MarcSkovMadsen/awesome-streamlit/master/src/pages/gallery/contributions/marc_skov_madsen/sentiment_analyzer/sentiment_analyzer.py",
is_awesome=True,
tags=[tag],
author=author,
)
|
b4eb6bd4c8409e83d0ebb75f0dc390ce7d669512
| 3,640,576
|
def model_softmax(input_data=None,
output_targets=None,
num_words=3000,
num_units=128,
num_layers=2,
num_tags=5,
batchsize=1,
train=True
):
"""
:param input_data:
:param output_targets:
:param num_words:
:param num_units:
:param num_layers:
:param num_tags: number of tags
:param batchsize:
:param train: whether to build the training graph (True) or the inference graph (False)
:return:
"""
tensors = {}
with tf.name_scope('embedding'):
w = tf.Variable(tf.random_uniform([num_words, num_units], -1.0, 1.0), name="W")
# word embeddings, shape [?, ?, num_units]
inputs = tf.nn.embedding_lookup(w, input_data)
with tf.name_scope('lstm'):
lstmcell = tf.nn.rnn_cell.BasicLSTMCell
cell_list = [lstmcell(num_units, state_is_tuple=True) for i in range(num_layers)]
cell_mul = tf.nn.rnn_cell.MultiRNNCell(cell_list, state_is_tuple=True)
initial_state = cell_mul.zero_state(batch_size=batchsize, dtype=tf.float32)
# sequence outputs, shape [?, ?, num_units]
outputs, last_state = tf.nn.dynamic_rnn(cell_mul, inputs, initial_state=initial_state)
with tf.name_scope('softmax'):
output = tf.reshape(outputs, [-1, num_units])
weights = tf.Variable(tf.truncated_normal([num_units, num_tags]))
bias = tf.Variable(tf.zeros(shape=[num_tags]))
logits = tf.nn.bias_add(tf.matmul(output, weights), bias=bias)
prediction = tf.reshape(tf.argmax(logits, axis=1, output_type=tf.int32),
shape=[batchsize, -1])
# Compute the loss during training (targets as sparse integer labels); for generation only the logits are needed
if train:
with tf.name_scope('loss'):
labels = tf.reshape(output_targets, [-1])
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
total_loss = tf.reduce_mean(loss)
accu = tf.reduce_mean(tf.cast(tf.equal(output_targets, prediction),
dtype=tf.float32))
train_op = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)
tensors['initial_state'] = initial_state
tensors['output'] = output
tensors['last_state'] = last_state
tensors['train_op'] = train_op
tensors['prediction'] = prediction
tensors['loss'] = total_loss
tensors['accu'] = accu
else:
# Keep the output keys consistent with the CRF variant
tensors['prediction'] = prediction
return tensors
|
a3991206b0cdae621e1095a1d1dc4493d600bc26
| 3,640,578
|
from typing import AnyStr
from typing import List
from typing import Dict
def get_metric_monthly_rating(metric: AnyStr,
tenant_id: AnyStr,
namespaces: List[AnyStr]) -> List[Dict]:
"""
Get the monthly price for a metric.
:metric (AnyStr) A string representing the metric.
:tenant_id (AnyStr) A string representing the tenant, only used by decorators.
:namespaces (List[AnyStr]) A list of namespaces accessible by the tenant.
Return the results of the query as a list of dictionary.
"""
qry = sa.text("""
SELECT max(frame_price) * 24 *
(SELECT extract(days FROM
date_trunc('month', now()) + interval '1 month - 1 day'))
AS frame_price
FROM frames
WHERE metric = :metric
AND frame_begin >= date_trunc('month', now())
AND namespace IN :namespaces
""").bindparams(bindparam('namespaces', expanding=True))
params = {
'metric': metric,
'tenant_id': tenant_id,
'namespaces': namespaces
}
return process_query(qry, params)
|
e73c56015a9320e9ce08b0f1375a7cea70dcc0f0
| 3,640,579
|
def masked_softmax_cross_entropy(preds, labels, mask):
"""Softmax cross-entropy loss with masking."""
loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
loss *= tf.transpose(mask)
return tf.reduce_mean(tf.transpose(loss))
|
f95f917ff4dd5835c84167f7bf3ea76a4cf6536b
| 3,640,580
|
def u0(x):
"""
Initial Condition
Parameters
----------
x : array or float;
Real space
Returns
-------
array or float : Initial condition evaluated in the real space
"""
return sin(pi * x)
|
bd55cc7226a4d2ca941b8718d62025f6f2e157b6
| 3,640,581
|
import json
def jsonify(value):
"""
Convert a value into a JSON string that can be used for JSONB queries in
Postgres.
If a string happens to contain the character U+0000, which cannot be
represented in a PostgreSQL value, remove the escape sequence representing
that character, effectively stripping out that character from all strings.
"""
return json.dumps(value, ensure_ascii=False).replace("\\u0000", "")
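A usage sketch showing the NUL stripping:
assert jsonify({"name": "abc\u0000def"}) == '{"name": "abcdef"}'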
|
7fff497b302822f8f79f0e68b2576c26458df99c
| 3,640,582
|
def generate_dataset(type='nlp', test=1):
"""
Generates a dataset for the model.
"""
if type == 'nlp':
return generate_nlp_dataset(test=test)
elif type == 'non-nlp':
return generate_non_nlp_dataset()
|
5e8998a6c9e10775367be3d6d4a722f3e24c6be1
| 3,640,584
|
def getAsciiFileExtension(proxyType):
"""
The file extension used for ASCII (non-compiled) proxy source files
for the proxies of specified type.
"""
return '.proxy' if proxyType == 'Proxymeshes' else '.mhclo'
|
cb2b27956b3066d58c7b39efb511b6335b7f2ad6
| 3,640,586
|
def dist(s1, s2):
"""Given two strings, return the Hamming distance (int)"""
return abs(len(s1) - len(s2)) + sum(
map(lambda p: 0 if p[0] == p[1] else 1, zip(s1.lower(), s2.lower())))
|
ef7b3bf24e24a2e49f0c7acfd7bcb8f23fa9af2e
| 3,640,587
|
import pickle
def read_bunch(path):
""" read bunch.
:param path:
:return:
"""
file = open(path, 'rb')
bunch = pickle.load(file)
file.close()
return bunch
|
aec87c93e20e44ddeeda6a8dfaf37a61e837c714
| 3,640,588
|
def cluster_analysis(L, cluster_alg, args, kwds):
"""Given an input graph (G), and whether the graph
Laplacian is to be normalized (True) or not (False) runs spectral clustering
as implemented in scikit-learn (empirically found to be less effective)
Returns Partitions (list of sets of ints)
"""
labels = cluster_alg(*args, **kwds).fit_predict(L)
num_clusters = np.max(labels) + 1
partitions = [set() for _ in range(num_clusters)]
outliers = set() # mechanisms only used in DBSCAN (i.e. where vertex gets no label)
for i, guess in enumerate(labels):
if guess == -1:
outliers.add(i)
else:
partitions[guess].add(i)
return partitions, outliers
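A usage sketch with scikit-learn's KMeans (the toy matrix stands in for a spectral embedding or Laplacian):
import numpy as np
from sklearn.cluster import KMeans
L = np.vstack([np.zeros((5, 2)), np.ones((5, 2))])
partitions, outliers = cluster_analysis(L, KMeans, args=(), kwds={"n_clusters": 2, "n_init": 10})
# partitions holds two sets of five indices each; outliers is empty for KMeans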
|
83114156a0b5517d31e2b2c2ffb7fc0837098db8
| 3,640,589
|
def col_index_list(info, key, value):
"""Given a list of dicts 'info', return a list of indices corresponding to
columns in which info[key] == value. Use to build lists of default columns,
non-exportable columns, etc."""
index_list = list()
if info is not None:
for i in range(0, len(info)):
if info[i].get(key) == value:
index_list.append(i)
return index_list
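A usage sketch collecting the indices of columns flagged as hidden:
columns = [{"name": "id", "hidden": False},
           {"name": "token", "hidden": True},
           {"name": "created", "hidden": True}]
assert col_index_list(columns, "hidden", True) == [1, 2]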
|
af46b03c2fe5bce2ceb7305fd670ce1f0f52ae38
| 3,640,590
|
def sparse_softmax_cross_entropy(logits, labels, weights=1.0, scope=None):
"""Cross-entropy loss using `tf.nn.sparse_softmax_cross_entropy_with_logits`.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of size [`batch_size`], then the loss weights apply to each
corresponding sample.
Args:
logits: [batch_size, num_classes] logits outputs of the network .
labels: [batch_size, 1] or [batch_size] labels of dtype `int32` or `int64`
in the range `[0, num_classes)`.
weights: Coefficients for the loss. The tensor must be a scalar or a tensor
of shape [batch_size] or [batch_size, 1].
scope: the scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the mean loss value.
Raises:
ValueError: If the shapes of `logits`, `labels`, and `weights` are
incompatible, or if `weights` is None.
"""
with ops.name_scope(scope, "sparse_softmax_cross_entropy_loss",
[logits, labels, weights]) as scope:
labels = array_ops.reshape(labels, shape=[array_ops.shape(labels)[0]])
losses = nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
logits=logits,
name="xentropy")
return compute_weighted_loss(losses, weights, scope=scope)
|
dcae4206bdcb147d5bdd4611170f12ba4e371d70
| 3,640,591
|
def retr_radihill(smax, masscomp, massstar):
"""
Return the Hill radius of a companion
Arguments
smax: semi-major axis of the companion's orbit [AU]
masscomp: companion mass
massstar: host star mass (in the same units as masscomp)
"""
radihill = smax * (masscomp / 3. / massstar)**(1. / 3.) # [AU]
return radihill
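Worked example: for the Earth-Sun system (smax = 1 AU, mass ratio about 3e-6) the Hill radius is roughly 0.01 AU, i.e. about 1.5 million km:
radihill_earth = retr_radihill(smax=1.0, masscomp=3.003e-6, massstar=1.0)  # ~0.01 AU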
|
5010f66026db7e2544b85f70fd1449f732c024b4
| 3,640,593
|
def load_feature_file(in_feature):
"""Load the feature file into a pandas dataframe."""
f = pd.read_csv(feature_path + in_feature, index_col=0)
return f
|
95bb40cc381dab3c29cf81c40d308104e9e4035b
| 3,640,594
|
def add_observation_noise(obs, noises, stds, only_object_noise=False):
"""Add noise to observations
`noises`: Standard normal noise of same shape as `obs`
`stds`: Standard deviation per dimension of `obs` to scale noise with
"""
assert obs.shape == noises.shape
idxs_object_pos = SENSOR_INFO_PNP["object_pos"]
agent_vel = obs[..., SENSOR_INFO_PNP["grip_velp"]]
obs = obs.copy()
if only_object_noise:
obs[..., idxs_object_pos] += (
noises[..., idxs_object_pos] * stds[..., idxs_object_pos]
)
else:
obs += noises * stds
# Recompute relative position
obs[..., SENSOR_INFO_PNP["object_rel_pos"]] = (
obs[..., SENSOR_INFO_PNP["object_pos"]] - obs[..., SENSOR_INFO_PNP["grip_pos"]]
)
# Recompute relative speed: first add old agent velocity to get noisy
# object velocity, then subtract noisy agent velocity to get correct
# relative speed between noisy measurements
obs[..., SENSOR_INFO_PNP["object_velp"]] = (
obs[..., SENSOR_INFO_PNP["object_velp"]]
+ agent_vel
- obs[..., SENSOR_INFO_PNP["grip_velp"]]
)
return obs
|
926de82261b6cbd702e3f19f201f82c1a94ca72b
| 3,640,595
|
import json
def test_domains(file_path="../../domains.json"):
"""
Reads a list of domains and see if they respond
"""
# Read file
with open(file_path, 'r') as domain_file:
domains_json = domain_file.read()
# Parse file
domains = json.loads(domains_json)
results = {}
for domain in domains:
status = check_status_code(domain)
results[domain] = status
return results
|
69c6792ee86e90dfdf08a866d2d8e04022dde8c7
| 3,640,596
|
from typing import Dict
from typing import Any
def mix_dirichlet_noise(distribution: Dict[Any, float],
epsilon: float,
alpha: float) -> Dict[Any, float]:
"""Combine values in dictionary with Dirichlet noise. Samples
dirichlet_noise according to dirichlet_alpha in each component. Then
updates the value v for key k with (1-epsilon) * v + epsilon * noise_k.
Parameters
----------
distribution
Dictionary with floats as values.
epsilon
Mixes the prior probabilities for starting_node with Dirichlet
noise. Uses (1 - dirichlet_epsilon) * prior_prob +
dirichlet_epsilon * dirichlet_noise, where dirichlet_noise is
sampled from the Dirichlet distribution with parameter dirichlet_alpha.
Set to 0.0 if no Dirichlet perturbation.
alpha
The parameter to sample the Dirichlet distribution with.
Returns
-------
dict
The dictionary with perturbed values.
"""
noise = np.random.dirichlet([alpha] * len(distribution))
    return {k: (1 - epsilon) * v + epsilon * n
            for ((k, v), n) in zip(distribution.items(), noise)}
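# Minimal usage sketch: perturb a uniform prior over three actions; the
# epsilon/alpha values below are illustrative only, and the result still sums to 1.
import numpy as np

np.random.seed(0)
prior = {"a": 1 / 3, "b": 1 / 3, "c": 1 / 3}
perturbed = mix_dirichlet_noise(prior, epsilon=0.25, alpha=0.3)
assert abs(sum(perturbed.values()) - 1.0) < 1e-9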
|
f779566b27107f86a92952470c949c69edb623be
| 3,640,597
|
from urllib.parse import parse_qs, urlparse
def get_video_ID(video_url: str) -> str:
"""Returns the video ID of a youtube video from a URL"""
try:
return parse_qs(urlparse(video_url).query)['v'][0]
except KeyError:
# The 'v' key isn't there, this could be a youtu.be link
return video_url.split("/")[3][:11]
|
c185a6c5a2c8a5bb4e2d6efd57f325023b030cda
| 3,640,598
|
def profiling_csv(stage, phases, durations):
"""
Dumps the profiling information into a CSV format.
For example, with
stage: `x`
phases: ['a', 'b', 'c']
durations: [1.42, 2.0, 3.4445]
The output will be:
```
x,a,1.42
x,b,2.0
x,c,3.444
```
"""
assert all(hasattr(p, "name") for p in phases), "expected to have name attribute."
return "\n".join(
[f"{stage},{p.name},{round(t, 3)}" for (p, t) in zip(phases, durations)]
)
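# Usage sketch: phases only need a `name` attribute, so a namedtuple is enough
# to reproduce the example from the docstring.
from collections import namedtuple

Phase = namedtuple("Phase", "name")
print(profiling_csv("x", [Phase("a"), Phase("b"), Phase("c")], [1.42, 2.0, 3.4445]))
# x,a,1.42
# x,b,2.0
# x,c,3.444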
|
d40ee5601aa201904741870ce75c4b5bfde0f9bc
| 3,640,599
|
def int_not_in_range(bounds, inclusive=False):
"""Creates property that must be an int outside bounds[0] and bounds[1].
Parameters:
bounds: Subscriptable with len()==2, where bounds[0] is the lower
bound and bounds[1] is the upper bound.
Requires bounds[1] > bounds[0].
inclusive (bool): If set to False, values falling on the upper and
lower bounds will not be accepted. Can set one bound to be
inclusive and the other exclusive by setting this to a tuple
of 2 bools, e.g. (True,False) makes the lower bound inclusive
while the upper bound is not.
Returns:
property
"""
return not_in_range(bounds, inclusive, type_constraint=int)
|
6890cd827fb741329c958a001e48013466414d11
| 3,640,600
|
from typing import Dict
from typing import List
def plot_concordance_pr(
pr_df: pd.DataFrame,
snv: bool,
colors: Dict[str, str] = None,
size_prop: str = None,
bins_to_label: List[int] = None,
) -> Tabs:
"""
Generates plots showing Precision/Recall curves for truth samples:
Two tabs:
- One displaying the PR curve with ranking computed on the entire data
- One displaying the PR curve with ranking computed on the truth sample only
Within each tab, a row of n_truth_samples.
The input to this function should come out of the `get_binned_concordance_pd` function, which creates
    a DataFrame containing the necessary metrics for PR plotting and is grouped by 'rank_name', 'truth_sample', 'model' and 'snv'.
:param DataFrame pr_df: Input Dataframe
:param bool snv: Whether to plot SNVs or Indels
:param dict of str -> str colors: Optional colors to use (model name -> desired color)
:param str size_prop: Either 'radius' or 'area' can be specified. If either is specified, the points will be sized proportionally to the amount of data in that point.
:param list of int bins_to_label: Bins to label
:return: Bokeh grid of plots
:rtype: Tabs
"""
if colors is None:
# Get a palette automatically
models = sorted(list(set([g[2] for g in pr_df.groups])))
palette = d3['Category10'][max(3, len(models))]
colors = {model: palette[i] for i, model in enumerate(models)}
hover = HoverTool(
tooltips=[
('model', '@model'),
('bin', '@bin'),
('score (min, max)', '(@min_score, @max_score)'),
('n_alleles', '@n_alleles'),
('cum_alleles', '@cum_alleles'),
('data (x,y)', '($x, $y)'),
]
)
tabs = []
for rank in ['truth_sample_rank', 'global_rank']:
plot_row = []
for truth_sample in set([g[1] for g in pr_df.groups]):
p = figure(
title=truth_sample[0].upper() + truth_sample[1:],
x_axis_label='Recall',
y_axis_label='Precision',
tools=[hover] + [tool for tool in TOOLS.split(',') if tool != 'hover'],
)
p.xaxis[0].formatter = NumeralTickFormatter(format='0%')
p.yaxis[0].formatter = NumeralTickFormatter(format='0.0%')
circles = []
for model in set([g[2] for g in pr_df.groups]):
data = pr_df.get_group((rank, truth_sample, model, snv)).copy()
data['model'] = [model] * len(data)
data['size'] = get_point_size_col(data['n_alleles'], size_prop)
source = ColumnDataSource(data)
circles.append(
(
model,
[
p.circle(
'recall',
'precision',
size='size',
color=colors[model],
source=source,
)
],
)
)
if bins_to_label is not None:
label_data = data.loc[data.bin.isin(bins_to_label)].copy()
label_data['x_offset'] = label_data['recall'] + 0.025
label_data['y_offset'] = label_data['precision']
label_data['bin_str'] = [str(int(t)) for t in label_data['bin']]
label_source = ColumnDataSource(label_data)
p.add_layout(
LabelSet(
x='x_offset',
y='precision',
text='bin_str',
text_color=colors[model],
source=label_source,
)
)
p.multi_line(
xs=[[x, x + 0.05] for x in label_data.recall],
ys=[[y, y] for y in label_data.precision],
color=colors[model],
)
legend = Legend(
items=circles,
orientation='horizontal',
location=(0, 0),
click_policy='hide',
)
p.add_layout(legend, 'above')
_set_plots_defaults(p)
plot_row.append(p)
tabs.append(Panel(child=Row(children=plot_row), title=rank))
return Tabs(tabs=tabs)
|
8ad9541605c8f9f274faba03de7e19766341a562
| 3,640,601
|
from enum import Enum
def typehint_metavar(typehint):
"""Generates a metavar for some types."""
metavar = None
if typehint == bool:
metavar = '{true,false}'
elif is_optional(typehint, bool):
metavar = '{true,false,null}'
elif _issubclass(typehint, Enum):
enum = typehint
metavar = '{'+','.join(list(enum.__members__.keys()))+'}'
elif is_optional(typehint, Enum):
enum = typehint.__args__[0]
metavar = '{'+','.join(list(enum.__members__.keys())+['null'])+'}'
return metavar
|
31b42c29dd970d561917420e789eea6a7bd84cfa
| 3,640,602
|
from datetime import datetime, timedelta
def generate_signed_url(filename):
"""
    Generate a signed URL that allows public access for one hour
"""
found_blob = find(filename)
expiration = datetime.now() + timedelta(hours=1)
return found_blob.generate_signed_url(expiration)
|
917f78cfa12496baf655a8ea707143b4922f99c0
| 3,640,603
|
def delete_old_layer_versions(client, table, region, package, prefix):
"""
    Loops through all deployed layer versions and deletes any version that is no longer tracked as live in
    DynamoDB (live records are removed automatically once they are old).
    The latest layer version is always kept.
    Because lambda functions are created at a maximum rate of once per day, a maximum of 14 layers can exist at one
    time.
"""
deleted_arns = []
layer_name = f"{prefix}{package}"
# Get deployed layer versions
deployed_layer_version_arns = list_layer_version_arns(client=client,
layer_name=layer_name)
# Get Live Layer versions (they automatically delete if they're old)
response = table.query(KeyConditionExpression=Key("deployed_region-package").eq(f"{region}.{package}"),
ScanIndexForward=False)
live_layer_version_arns = [item['layer_version_arn'] for item in response['Items']]
# Delete layer versions
for layer_version_arn in deployed_layer_version_arns:
if layer_version_arn not in live_layer_version_arns:
logger.info(f"Found dead layer version {layer_version_arn}...deleting")
layer_version = layer_version_arn.split(":")[-1]
client.delete_layer_version(
LayerName=layer_name,
VersionNumber=layer_version
)
deleted_arns.append(layer_version_arn)
else:
pass
return deleted_arns
|
291afac422a37cad59a8c2128776567b24c5a0c1
| 3,640,604
|
def _run_simulation(sim_desc):
"""Since _run_simulation() is always run in a separate process, its input
and output params must be pickle-friendly. Keep that in mind when
making changes.
This is what each worker executes.
Given a SimulationDescription object, calls the sequence & binning
code, traps any errors that arise and grabs results. Also verfies that
the results meet our criteria (e.g. converts to tuples/lists if necessary,
raises an exception if the ppms, areas and phases arrays are not all the
same length, etc.)
If an exception is raised at any point, it sets _worker_exception.
Returns a result dict. If an exception occurred, the repackaged
exception is in result["exception"].
"""
started = util_time.now()
# I make a copy of dims because I need to return them as part of the
# result dict, and the pulse seq code might alter or even delete what's
# attached to the sim_desc.
dims = sim_desc.dims[:]
exception = False
# Execute the user's sequence code
try:
result = _sequence_function(sim_desc)
except:
exception = _repackage_exception(SEQUENCE_CODE_ALIAS)
if not exception:
# Sequence code completed OK.
if result:
# Sequence code returned the result. There's no need to
# execute the binning code.
pass
else:
# Execute the user's binning code
try:
result = _binning_function(sim_desc)
except:
exception = _repackage_exception(BINNING_CODE_ALIAS)
if exception:
result = EMPTY_RESULT
else:
# Execution completed with no errors. Let's see if what was returned
# meets our criteria. First, the result must be an N-tuple, where
# N == RESULT_LENGTH. As of this writing, RESULT_LENGTH == 3.
result_length = _safe_len(result)
if result_length != RESULT_LENGTH:
result = EMPTY_RESULT
# I force an error here so I can get the exception including a traceback.
try:
raise ValueError("Result returned from your code must be a %d-tuple, but has length %d" % \
(RESULT_LENGTH, result_length))
except ValueError:
exception = _repackage_exception(GENERIC_CODE_ALIAS)
# Our second criteria is that each element of the 3-tuple must be the
# same length.
lengths = [_safe_len(element) for element in result]
for length in lengths:
if length != lengths[0]:
result = EMPTY_RESULT
# I force an error here so I can get the exception including a traceback.
try:
raise ValueError("Result elements differ in length: %s" % lengths)
except ValueError:
exception = _repackage_exception(GENERIC_CODE_ALIAS)
# The user's code is required to return a tuple of iterables. Those
# iterables might be lists, numpy arrays, PyGAMMA.DoubleVectors or any
# number of other things. PyGAMMA objects in particular don't pickle, and
# this function's result needs to be pickleable.
# So here we ensure that the result contains only ordinary Python objects.
result = list(map(_tuplify, result))
# Last but not least, ensure that each value is numeric and a native
# Python type.
f = lambda an_object: isinstance(an_object, (float, int))
# Loop through ppms, areas & phases lists
for result_chunk in result:
# map() allows me to test all the items in the list in one shot.
if not all(map(f, result_chunk)):
# Ooops, at least one of the results in this list isn't a float,
# int, or long.
# I force an error here so I can get the exception including
# a traceback.
try:
raise ValueError("Results must contain only floats, ints or longs")
except ValueError:
exception = _repackage_exception(GENERIC_CODE_ALIAS)
# The result (good or bad) is returned as a dict.
result = dict(list(zip(("ppms", "areas", "phases"), result)))
result["started"] = started
result["completed"] = util_time.now()
result["metabolite"] = dims[0]
result["dims"] = dims[1:]
if exception:
_worker_exception.value = 1
result["exception"] = exception
return result
|
16cacdb3eaf1fadff8769ab6316eb06e89c226eb
| 3,640,605
|
def view_filestorage_file(self, request):
""" Renders the given filestorage file in the browser. """
return getattr(request.app, self.storage).getsyspath(self.path)
|
ad65b83b9462c8b8efec7626d4751685df3aba8b
| 3,640,606
|
def enum_choice_list(data):
""" Creates the argparse choices and type kwargs for a supplied enum type or list of strings """
# transform enum types, otherwise assume list of string choices
if not data:
return {}
try:
choices = [x.value for x in data]
except AttributeError:
choices = data
def _type(value):
return next((x for x in choices if x.lower() == value.lower()), value) if value else value
params = {
'choices': CaseInsensitiveList(choices),
'type': _type
}
return params
|
4f91c76569a4b42e655ed198a5c4ec2e48d9e839
| 3,640,607
|
def chartset(request):
    """ Character set that determines the page.
    request: the response from the url"""
    print("--------------- Getting charset -------------------")
    try:
        charset = request.encoding
    except AttributeError as error_atributo:
        charset = "NA"
        print("charset: " + str(error_atributo))
    return charset
|
072ec863bd555706a64bab48d147afb24142fae4
| 3,640,608
|
import uuid
def generate_UUID():
"""
Generate a UUID and return it
"""
return str(uuid.uuid4())
|
feab11861e366ddf60cdc74c12f77f6a6ece2fa3
| 3,640,609
|
def streaming_recall_at_thresholds(predictions, labels, thresholds,
ignore_mask=None, metrics_collections=None,
updates_collections=None, name=None):
"""Computes various recall values for different `thresholds` on `predictions`.
The `streaming_recall_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds.
`recall[i]` is defined as the number of values in `predictions` above
`thresholds[i]` whose corresponding entry in `labels` is `True`
(`true_positives[i]`) divided by the number of True values in `labels`
(`true_positives[i] + false_negatives[i]`).
If `ignore_mask` is not None then only values whose corresponding value in
`ignore_mask` is `False` are considered.
`recall` are returned along with an `update_op` whose value equals that of
`recall`.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A binary `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
ignore_mask: An optional, binary tensor whose size matches `predictions`.
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_op_scope name.
Returns:
recall: A float tensor of shape [len(thresholds)].
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `recall`.
Raises:
ValueError: If the shape of `predictions` and `labels` do not match or if
`ignore_mask` is not `None` and its shape doesn't match `predictions`
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
with variable_scope.variable_op_scope([predictions, labels], name,
'recall_at_thresholds'):
(true_positives, false_negatives, _, _, true_positives_compute_op,
false_negatives_compute_op, _, _,) = _tp_fn_tn_fp(
predictions, labels, thresholds, ignore_mask)
# avoid division by zero
epsilon = 1e-7
def compute_recall(name):
recall = math_ops.div(true_positives,
epsilon + true_positives + false_negatives,
name='recall_' + name)
return recall
recall = compute_recall('value')
with ops.control_dependencies([true_positives_compute_op,
false_negatives_compute_op]):
update_op = compute_recall('update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, recall)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return recall, update_op
|
6cdaa7cf3b0d1c35204764fb78c4f9cefb09b577
| 3,640,610
|
def fib(n):
"""Returns the nth Fibonacci number."""
if n == 0:
return 1
elif n == 1:
return 1
else:
return fib(n - 1) + fib(n - 2)
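# Quick check of the convention used above (fib(0) == fib(1) == 1). Note the
# naive recursion is exponential in n, so keep n small.
assert [fib(n) for n in range(8)] == [1, 1, 2, 3, 5, 8, 13, 21]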
|
397d5714f45491dde68c13379fe2a6acafe55002
| 3,640,611
|
def template_review(context, mapping):
""":phabreview: Object describing the review for this changeset.
Has attributes `url` and `id`.
"""
ctx = context.resource(mapping, b'ctx')
m = _differentialrevisiondescre.search(ctx.description())
if m:
return templateutil.hybriddict({
b'url': m.group(b'url'),
b'id': b"D{}".format(m.group(b'id')),
})
|
f475cf717026329ecc3c1ed1ccaff89089423e50
| 3,640,614
|
import networkx as nx
import numpy as np
def addRandomEdges(graph: nx.Graph, nEdges: int) -> tuple:
""" Adds random edges to a given graph """
nodes = list(graph.nodes)
n = len(nodes)
edges = []
for i in range(nEdges):
newEdge = False
while not newEdge:
            i_u, i_v = np.random.randint(0, n), np.random.randint(0, n)  # upper bound is exclusive, so every node index is reachable
edge = (nodes[i_u], nodes[i_v])
if edge not in graph.edges(data=False) and edge not in edges:
newEdge = True
edges.append(edge)
g = graph.copy()
g.add_edges_from(edges)
return g, edges
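# Hedged usage sketch: augment a small path graph with two extra random edges;
# the input graph is left untouched because the function works on a copy.
np.random.seed(1)
base = nx.path_graph(5)
augmented, new_edges = addRandomEdges(base, nEdges=2)
print("added edges:", new_edges)
print("edge count:", base.number_of_edges(), "->", augmented.number_of_edges())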
|
004723ac17a431a266bae27c91316a66ced507f9
| 3,640,615
|
def get_s3_buckets_for_account(account, region='us-east-1'):
""" Get S3 buckets for a specific account.
:param account: AWS account
:param region: AWS region
"""
session = boto3.session.Session() # create session for Thread Safety
assume = rolesession.assume_crossact_audit_role(
session, account['accountNum'], region)
s3_data = []
if assume:
s3_client = assume.client('s3')
s3_info = s3_client.list_buckets().get('Buckets')
if s3_info:
for bucket in s3_info:
s3_global = is_s3_bucket_global(assume, bucket)
s3_data.append(
dict(BucketName=bucket['Name'],
AccountNum=account['accountNum'],
AccountAlias=account.get('alias'),
GlobalAccess=s3_global))
return s3_data
|
bc2a334bb6c358c43fb97336d3092c27372bd2d0
| 3,640,616
|
def get_users():
""" Alle Benutzer aus der Datenbank laden. """
session = get_cassandra_session()
future = session.execute_async("SELECT user_id, username, email FROM users")
try:
rows = future.result()
    except Exception:
        log.exception("Failed to load users")
        raise
users = []
for row in rows:
users.append({
'user_id': row.user_id,
'username': row.username,
'email': row.email
})
return jsonify({'users': users}), 200
|
c6f7b49447dd187e188e9094af3443fe3e4ed218
| 3,640,618
|
import numpy as np
def vgconv(xinput,yinput,fwhm, ppr=None):
"""convolution with a Gaussian in log lambda scale
for a constant resolving power
Parameters
----------
xinput: numpy float array
wavelengths
yinput: numpy array of floats
fluxes
fwhm: float
FWHM of the Gaussian (km/s)
ppr: float, optional
Points per resolution element to downsample the convolved spectrum
(default None, to keep the original sampling)
Returns
-------
x: numpy float array
wavelengths after convolution, will be a subset of xinput when that is equidistant
in log lambda, otherwise a subset of the resampled version
y: numpy array of floats
fluxes after convolution
"""
#resampling to ln(lambda) if need be
xx = np.diff(np.log(xinput))
if max(xx) - min(xx) > 1.e-7: #input not equidist in loglambda
nel = len(xinput)
minx = np.log(xinput[0])
maxx = np.log(xinput[-1])
x = np.linspace(minx,maxx,nel)
step = x[1] - x[0]
x = np.exp(x)
#y = np.interp( x, xinput, yinput)
y = interp_spl( x, xinput, yinput)
else:
x = xinput
y = yinput
step = np.log(xinput[1])-np.log(xinput[0])
fwhm = fwhm/clight # inverse of the resolving power
sigma=fwhm/2.0/np.sqrt(-2.0*np.log(0.5))
npoints = 2*int(3*fwhm/2./step)+1
half = npoints * step /2.
xx = np.linspace(-half,half,npoints)
kernel = np.exp(-(xx-np.mean(xx))**2/2./sigma**2)
kernel = kernel/np.sum(kernel)
y = np.convolve(y,kernel,'valid')
edge = int(npoints/2)
x = x[edge:-edge]
#print(xinput.size,x.size,y.size)
    if ppr is not None:
fac = int(fwhm / step / ppr)
print(fwhm,step,ppr,fac)
subset = np.arange(x.size / fac, dtype=int) * fac
x = x[subset]
y = y[subset]
return(x,y)
|
d4722c87881eca27bac45cd47f84269249591cd0
| 3,640,620
|
def attach_component_to_entity(entity_id, component_name):
# type: (azlmbr.entity.EntityId, str) -> azlmbr.entity.EntityComponentIdPair
"""
Adds the component if not added already.
:param entity_id: EntityId of the entity to attach the component to
:param component_name: name of the component
:return: If successful, returns the EntityComponentIdPair, otherwise returns None.
"""
type_ids_list = editor.EditorComponentAPIBus(
bus.Broadcast, 'FindComponentTypeIdsByEntityType', [component_name], 0)
general.log(f"Components found = {len(type_ids_list)}")
if len(type_ids_list) < 1:
general.log(f"ERROR: A component class with name {component_name} doesn't exist")
return None
elif len(type_ids_list) > 1:
general.log(f"ERROR: Found more than one component classes with same name: {component_name}")
return None
# Before adding the component let's check if it is already attached to the entity.
component_outcome = editor.EditorComponentAPIBus(bus.Broadcast, 'GetComponentOfType', entity_id, type_ids_list[0])
if component_outcome.IsSuccess():
return component_outcome.GetValue() # In this case the value is not a list.
component_outcome = editor.EditorComponentAPIBus(bus.Broadcast, 'AddComponentsOfType', entity_id, type_ids_list)
if component_outcome.IsSuccess():
general.log(f"{component_name} Component added to entity.")
return component_outcome.GetValue()[0]
general.log(f"ERROR: Failed to add component [{component_name}] to entity")
return None
|
f2c29f18ede8eef7accaf19970d18b0a432801ed
| 3,640,621
|
import yaml
from io import StringIO
def mix_to_dat(probspec,isStringIO=True):
"""
Reads a YAML mix file and generates all of the GMPL dat components associated with
the mix inputs.
Inputs:
        probspec - problem specification dict; probspec['reqd_files']['filename_mix'] points at the YAML mix file
        isStringIO - if True (the default), return the dat components as a single string
Output:
param tt_shiftlen_min_dys_weeks:=
1 6 1 3
1 6 2 5
1 6 3 5
1 6 4 5
...
"""
# Open the mix file and load it into a YAML object
fn_mix = probspec['reqd_files']['filename_mix']
fin = open(fn_mix,"r")
ttspec = yaml.load(fin)
    mixout = StringIO()
## print ttspec
## print ttspec['tourtypes']
## print ttspec['tourtypes'][0]
## print ttspec['tourtypes'][0]['min_days_week']
# Get set of shift lengths and order them ascending by length
lenset = set([])
for m in ttspec['tourtypes']:
for s in m['shiftlengths']:
lenset.add(s['numbins'])
lengths = list(lenset)
lengths.sort()
len_param = list_to_param('lengths', lengths)
# Number of shift lengths
    n_lengths = len(lengths)
numlen_param = scalar_to_param('n_lengths', n_lengths)
# Number of tour types
    n_ttypes = len(ttspec['tourtypes'])
numttypes_param = scalar_to_param('n_tts', n_ttypes)
# Tour type length sets
lenxset = get_length_x_from_mix(ttspec)
lenxset_set = list_to_indexedset('tt_length_x', lenxset)
# Midnight threshold for weekend assignments
midthresholds = [m['midnight_thresh'] for m in ttspec['tourtypes']]
midthresh_param = list_to_param('midnight_thresh', midthresholds)
# Parttime flag and bound
ptflags = [m['is_parttime'] for m in ttspec['tourtypes']]
ptflags_param = list_to_param('tt_parttime', ptflags)
ptfrac = ttspec['max_parttime_frac']
ptfrac_param = scalar_to_param('max_parttime_frac', ptfrac)
# Global start window width
width = ttspec['g_start_window_width']
width_param = scalar_to_param('g_start_window_width', width)
# Lower and upper bounds on number scheduled
if 'opt_files' in probspec and 'filename_ttbounds' in probspec['opt_files']:
fn_ttbnds = probspec['opt_files']['filename_ttbounds']
fin_ttbnds = open(fn_ttbnds,"r")
ttbndsspec = yaml.load(fin_ttbnds)
tt_lb = [m['tt_lb'] for m in ttbndsspec['tourtypes']]
tt_lb_param = list_to_param('tt_lb', tt_lb)
tt_ub = [m['tt_ub'] for m in ttbndsspec['tourtypes']]
tt_ub_param = list_to_param('tt_ub', tt_ub)
else:
tt_lb = [m['tt_lb'] for m in ttspec['tourtypes']]
tt_lb_param = list_to_param('tt_lb', tt_lb)
tt_ub = [m['tt_ub'] for m in ttspec['tourtypes']]
tt_ub_param = list_to_param('tt_ub', tt_ub)
# Cost multiplier
tt_cost_multiplier = [m['tt_cost_multiplier'] for m in ttspec['tourtypes']]
tt_cost_multiplier_param = list_to_param('tt_cost_multiplier',
tt_cost_multiplier)
# Min and max cumulative days and prds worked over the weeks
tt_min_dys_weeks_param = mix_days_prds_params(ttspec,
'tt_min_dys_weeks','min_days_week',
'min_shiftlen_days_week')
tt_max_dys_weeks_param = mix_days_prds_params(ttspec,
'tt_max_dys_weeks','max_days_week',
'max_shiftlen_days_week')
tt_min_prds_weeks_param = mix_days_prds_params(ttspec,
'tt_min_prds_weeks','min_prds_week',
'min_shiftlen_prds_week')
tt_max_prds_weeks_param = mix_days_prds_params(ttspec,
'tt_max_prds_weeks','max_prds_week',
'max_shiftlen_prds_week')
# Min and max days and prds worked over the weeks
# for each shift length workable in the tour type
tt_shiftlen_min_dys_weeks_param = mix_days_prds_params(ttspec,
'tt_shiftlen_min_dys_weeks','min_days_week',
'min_shiftlen_days_week')
tt_shiftlen_max_dys_weeks_param = mix_days_prds_params(ttspec,
'tt_shiftlen_max_dys_weeks','max_days_week',
'max_shiftlen_days_week')
tt_shiftlen_min_prds_weeks_param = mix_days_prds_params(ttspec,
'tt_shiftlen_min_prds_weeks','min_prds_week',
'min_shiftlen_prds_week')
tt_shiftlen_max_prds_weeks_param = mix_days_prds_params(ttspec,
'tt_shiftlen_max_prds_weeks','max_prds_week',
'max_shiftlen_prds_week')
# Min and max days and prds worked each week
tt_min_cumul_dys_weeks_param = mix_days_prds_params(ttspec,
'tt_min_cumul_dys_weeks','min_cumul_days_week',
'min_shiftlen_cumul_days_week')
tt_max_cumul_dys_weeks_param = mix_days_prds_params(ttspec,
'tt_max_cumul_dys_weeks','max_cumul_days_week',
'max_shiftlen_cumul_days_week')
tt_min_cumul_prds_weeks_param = mix_days_prds_params(ttspec,
'tt_min_cumul_prds_weeks','min_cumul_prds_week',
'min_shiftlen_cumul_prds_week')
tt_max_cumul_prds_weeks_param = mix_days_prds_params(ttspec,
'tt_max_cumul_prds_weeks','max_cumul_prds_week',
'max_shiftlen_cumul_prds_week')
# Min and max cumulative days and prds worked over the weeks
# for each shift length workable in the tour type
tt_shiftlen_min_cumul_dys_weeks_param = mix_days_prds_params(ttspec,
'tt_shiftlen_min_cumul_dys_weeks','min_cumul_days_week',
'min_shiftlen_cumul_days_week')
tt_shiftlen_max_cumul_dys_weeks_param = mix_days_prds_params(ttspec,
'tt_shiftlen_max_cumul_dys_weeks','max_cumul_days_week',
'max_shiftlen_cumul_days_week')
tt_shiftlen_min_cumul_prds_weeks_param = mix_days_prds_params(ttspec,
'tt_shiftlen_min_cumul_prds_weeks','min_cumul_prds_week',
'min_shiftlen_cumul_prds_week')
tt_shiftlen_max_cumul_prds_weeks_param = mix_days_prds_params(ttspec,
'tt_shiftlen_max_cumul_prds_weeks','max_cumul_prds_week',
'max_shiftlen_cumul_prds_week')
# Put the parameter pieces together into a single StringIO object
    print(numlen_param, file=mixout)
    print(len_param, file=mixout)
    print(numttypes_param, file=mixout)
    print(lenxset_set, file=mixout)
    print(midthresh_param, file=mixout)
    print(tt_lb_param, file=mixout)
    print(tt_ub_param, file=mixout)
    print(tt_cost_multiplier_param, file=mixout)
    print(ptflags_param, file=mixout)
    print(ptfrac_param, file=mixout)
    print(width_param, file=mixout)
    print(tt_min_cumul_dys_weeks_param, file=mixout)
    print(tt_max_cumul_dys_weeks_param, file=mixout)
    print(tt_min_cumul_prds_weeks_param, file=mixout)
    print(tt_max_cumul_prds_weeks_param, file=mixout)
    print(tt_min_dys_weeks_param, file=mixout)
    print(tt_max_dys_weeks_param, file=mixout)
    print(tt_min_prds_weeks_param, file=mixout)
    print(tt_max_prds_weeks_param, file=mixout)
    print(tt_shiftlen_min_dys_weeks_param, file=mixout)
    print(tt_shiftlen_max_dys_weeks_param, file=mixout)
    print(tt_shiftlen_min_prds_weeks_param, file=mixout)
    print(tt_shiftlen_max_prds_weeks_param, file=mixout)
    print(tt_shiftlen_min_cumul_dys_weeks_param, file=mixout)
    print(tt_shiftlen_max_cumul_dys_weeks_param, file=mixout)
    print(tt_shiftlen_min_cumul_prds_weeks_param, file=mixout)
    print(tt_shiftlen_max_cumul_prds_weeks_param, file=mixout)
# print mixout.getvalue()
if isStringIO:
return mixout.getvalue()
    else:
        mixout.seek(0)
        return mixout.read()
|
972c1118c8d7af6dc8f9ff87908b1ca7184c880e
| 3,640,623
|
from typing import Any
def get_setting(setting_name: str, default: Any=None) -> Any:
"""
Convenience wrapper to get the value of a setting.
"""
configuration = get_configuration()
if not configuration: # pragma: no cover
raise Exception('get_setting() called before configuration was initialised')
return configuration.get_setting_live(setting_name, default)
|
774ee06824a227ed66357cb46a5277c24ba11f09
| 3,640,624
|
import numpy as np
def deceptivemultimodal(x: np.ndarray) -> float:
"""Infinitely many local optima, as we get closer to the optimum."""
assert len(x) >= 2
distance = np.sqrt(x[0]**2 + x[1]**2)
if distance == 0.:
return 0.
angle = np.arctan(x[0] / x[1]) if x[1] != 0. else np.pi / 2.
invdistance = int(1. / distance) if distance > 0. else 0.
if np.abs(np.cos(invdistance) - angle) > 0.1:
return 1.
return float(distance)
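# Small sanity check sketch: the optimum at the origin scores 0, while a nearby
# point that fails the angle test is penalised with 1.
print(deceptivemultimodal(np.array([0.0, 0.0])))  # 0.0
print(deceptivemultimodal(np.array([0.5, 0.5])))  # 1.0 (penalised)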
|
c08ab425bbc9803fcea9695c46acee71c2455873
| 3,640,625
|
from aiida.orm import Dict
from aiida_quantumespresso.utils.resources import get_default_options
def generate_inputs_ph(fixture_sandbox, fixture_localhost, fixture_code, generate_remote_data, generate_kpoints_mesh):
"""Generate default inputs for a `PhCalculation."""
def _generate_inputs_ph():
"""Generate default inputs for a `PhCalculation."""
inputs = {
'code': fixture_code('quantumespresso.matdyn'),
'parent_folder': generate_remote_data(fixture_localhost, fixture_sandbox.abspath, 'quantumespresso.pw'),
'qpoints': generate_kpoints_mesh(2),
'parameters': Dict(dict={'INPUTPH': {}}),
'metadata': {
'options': get_default_options()
}
}
return inputs
return _generate_inputs_ph
|
4ab1f46ff08094fccd4197a19ab56c31dc1ac93c
| 3,640,627
|
from urllib.parse import quote
def escape_url(raw):
"""
Escape urls to prevent code injection craziness. (Hopefully.)
"""
return quote(raw, safe="/#:")
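# Usage sketch: characters outside the safe set "/#:" are percent-encoded.
print(escape_url("https://example.com/a b?q=1#frag"))
# https://example.com/a%20b%3Fq%3D1#frag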
|
4eee23f244998d2d2f4abd892a867f2e27f502a2
| 3,640,628
|
def split_sample(labels):
"""
Split the 'Sample' column of a DataFrame into a list.
Parameters
----------
labels: DataFrame
The Dataframe should contain a 'Sample' column for splitting.
Returns
-------
DataFrame
Updated DataFrame has 'Sample' column with a list of strings.
"""
sample_names = labels["Sample"].str.split(" ", n=1, expand=False)
labels['Sample'] = sample_names
return labels
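# Usage sketch with a tiny DataFrame: each 'Sample' value is split on the
# first space into a list of (at most) two strings.
import pandas as pd

df = pd.DataFrame({"Sample": ["GSM1 tumor tissue", "GSM2 normal tissue"]})
print(split_sample(df)["Sample"].tolist())
# [['GSM1', 'tumor tissue'], ['GSM2', 'normal tissue']]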
|
483f1b78e07a2156aa3e48ae6c1f5ce41f5e60fe
| 3,640,629
|
def pmi_odds(pnx, pn, nnx, nn):
"""
Computes the PMI with odds
Args:
pnx (int): number of POSITIVE news with the term x
pn (int): number of POSITIVE news
nnx (int): number of NEGATIVE news with the term x
nn (int): number of NEGATIVE news
Returns:
float: PMI
"""
#print (pnx, pn, nnx, nn)
return _pmi_odds_(p_p(pnx, pn), p_n(nnx, nn))
|
5d4786f477fb12051a5a56887a7a7573aeab0802
| 3,640,630
|
def berDecodeLength(m, offset=0):
"""
Return a tuple of (length, lengthLength).
    m must be at least one byte long.
"""
l = ber2int(m[offset + 0:offset + 1])
ll = 1
if l & 0x80:
ll = 1 + (l & 0x7F)
need(m, offset + ll)
l = ber2int(m[offset + 1:offset + ll], signed=0)
return (l, ll)
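# Hedged, self-contained illustration: ber2int() and need() are module helpers
# that are not shown here, so minimal stand-ins are defined below purely to
# demonstrate the length decoding (short form and long form).
def ber2int(b, signed=1):
    return int.from_bytes(b, "big")

def need(m, n):
    assert len(m) >= n, "not enough bytes"

print(berDecodeLength(b"\x05"))          # (5, 1)  short form
print(berDecodeLength(b"\x82\x01\x00"))  # (256, 3) long form: 2 length bytes follow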
|
e93252966e370088274f62bd512d59062e7431b2
| 3,640,631
|
def hasAspect(obj1, obj2, aspList):
""" Returns if there is an aspect between objects
considering a list of possible aspect types.
"""
aspType = aspectType(obj1, obj2, aspList)
return aspType != const.NO_ASPECT
|
71907043900d080f2254557fe0bd2420b9bf9ac3
| 3,640,632
|
def gen_decomposition(denovo_name, basis_names, weights, output_path, project, \
mtype, denovo_plots_dict, basis_plots_dict, reconstruction_plot_dict, \
reconstruction=False, statistics=None, sig_version=None, custom_text=None):
"""
Generate the correct plot based on mtype.
Parameters:
----------
denovo_name: (String) Name of denovo signature
basis_names: (List of Strings) Names of basis signatures
weights: (List of Strings) Percentile contribution for each basis signature
output_path: (String) Path to existing output directory
project: (String) Project name appended to file names
mtype: (String) The context 'mtype_options' has valid values
denovo_plots_dict (Dictionary) Signatures are keys, ByteIO plots are values
basis_plots_dict (Dictionary) Signatures are keys, ByteIO plots are values
reconstruction_plot_dict (Dictionary) Signatures are keys, ByteIO plots are values
reconstruction: (Boolean) True to generate plot w/ reconstruction
statistics: (Pandas Dataframe) Output from calculate_similarities()
"""
if mtype == "6":
print("Need to add support for SBS6 Decomposition")
elif mtype == "24":
print("Need to add support for SBS24 Decomposition")
elif mtype == "96":
byte_plot=spd_96.gen_decomposition(denovo_name, basis_names, weights, output_path, \
project, denovo_plots_dict, basis_plots_dict, reconstruction_plot_dict, \
reconstruction, statistics, sig_version, custom_text)
return byte_plot
elif mtype == "288":
byte_plot=spd_288.gen_decomposition(denovo_name, basis_names, weights, output_path, \
project, denovo_plots_dict, basis_plots_dict, reconstruction_plot_dict, \
reconstruction, statistics, sig_version, custom_text)
return byte_plot
elif mtype == "384":
print("Need to add support for SBS24 Decomposition")
elif mtype == "1536":
byte_plot=spd_1536.gen_decomposition(denovo_name, basis_names, weights, output_path, \
project, denovo_plots_dict, basis_plots_dict, reconstruction_plot_dict, \
reconstruction, statistics, sig_version, custom_text)
return byte_plot
elif mtype == "6144":
print("Need to add support for SBS6144 Decomposition")
elif mtype == "28":
print("Need to add support for ID28 Decomposition")
elif mtype == "83":
byte_plot=spd_83.gen_decomposition(denovo_name, basis_names, weights, output_path, \
project, denovo_plots_dict, basis_plots_dict, reconstruction_plot_dict, \
reconstruction, statistics, sig_version, custom_text)
return byte_plot
elif mtype == "415":
print("Need to add support for ID415 Decomposition")
elif mtype == "78":
byte_plot=spd_78.gen_decomposition(denovo_name, basis_names, weights, output_path, \
project, denovo_plots_dict, basis_plots_dict, reconstruction_plot_dict, \
reconstruction, statistics, sig_version, custom_text)
return byte_plot
elif mtype == "186":
print("Need to add support for DBS186 Decomposition")
elif mtype == "1248":
print("Need to add support for DBS1248 Decomposition")
elif mtype == "2976":
print("Need to add support for DBS2976 Decomposition")
elif mtype == "48":
byte_plot=cnv_48.gen_decomposition(denovo_name, basis_names, weights, output_path, \
project, denovo_plots_dict, basis_plots_dict, reconstruction_plot_dict, \
reconstruction, statistics, sig_version, custom_text)
return byte_plot
|
9bb65728017a3f9f2a64ae94cb1ae7e15268c93b
| 3,640,633
|
from unittest.mock import Mock
def org(gh):
"""Creates an Org instance and adds an spy attribute to check for calls"""
ret = Organization(gh, name=ORG_NAME)
ret._gh = Mock(wraps=ret._gh)
ret.spy = ret._gh
return ret
|
017d044015ff60c91742ea2eb12e2cd7720328c6
| 3,640,634
|
def merge_regions(
out_path: str, sample1_id: int, regions1_file: File, sample2_id: int, regions2_file: File
) -> File:
"""
Merge two sorted region files into one.
"""
def iter_points(regions):
for start, end, depth in regions:
yield (start, "start", depth)
yield (end, "end", -depth)
def iter_regions(points):
first_point = next(points, None)
if first_point is None:
return
start, _, depth = first_point
for pos, kind, delta in points:
if pos > start:
yield (start, pos, depth)
start = pos
depth += delta
regions1 = read_regions(regions1_file)
regions2 = read_regions(regions2_file)
points1 = iter_points(regions1)
points2 = iter_points(regions2)
points = iter_merge(points1, points2)
regions = iter_regions(points)
region_path = f"{out_path}/regions/{sample1_id}_{sample2_id}.regions"
return write_regions(region_path, regions)
|
eeb4b8bf73df45ae9d6af39d0d8c9db04251da41
| 3,640,635
|
import hashlib
def get_text_hexdigest(data):
"""returns md5 hexadecimal checksum of string/unicode data
NOTE
----
The md5 sum of get_text_hexdigest can differ from get_file_hexdigest.
This will occur if the line ending character differs from being read in
'rb' versus 'r' modes.
"""
data_class = data.__class__
# fmt: off
if data_class in ("".__class__, u"".__class__):
data = data.encode("utf-8")
elif data.__class__ != b"".__class__:
raise TypeError("can only checksum string, unicode or bytes data")
# fmt: on
md5 = hashlib.md5()
md5.update(data)
return md5.hexdigest()
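# Usage sketch: equal str and bytes content hash to the same digest.
assert get_text_hexdigest("abc") == get_text_hexdigest(b"abc")
print(get_text_hexdigest("abc"))  # 900150983cd24fb0d6963f7d28e17f72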
|
762115178406c0b49080b3076859a3d1c13ad356
| 3,640,636
|
import json
def recipe(recipe_id):
"""
Display the recipe on-page for each recipe id that was requested
"""
# Update the rating if it's an AJAX call
if request.method == "POST":
# check if user is login in order to proceed with rating
if not session:
return json.dumps({'status': 'not logged in'})
        # check if the recipe id hasn't been changed
if not is_valid(recipe_id):
return json.dumps({'status': 'error'})
# the query for the specific recipe that has to be rated
recipe = mongo.db.recipes.find_one({"_id": ObjectId(recipe_id)})
        # if the user wants to rate their own recipe, return denied
if recipe["created_by"] == session["user"]:
return json.dumps({'status': 'denied'})
        # check that the user didn't alter the form value
new_rating = request.form.get("stars")
if int(new_rating) > 0 and int(new_rating) <= 5:
# update the recipe rating
rating = update_recipe_rating(mongo, new_rating, recipe)
return json.dumps({'status': 'success', 'rating': rating})
return json.dumps({'status': 'error'})
    # check if the recipe id hasn't been changed
if not is_valid(recipe_id):
return redirect(url_for('error', code=404))
# get the categories that are in use for navigation menu
nav_categories = mongo.db.recipes.distinct("category_name")
# the query for the specific recipe that the user wants to access
recipe = mongo.db.recipes.find_one({"_id": ObjectId(recipe_id)})
    # added in case the owner decides to delete the recipe while
    # other users are still on this recipe page; otherwise refreshing
    # the page would raise an error when page_set accesses
    # recipe["recipe_name"] on a None value
if not recipe:
return redirect(url_for('error', code=404))
# set up the page_set object
page_set = {
"title": recipe["recipe_name"].title(),
"type": "recipe"
}
return render_template("pages/recipe.html",
recipe=recipe,
page_set=page_set,
nav_categories=nav_categories)
|
60db178d071d1880410e4e752ec484c4b59b0f96
| 3,640,637
|
def api_program_ordering(request, program):
"""Returns program-wide RF-aware ordering (used after indicator deletion on program page)"""
try:
data = ProgramPageIndicatorUpdateSerializer.load_for_pk(program).data
except Program.DoesNotExist:
logger.warning('attempt to access program page ordering for bad pk {}'.format(program))
return JsonResponse({'success': False, 'msg': 'bad Program PK'})
return JsonResponse(data)
|
d4966689b0ea65885456ad7b52cf5dfd845ac822
| 3,640,638
|