text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def twisted_absolute_path(path, request):
"""Hack to fix twisted not accepting absolute URIs"""
parsed = urlparse.urlparse(request.uri)
if parsed.scheme != '':
path_parts = parsed.path.lstrip('/').split('/')
request.prepath = path_parts[0:1]
request.postpath = path_parts[1:]
path = request.prepath[0]
return path, request | [
"def",
"twisted_absolute_path",
"(",
"path",
",",
"request",
")",
":",
"parsed",
"=",
"urlparse",
".",
"urlparse",
"(",
"request",
".",
"uri",
")",
"if",
"parsed",
".",
"scheme",
"!=",
"''",
":",
"path_parts",
"=",
"parsed",
".",
"path",
".",
"lstrip",
... | 40.222222 | 6.333333 |
def nearest_neighbor(self,
vectors,
num=10,
batch_size=100,
show_progressbar=False,
return_names=True):
"""
Find the nearest neighbors to some arbitrary vector.
This function is meant to be used in composition operations. The
most_similar function can only handle items that are in vocab, and
looks up their vector through a dictionary. Compositions, e.g.
"King - man + woman" are necessarily not in the vocabulary.
Parameters
----------
vectors : list of arrays or numpy array
The vectors to find the nearest neighbors to.
num : int, optional, default 10
The number of most similar items to retrieve.
batch_size : int, optional, default 100.
The batch size to use. 100 is a good default option. Increasing
the batch size may increase speed.
show_progressbar : bool, optional, default False
Whether to show a progressbar.
return_names : bool, optional, default True
Whether to return the item names, or just the distances.
Returns
-------
sim : list of tuples.
For each item in the input the num most similar items are returned
in the form of (NAME, DISTANCE) tuples. If return_names is set to
false, only the distances are returned.
"""
vectors = np.array(vectors)
if np.ndim(vectors) == 1:
vectors = vectors[None, :]
result = []
result = self._batch(vectors,
batch_size,
num+1,
show_progressbar,
return_names)
return list(result) | [
"def",
"nearest_neighbor",
"(",
"self",
",",
"vectors",
",",
"num",
"=",
"10",
",",
"batch_size",
"=",
"100",
",",
"show_progressbar",
"=",
"False",
",",
"return_names",
"=",
"True",
")",
":",
"vectors",
"=",
"np",
".",
"array",
"(",
"vectors",
")",
"i... | 37.285714 | 17.693878 |
def static_method(cls, f):
"""Decorator which dynamically binds static methods to the model for later use."""
setattr(cls, f.__name__, staticmethod(f))
return f | [
"def",
"static_method",
"(",
"cls",
",",
"f",
")",
":",
"setattr",
"(",
"cls",
",",
"f",
".",
"__name__",
",",
"staticmethod",
"(",
"f",
")",
")",
"return",
"f"
] | 45.25 | 11.75 |
def rate_limit_info():
""" Returns (requests_remaining, minutes_to_reset) """
import json
import time
r = requests.get(gh_url + "/rate_limit", auth=login.auth())
out = json.loads(r.text)
mins = (out["resources"]["core"]["reset"]-time.time())/60
return out["resources"]["core"]["remaining"], mins | [
"def",
"rate_limit_info",
"(",
")",
":",
"import",
"json",
"import",
"time",
"r",
"=",
"requests",
".",
"get",
"(",
"gh_url",
"+",
"\"/rate_limit\"",
",",
"auth",
"=",
"login",
".",
"auth",
"(",
")",
")",
"out",
"=",
"json",
".",
"loads",
"(",
"r",
... | 35.111111 | 19.777778 |
def create_overwrites_for_quarter(self,
col_to_overwrites,
next_qtr_start_idx,
last_per_qtr,
quarters_with_estimates_for_sid,
requested_quarter,
sid,
sid_idx,
columns):
"""
Add entries to the dictionary of columns to adjustments for the given
sid and the given quarter.
Parameters
----------
col_to_overwrites : dict [column_name -> list of ArrayAdjustment]
A dictionary mapping column names to all overwrites for those
columns.
next_qtr_start_idx : int
The index of the first day of the next quarter in the calendar
dates.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter; this
is particularly useful for getting adjustments for 'next'
estimates.
quarters_with_estimates_for_sid : np.array
An array of all quarters for which there are estimates for the
given sid.
requested_quarter : float
The quarter for which the overwrite should be created.
sid : int
The sid for which to create overwrites.
sid_idx : int
The index of the sid in `assets`.
columns : list of BoundColumn
The columns for which to create overwrites.
"""
for col in columns:
column_name = self.name_map[col.name]
if column_name not in col_to_overwrites:
col_to_overwrites[column_name] = {}
# If there are estimates for the requested quarter,
# overwrite all values going up to the starting index of
# that quarter with estimates for that quarter.
if requested_quarter in quarters_with_estimates_for_sid:
adjs = self.create_overwrite_for_estimate(
col,
column_name,
last_per_qtr,
next_qtr_start_idx,
requested_quarter,
sid,
sid_idx,
)
add_new_adjustments(col_to_overwrites,
adjs,
column_name,
next_qtr_start_idx)
# There are no estimates for the quarter. Overwrite all
# values going up to the starting index of that quarter
# with the missing value for this column.
else:
adjs = [self.overwrite_with_null(
col,
next_qtr_start_idx,
sid_idx)]
add_new_adjustments(col_to_overwrites,
adjs,
column_name,
next_qtr_start_idx) | [
"def",
"create_overwrites_for_quarter",
"(",
"self",
",",
"col_to_overwrites",
",",
"next_qtr_start_idx",
",",
"last_per_qtr",
",",
"quarters_with_estimates_for_sid",
",",
"requested_quarter",
",",
"sid",
",",
"sid_idx",
",",
"columns",
")",
":",
"for",
"col",
"in",
... | 44.888889 | 16.083333 |
def download_large(self, image, url_field='url'):
"""Downlaod the binary data of an image attachment at large size.
:param str url_field: the field of the image with the right URL
:return: binary image data
:rtype: bytes
"""
return self.download(image, url_field=url_field, suffix='large') | [
"def",
"download_large",
"(",
"self",
",",
"image",
",",
"url_field",
"=",
"'url'",
")",
":",
"return",
"self",
".",
"download",
"(",
"image",
",",
"url_field",
"=",
"url_field",
",",
"suffix",
"=",
"'large'",
")"
] | 36.777778 | 19.666667 |
def UploadSignedConfigBlob(content,
aff4_path,
client_context=None,
limit=None,
token=None):
"""Upload a signed blob into the datastore.
Args:
content: File content to upload.
aff4_path: aff4 path to upload to.
client_context: The configuration contexts to use.
limit: The maximum size of the chunk to use.
token: A security token.
Raises:
IOError: On failure to write.
"""
if limit is None:
limit = config.CONFIG["Datastore.maximum_blob_size"]
# Get the values of these parameters which apply to the client running on the
# target platform.
if client_context is None:
# Default to the windows client.
client_context = ["Platform:Windows", "Client Context"]
config.CONFIG.Validate(
parameters="PrivateKeys.executable_signing_private_key")
signing_key = config.CONFIG.Get(
"PrivateKeys.executable_signing_private_key", context=client_context)
verification_key = config.CONFIG.Get(
"Client.executable_signing_public_key", context=client_context)
signed_binary_utils.WriteSignedBinary(
rdfvalue.RDFURN(aff4_path),
content,
signing_key,
public_key=verification_key,
chunk_size=limit,
token=token)
logging.info("Uploaded to %s", aff4_path) | [
"def",
"UploadSignedConfigBlob",
"(",
"content",
",",
"aff4_path",
",",
"client_context",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"token",
"=",
"None",
")",
":",
"if",
"limit",
"is",
"None",
":",
"limit",
"=",
"config",
".",
"CONFIG",
"[",
"\"Datast... | 30.068182 | 17.977273 |
def get_ansible_groups(group_map):
"""
Constructs a list of :class:`ansible.inventory.group.Group` objects from a
map of lists of host strings.
"""
# Some of this logic is cribbed from
# ansible.inventory.script.InventoryScript
all_hosts = {}
group_all = Group('all')
groups = [group_all]
for gname, hosts in group_map.iteritems():
g = Group(gname)
for host in hosts:
h = all_hosts.get(host, Host(host))
all_hosts[host] = h
g.add_host(h)
group_all.add_host(h)
group_all.add_child_group(g)
groups.append(g)
return groups | [
"def",
"get_ansible_groups",
"(",
"group_map",
")",
":",
"# Some of this logic is cribbed from",
"# ansible.inventory.script.InventoryScript",
"all_hosts",
"=",
"{",
"}",
"group_all",
"=",
"Group",
"(",
"'all'",
")",
"groups",
"=",
"[",
"group_all",
"]",
"for",
"gname... | 29.714286 | 12.571429 |
def visit_Set(self, pattern):
""" Set have unordered values. """
if len(pattern.elts) > MAX_UNORDERED_LENGTH:
raise DamnTooLongPattern("Pattern for Set is too long")
return (isinstance(self.node, Set) and
any(self.check_list(self.node.elts, pattern_elts)
for pattern_elts in permutations(pattern.elts))) | [
"def",
"visit_Set",
"(",
"self",
",",
"pattern",
")",
":",
"if",
"len",
"(",
"pattern",
".",
"elts",
")",
">",
"MAX_UNORDERED_LENGTH",
":",
"raise",
"DamnTooLongPattern",
"(",
"\"Pattern for Set is too long\"",
")",
"return",
"(",
"isinstance",
"(",
"self",
".... | 52.714286 | 15.571429 |
def wait_until(obj, att, desired, callback=None, interval=5, attempts=0,
verbose=False, verbose_atts=None):
"""
When changing the state of an object, it will commonly be in a transitional
state until the change is complete. This will reload the object every
`interval` seconds, and check its `att` attribute until the `desired` value
is reached, or until the maximum number of attempts is reached. The updated
object is returned. It is up to the calling program to check the returned
object to make sure that it successfully reached the desired state.
Once the desired value of the attribute is reached, the method returns. If
not, it will re-try until the attribute's value matches one of the
`desired` values. By default (attempts=0) it will loop infinitely until the
attribute reaches the desired value. You can optionally limit the number of
times that the object is reloaded by passing a positive value to
`attempts`. If the attribute has not reached the desired value by then, the
method will exit.
If `verbose` is True, each attempt will print out the current value of the
watched attribute and the time that has elapsed since the original request.
Also, if `verbose_atts` is specified, the values of those attributes will
also be output. If `verbose` is False, then `verbose_atts` has no effect.
Note that `desired` can be a list of values; if the attribute becomes equal
to any of those values, this will succeed. For example, when creating a new
cloud server, it will initially have a status of 'BUILD', and you can't
work with it until its status is 'ACTIVE'. However, there might be a
problem with the build process, and the server will change to a status of
'ERROR'. So for this case you need to set the `desired` parameter to
`['ACTIVE', 'ERROR']`. If you simply pass 'ACTIVE' as the desired state,
this will loop indefinitely if a build fails, as the server will never
reach a status of 'ACTIVE'.
Since this process of waiting can take a potentially long time, and will
block your program's execution until the desired state of the object is
reached, you may specify a callback function. The callback can be any
callable that accepts a single parameter; the parameter it receives will be
either the updated object (success), or None (failure). If a callback is
specified, the program will return immediately after spawning the wait
process in a separate thread.
"""
if callback:
waiter = _WaitThread(obj=obj, att=att, desired=desired, callback=callback,
interval=interval, attempts=attempts, verbose=verbose,
verbose_atts=verbose_atts)
waiter.start()
return waiter
else:
return _wait_until(obj=obj, att=att, desired=desired, callback=None,
interval=interval, attempts=attempts, verbose=verbose,
verbose_atts=verbose_atts) | [
"def",
"wait_until",
"(",
"obj",
",",
"att",
",",
"desired",
",",
"callback",
"=",
"None",
",",
"interval",
"=",
"5",
",",
"attempts",
"=",
"0",
",",
"verbose",
"=",
"False",
",",
"verbose_atts",
"=",
"None",
")",
":",
"if",
"callback",
":",
"waiter"... | 57.686275 | 30.235294 |
def mac_address_table_consistency_check_mac_consistency_check_interval(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
mac_address_table = ET.SubElement(config, "mac-address-table", xmlns="urn:brocade.com:mgmt:brocade-mac-address-table")
consistency_check = ET.SubElement(mac_address_table, "consistency-check")
mac_consistency_check_interval = ET.SubElement(consistency_check, "mac-consistency-check-interval")
mac_consistency_check_interval.text = kwargs.pop('mac_consistency_check_interval')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"mac_address_table_consistency_check_mac_consistency_check_interval",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"mac_address_table",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"mac-... | 59.727273 | 32.727273 |
def _setup_configuration(self):
"""
All steps are accepted as classes. Instantiate them with the right
configuration and set them in a local property.
"""
self.configuration = dict(
schema_cls=self.schema_cls,
allowed_actions=self.allowed_actions,
filter_by_fields=self.filter_by_fields)
authentication = self.authentication_cls(schema_cls=self.schema_cls)
authorization = self.authorization_cls()
schema_validation = self.schema_validation_cls(
schema_cls=self.schema_cls)
view_actions = self.view_actions_cls()
post_action_hooks = self.post_action_hooks_cls(
schema_cls=self.schema_cls)
response_converter = self.response_converter_cls(
schema_cls=self.schema_cls)
serializer = self.serializer_cls(schema_cls=self.schema_cls)
data_cleaner = self.data_cleaner_cls(schema_cls=self.schema_cls)
self.configuration.update(dict(
authentication=authentication,
authorization=authorization,
schema_validation=schema_validation,
view_actions=view_actions,
post_action_hooks=post_action_hooks,
response_converter=response_converter,
data_cleaner=data_cleaner,
serializer=serializer)) | [
"def",
"_setup_configuration",
"(",
"self",
")",
":",
"self",
".",
"configuration",
"=",
"dict",
"(",
"schema_cls",
"=",
"self",
".",
"schema_cls",
",",
"allowed_actions",
"=",
"self",
".",
"allowed_actions",
",",
"filter_by_fields",
"=",
"self",
".",
"filter_... | 42.806452 | 11.709677 |
def set(*args, **kw):
"""Set IRAF environment variables."""
if len(args) == 0:
if len(kw) != 0:
# normal case is only keyword,value pairs
for keyword, value in kw.items():
keyword = untranslateName(keyword)
svalue = str(value)
_varDict[keyword] = svalue
else:
# set with no arguments lists all variables (using same format
# as IRAF)
listVars(prefix=" ", equals="=")
else:
# The only other case allowed is the peculiar syntax
# 'set @filename', which only gets used in the zzsetenv.def file,
# where it reads extern.pkg. That file also gets read (in full cl
# mode) by clpackage.cl. I get errors if I read this during
# zzsetenv.def, so just ignore it here...
#
# Flag any other syntax as an error.
if (len(args) != 1 or len(kw) != 0 or
not isinstance(args[0], string_types) or args[0][:1] != '@'):
raise SyntaxError("set requires name=value pairs") | [
"def",
"set",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"0",
":",
"if",
"len",
"(",
"kw",
")",
"!=",
"0",
":",
"# normal case is only keyword,value pairs",
"for",
"keyword",
",",
"value",
"in",
"kw",
".",... | 42.36 | 18.72 |
def send(self, sender: PytgbotApiBot):
"""
Send the message via pytgbot.
:param sender: The bot instance to send with.
:type sender: pytgbot.bot.Bot
:rtype: PytgbotApiMessage
"""
return sender.send_document(
# receiver, self.media, disable_notification=self.disable_notification, reply_to_message_id=reply_id
document=self.document, chat_id=self.receiver, reply_to_message_id=self.reply_id, thumb=self.thumb, caption=self.caption, parse_mode=self.parse_mode, disable_notification=self.disable_notification, reply_markup=self.reply_markup
) | [
"def",
"send",
"(",
"self",
",",
"sender",
":",
"PytgbotApiBot",
")",
":",
"return",
"sender",
".",
"send_document",
"(",
"# receiver, self.media, disable_notification=self.disable_notification, reply_to_message_id=reply_id",
"document",
"=",
"self",
".",
"document",
",",
... | 47.538462 | 31.846154 |
def synthese(self, month=None):
"""
month format: YYYYMM
"""
if month is None and self.legislature == '2012-2017':
raise AssertionError('Global Synthesis on legislature does not work, see https://github.com/regardscitoyens/nosdeputes.fr/issues/69')
if month is None:
month = 'data'
url = '%s/synthese/%s/%s' % (self.base_url, month, self.format)
data = requests.get(url).json()
return [depute[self.ptype] for depute in data[self.ptype_plural]] | [
"def",
"synthese",
"(",
"self",
",",
"month",
"=",
"None",
")",
":",
"if",
"month",
"is",
"None",
"and",
"self",
".",
"legislature",
"==",
"'2012-2017'",
":",
"raise",
"AssertionError",
"(",
"'Global Synthesis on legislature does not work, see https://github.com/regar... | 37.214286 | 25.785714 |
def add_group_email_grant(self, permission, email_address, headers=None):
"""
Convenience method that provides a quick way to add an email group
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type email_address: string
:param email_address: The email address associated with the Google
Group to which you are granting the permission.
"""
acl = self.get_acl(headers=headers)
acl.add_group_email_grant(permission, email_address)
self.set_acl(acl, headers=headers) | [
"def",
"add_group_email_grant",
"(",
"self",
",",
"permission",
",",
"email_address",
",",
"headers",
"=",
"None",
")",
":",
"acl",
"=",
"self",
".",
"get_acl",
"(",
"headers",
"=",
"headers",
")",
"acl",
".",
"add_group_email_grant",
"(",
"permission",
",",... | 47.45 | 20.75 |
def fiscal_code(self, gender: Optional[Gender] = None) -> str:
"""Return a random fiscal code.
:param gender: Gender's enum object.
:return: Fiscal code.
Example:
RSSMRA66R05D612U
"""
code = ''.join(self.random.choices(string.ascii_uppercase, k=6))
code += self.random.custom_code(mask='##')
month_codes = self._data['fiscal_code']['month_codes']
code += self.random.choice(month_codes)
birth_day = self.random.randint(101, 131)
self._validate_enum(gender, Gender)
if gender == Gender.FEMALE:
birth_day += 40
code += str(birth_day)[1:]
city_letters = self._data['fiscal_code']['city_letters']
code += self.random.choice(city_letters)
code += self.random.custom_code(mask='###@')
return code | [
"def",
"fiscal_code",
"(",
"self",
",",
"gender",
":",
"Optional",
"[",
"Gender",
"]",
"=",
"None",
")",
"->",
"str",
":",
"code",
"=",
"''",
".",
"join",
"(",
"self",
".",
"random",
".",
"choices",
"(",
"string",
".",
"ascii_uppercase",
",",
"k",
... | 30.777778 | 19.444444 |
def result(self):
""" Get the table used for the results of the query. If the query is incomplete, this blocks.
Raises:
Exception if we timed out waiting for results or the query failed.
"""
self.wait()
if self.failed:
raise Exception('Query failed: %s' % str(self.errors))
return self._table | [
"def",
"result",
"(",
"self",
")",
":",
"self",
".",
"wait",
"(",
")",
"if",
"self",
".",
"failed",
":",
"raise",
"Exception",
"(",
"'Query failed: %s'",
"%",
"str",
"(",
"self",
".",
"errors",
")",
")",
"return",
"self",
".",
"_table"
] | 32 | 20.8 |
def avl_release_parent(node):
"""
removes the parent of a child
"""
parent = node.parent
if parent is not None:
if parent.right is node:
parent.right = None
elif parent.left is node:
parent.left = None
else:
raise AssertionError('impossible state')
node.parent = None
parent.balance = max(height(parent.right), height(parent.left)) + 1
return node, parent | [
"def",
"avl_release_parent",
"(",
"node",
")",
":",
"parent",
"=",
"node",
".",
"parent",
"if",
"parent",
"is",
"not",
"None",
":",
"if",
"parent",
".",
"right",
"is",
"node",
":",
"parent",
".",
"right",
"=",
"None",
"elif",
"parent",
".",
"left",
"... | 29.4 | 12.466667 |
def tree_statistics(tree):
"""
prints the types and counts of elements present in a SaltDocument tree,
e.g.::
layers: 3
sDocument: 1
nodes: 252
labels: 2946
edges: 531
"""
all_elements = tree.findall('//')
tag_counter = defaultdict(int)
for element in all_elements:
tag_counter[element.tag] += 1
for (tag, counts) in tag_counter.items():
print "{0}: {1}".format(tag, counts) | [
"def",
"tree_statistics",
"(",
"tree",
")",
":",
"all_elements",
"=",
"tree",
".",
"findall",
"(",
"'//'",
")",
"tag_counter",
"=",
"defaultdict",
"(",
"int",
")",
"for",
"element",
"in",
"all_elements",
":",
"tag_counter",
"[",
"element",
".",
"tag",
"]",... | 24.833333 | 16.388889 |
def gather_facts_list(self, file):
"""
Return a list of facts.
"""
facts = []
contents = utils.file_to_string(os.path.join(self.paths["role"],
file))
contents = re.sub(r"\s+", "", contents)
matches = self.regex_facts.findall(contents)
for match in matches:
facts.append(match.split(":")[1])
return facts | [
"def",
"gather_facts_list",
"(",
"self",
",",
"file",
")",
":",
"facts",
"=",
"[",
"]",
"contents",
"=",
"utils",
".",
"file_to_string",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"paths",
"[",
"\"role\"",
"]",
",",
"file",
")",
")",
"c... | 27.733333 | 16.666667 |
def run_cmds_on_all_switches(self, cmds):
"""Runs all cmds on all configured switches
This helper is used for ACL and rule creation/deletion as ACLs
and rules must exist on all switches.
"""
for switch in self._switches.values():
self.run_openstack_sg_cmds(cmds, switch) | [
"def",
"run_cmds_on_all_switches",
"(",
"self",
",",
"cmds",
")",
":",
"for",
"switch",
"in",
"self",
".",
"_switches",
".",
"values",
"(",
")",
":",
"self",
".",
"run_openstack_sg_cmds",
"(",
"cmds",
",",
"switch",
")"
] | 39.5 | 11.75 |
def WriteClientMetadata(self,
client_id,
certificate=None,
fleetspeak_enabled=None,
first_seen=None,
last_ping=None,
last_clock=None,
last_ip=None,
last_foreman=None,
cursor=None):
"""Write metadata about the client."""
placeholders = []
values = collections.OrderedDict()
placeholders.append("%(client_id)s")
values["client_id"] = db_utils.ClientIDToInt(client_id)
if certificate:
placeholders.append("%(certificate)s")
values["certificate"] = certificate.SerializeToString()
if fleetspeak_enabled is not None:
placeholders.append("%(fleetspeak_enabled)s")
values["fleetspeak_enabled"] = fleetspeak_enabled
if first_seen is not None:
placeholders.append("FROM_UNIXTIME(%(first_seen)s)")
values["first_seen"] = mysql_utils.RDFDatetimeToTimestamp(first_seen)
if last_ping is not None:
placeholders.append("FROM_UNIXTIME(%(last_ping)s)")
values["last_ping"] = mysql_utils.RDFDatetimeToTimestamp(last_ping)
if last_clock:
placeholders.append("FROM_UNIXTIME(%(last_clock)s)")
values["last_clock"] = mysql_utils.RDFDatetimeToTimestamp(last_clock)
if last_ip:
placeholders.append("%(last_ip)s")
values["last_ip"] = last_ip.SerializeToString()
if last_foreman:
placeholders.append("FROM_UNIXTIME(%(last_foreman)s)")
values["last_foreman"] = mysql_utils.RDFDatetimeToTimestamp(last_foreman)
updates = []
for column in iterkeys(values):
updates.append("{column} = VALUES({column})".format(column=column))
query = """
INSERT INTO clients ({columns})
VALUES ({placeholders})
ON DUPLICATE KEY UPDATE {updates}
""".format(
columns=", ".join(iterkeys(values)),
placeholders=", ".join(placeholders),
updates=", ".join(updates))
cursor.execute(query, values) | [
"def",
"WriteClientMetadata",
"(",
"self",
",",
"client_id",
",",
"certificate",
"=",
"None",
",",
"fleetspeak_enabled",
"=",
"None",
",",
"first_seen",
"=",
"None",
",",
"last_ping",
"=",
"None",
",",
"last_clock",
"=",
"None",
",",
"last_ip",
"=",
"None",
... | 38.169811 | 14.584906 |
def create_appointment_group(self, appointment_group, **kwargs):
"""
Create a new Appointment Group.
:calls: `POST /api/v1/appointment_groups \
<https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.create>`_
:param appointment_group: The attributes of the appointment group.
:type appointment_group: `dict`
:param title: The title of the appointment group.
:type title: `str`
:rtype: :class:`canvasapi.appointment_group.AppointmentGroup`
"""
from canvasapi.appointment_group import AppointmentGroup
if (
isinstance(appointment_group, dict) and
'context_codes' in appointment_group and
'title' in appointment_group
):
kwargs['appointment_group'] = appointment_group
elif (
isinstance(appointment_group, dict) and
'context_codes' not in appointment_group
):
raise RequiredFieldMissing(
"Dictionary with key 'context_codes' is missing."
)
elif isinstance(appointment_group, dict) and 'title' not in appointment_group:
raise RequiredFieldMissing("Dictionary with key 'title' is missing.")
response = self.__requester.request(
'POST',
'appointment_groups',
_kwargs=combine_kwargs(**kwargs)
)
return AppointmentGroup(self.__requester, response.json()) | [
"def",
"create_appointment_group",
"(",
"self",
",",
"appointment_group",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"canvasapi",
".",
"appointment_group",
"import",
"AppointmentGroup",
"if",
"(",
"isinstance",
"(",
"appointment_group",
",",
"dict",
")",
"and",
... | 36.825 | 23.125 |
def getContextsForExpressions(self, body, getFingerprint=None, startIndex=0, maxResults=5, sparsity=1.0):
"""Bulk get contexts for input expressions
Args:
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
startIndex, int: The start-index for pagination (optional)
maxResults, int: Max results per page (optional)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns:
list of Context
Raises:
CorticalioException: if the request was not successful
"""
return self._expressions.getContextsForBulkExpression(self._retina, body, getFingerprint, startIndex, maxResults, sparsity) | [
"def",
"getContextsForExpressions",
"(",
"self",
",",
"body",
",",
"getFingerprint",
"=",
"None",
",",
"startIndex",
"=",
"0",
",",
"maxResults",
"=",
"5",
",",
"sparsity",
"=",
"1.0",
")",
":",
"return",
"self",
".",
"_expressions",
".",
"getContextsForBulk... | 61.714286 | 35.785714 |
def w(self, units=None):
"""
This returns a single array containing the phase-space positions.
Parameters
----------
units : `~gala.units.UnitSystem` (optional)
The unit system to represent the position and velocity in
before combining into the full array.
Returns
-------
w : `~numpy.ndarray`
A numpy array of all positions and velocities, without units.
Will have shape ``(2*ndim,...)``.
"""
if units is None:
if self.hamiltonian is None:
units = dimensionless
else:
units = self.hamiltonian.units
return super(Orbit, self).w(units=units) | [
"def",
"w",
"(",
"self",
",",
"units",
"=",
"None",
")",
":",
"if",
"units",
"is",
"None",
":",
"if",
"self",
".",
"hamiltonian",
"is",
"None",
":",
"units",
"=",
"dimensionless",
"else",
":",
"units",
"=",
"self",
".",
"hamiltonian",
".",
"units",
... | 28.52 | 19.88 |
def write(self, fptr):
"""Write a JPEG 2000 Signature box to file.
"""
fptr.write(struct.pack('>I4s', 12, b'jP '))
fptr.write(struct.pack('>BBBB', *self.signature)) | [
"def",
"write",
"(",
"self",
",",
"fptr",
")",
":",
"fptr",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"'>I4s'",
",",
"12",
",",
"b'jP '",
")",
")",
"fptr",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"'>BBBB'",
",",
"*",
"self",
".",
"... | 38.6 | 9.4 |
def anonymous_login_view(request):
''' View for an admin to log her/himself out and login the anonymous user. '''
logout(request)
try:
spineless = User.objects.get(username=ANONYMOUS_USERNAME)
except User.DoesNotExist:
random_password = User.objects.make_random_password()
spineless = User.objects.create_user(username=ANONYMOUS_USERNAME, first_name="Anonymous", last_name="Coward", password=random_password)
spineless.is_active = False
spineless.save()
spineless_profile = UserProfile.objects.get(user=spineless)
spineless_profile.status = UserProfile.ALUMNUS
spineless_profile.save()
spineless.backend = 'django.contrib.auth.backends.ModelBackend'
login(request, spineless)
request.session['ANONYMOUS_SESSION'] = True
messages.add_message(request, messages.INFO, MESSAGES['ANONYMOUS_LOGIN'])
return HttpResponseRedirect(reverse('homepage')) | [
"def",
"anonymous_login_view",
"(",
"request",
")",
":",
"logout",
"(",
"request",
")",
"try",
":",
"spineless",
"=",
"User",
".",
"objects",
".",
"get",
"(",
"username",
"=",
"ANONYMOUS_USERNAME",
")",
"except",
"User",
".",
"DoesNotExist",
":",
"random_pas... | 51.388889 | 23.611111 |
def appendData(self, xdata, ydata, color='b', legendstr=None):
"""Adds the data to the plot
:param xdata: index values for data, plotted on x-axis
:type xdata: numpy.ndarray
:param ydata: value data to plot, dimension must match xdata
:type ydata: numpy.ndarray
"""
item = self.plot(xdata, ydata, pen=color)
if legendstr is not None:
self.legend.addItem(item, legendstr)
return item | [
"def",
"appendData",
"(",
"self",
",",
"xdata",
",",
"ydata",
",",
"color",
"=",
"'b'",
",",
"legendstr",
"=",
"None",
")",
":",
"item",
"=",
"self",
".",
"plot",
"(",
"xdata",
",",
"ydata",
",",
"pen",
"=",
"color",
")",
"if",
"legendstr",
"is",
... | 38 | 14.083333 |
def container_rename_folder(object_id, input_params={}, always_retry=False, **kwargs):
"""
Invokes the /container-xxxx/renameFolder API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2FrenameFolder
"""
return DXHTTPRequest('/%s/renameFolder' % object_id, input_params, always_retry=always_retry, **kwargs) | [
"def",
"container_rename_folder",
"(",
"object_id",
",",
"input_params",
"=",
"{",
"}",
",",
"always_retry",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"DXHTTPRequest",
"(",
"'/%s/renameFolder'",
"%",
"object_id",
",",
"input_params",
",",
"alw... | 57.285714 | 38.142857 |
def parse(file_path):
'''Parse a YAML or JSON file.'''
_, ext = path.splitext(file_path)
if ext in ('.yaml', '.yml'):
func = yaml.load
elif ext == '.json':
func = json.load
else:
raise ValueError("Unrecognized config file type %s" % ext)
with open(file_path, 'r') as f:
return func(f) | [
"def",
"parse",
"(",
"file_path",
")",
":",
"_",
",",
"ext",
"=",
"path",
".",
"splitext",
"(",
"file_path",
")",
"if",
"ext",
"in",
"(",
"'.yaml'",
",",
"'.yml'",
")",
":",
"func",
"=",
"yaml",
".",
"load",
"elif",
"ext",
"==",
"'.json'",
":",
"... | 20.625 | 22.625 |
def reorder_resource_views(self, resource_views):
# type: (List[Union[ResourceView,Dict,str]]) -> None
"""Order resource views in resource.
Args:
resource_views (List[Union[ResourceView,Dict,str]]): A list of either resource view ids or resource views metadata from ResourceView objects or dictionaries
Returns:
None
"""
if not isinstance(resource_views, list):
raise HDXError('ResourceViews should be a list!')
ids = list()
for resource_view in resource_views:
if isinstance(resource_view, str):
resource_view_id = resource_view
else:
resource_view_id = resource_view['id']
if is_valid_uuid(resource_view_id) is False:
raise HDXError('%s is not a valid resource view id!' % resource_view)
ids.append(resource_view_id)
_, result = self._read_from_hdx('resource view', self.data['id'], 'id',
ResourceView.actions()['reorder'], order=ids) | [
"def",
"reorder_resource_views",
"(",
"self",
",",
"resource_views",
")",
":",
"# type: (List[Union[ResourceView,Dict,str]]) -> None",
"if",
"not",
"isinstance",
"(",
"resource_views",
",",
"list",
")",
":",
"raise",
"HDXError",
"(",
"'ResourceViews should be a list!'",
"... | 46.086957 | 24.391304 |
def _create_session(self, username, password):
    """Create HTTP session.

    Args:
        username (str): Timesketch username
        password (str): Timesketch password

    Returns:
        requests.Session: Session object, or False if the host is unreachable.
    """
    session = requests.Session()
    # NOTE: certificate verification is disabled (e.g. self-signed certs).
    session.verify = False
    try:
        response = session.get(self.host_url)
    except requests.exceptions.ConnectionError:
        return False
    # Scrape the CSRF token out of the login form so the POST is accepted.
    soup = BeautifulSoup(response.text, 'html.parser')
    csrf_token = soup.find('input', dict(name='csrf_token'))['value']
    session.headers.update({
        'x-csrftoken': csrf_token,
        'referer': self.host_url
    })
    _ = session.post('{0:s}/login/'.format(self.host_url),
                     data=dict(username=username, password=password))
    return session
"def",
"_create_session",
"(",
"self",
",",
"username",
",",
"password",
")",
":",
"session",
"=",
"requests",
".",
"Session",
"(",
")",
"session",
".",
"verify",
"=",
"False",
"# Depending on SSL cert is verifiable",
"try",
":",
"response",
"=",
"session",
".... | 32.769231 | 16.384615 |
def run(self, node):
    """
    Captures the use of locals() in render function.

    Returns a list of DJ03 issues for each ``locals()`` call passed as an
    argument, or None when the visited call is not ``render``.
    """
    if self.get_call_name(node) != 'render':
        return
    issues = []
    for arg in node.args:
        # Guard with isinstance(arg.func, ast.Name): an argument such as
        # ``obj.locals()`` has an ast.Attribute func with no ``.id``
        # attribute, which previously raised AttributeError here.
        if (isinstance(arg, ast.Call)
                and isinstance(arg.func, ast.Name)
                and arg.func.id == 'locals'):
            issues.append(
                DJ03(
                    lineno=node.lineno,
                    col=node.col_offset,
                )
            )
    return issues
"def",
"run",
"(",
"self",
",",
"node",
")",
":",
"if",
"self",
".",
"get_call_name",
"(",
"node",
")",
"!=",
"'render'",
":",
"return",
"issues",
"=",
"[",
"]",
"for",
"arg",
"in",
"node",
".",
"args",
":",
"if",
"isinstance",
"(",
"arg",
",",
"... | 30.125 | 13.75 |
def revoke_tokens(self):
    """
    Revoke the authorization token and all tokens that were generated using it.
    """
    # Deactivate and persist this token first, then cascade the revocation
    # to every token derived from its refresh token.
    self.is_active = False
    self.save()
    self.refresh_token.revoke_tokens()
"def",
"revoke_tokens",
"(",
"self",
")",
":",
"self",
".",
"is_active",
"=",
"False",
"self",
".",
"save",
"(",
")",
"self",
".",
"refresh_token",
".",
"revoke_tokens",
"(",
")"
] | 24.444444 | 19.111111 |
def p_primary_expr_no_brace_4(self, p):
        """primary_expr_no_brace : LPAREN expr RPAREN"""
        # NOTE: the docstring above is the ply grammar rule for this
        # production and must not be edited.
        # Collapse nested parentheses: if the inner expression is already a
        # GroupingOp, reuse it instead of wrapping it in another one.
        if isinstance(p[2], self.asttypes.GroupingOp):
            # this reduces the grouping operator to one.
            # The reused node keeps its original position information.
            p[0] = p[2]
        else:
            p[0] = self.asttypes.GroupingOp(expr=p[2])
            p[0].setpos(p)
"def",
"p_primary_expr_no_brace_4",
"(",
"self",
",",
"p",
")",
":",
"if",
"isinstance",
"(",
"p",
"[",
"2",
"]",
",",
"self",
".",
"asttypes",
".",
"GroupingOp",
")",
":",
"# this reduces the grouping operator to one.",
"p",
"[",
"0",
"]",
"=",
"p",
"[",
... | 40.125 | 12.875 |
def create(self, container, instances=None, map_name=None, **kwargs):
    """
    Creates container instances for a container configuration.

    :param container: Container name.
    :type container: unicode | str
    :param instances: Instance name to create. If not specified, will create all instances as specified in the
     configuration (or just one default instance).
    :type instances: tuple | list
    :param map_name: Container map name. Optional - if not provided the default map is used.
    :type map_name: unicode | str
    :param kwargs: Additional kwargs. If multiple actions are resulting from this, they will only be applied to
     the main container creation.
    :return: Return values of created containers.
    :rtype: list[dockermap.map.runner.ActionOutput]
    """
    # Thin convenience wrapper: delegate to the generic action runner.
    return self.run_actions('create', container, instances=instances,
                            map_name=map_name, **kwargs)
"def",
"create",
"(",
"self",
",",
"container",
",",
"instances",
"=",
"None",
",",
"map_name",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"run_actions",
"(",
"'create'",
",",
"container",
",",
"instances",
"=",
"instances",
... | 55.117647 | 24.411765 |
def get_default_options(num_machines=1, max_wallclock_seconds=1800, withmpi=False):
    """
    Build the minimal options dictionary required for a JobCalculation,
    populated with defaults unless overridden.

    :param num_machines: set the number of nodes, default=1
    :param max_wallclock_seconds: set the maximum number of wallclock seconds, default=1800
    :param withmpi: if True the calculation will be run in MPI mode
    """
    options = {'resources': {'num_machines': int(num_machines)}}
    options['max_wallclock_seconds'] = int(max_wallclock_seconds)
    options['withmpi'] = withmpi
    return options
"def",
"get_default_options",
"(",
"num_machines",
"=",
"1",
",",
"max_wallclock_seconds",
"=",
"1800",
",",
"withmpi",
"=",
"False",
")",
":",
"return",
"{",
"'resources'",
":",
"{",
"'num_machines'",
":",
"int",
"(",
"num_machines",
")",
"}",
",",
"'max_wa... | 40.5625 | 25.1875 |
def texto_decimal(valor, remover_zeros=True):
    """Convert a :py:class:`decimal.Decimal` value to locale-aware text,
    optionally stripping non-significant trailing zeros (including the
    decimal separator when nothing remains after it).

    :param decimal.Decimal valor: value to convert to text.
    :param bool remover_zeros: *Optional* whether trailing non-significant
        zeros should be removed from the text.
    """
    texto = '{:n}'.format(valor)
    if remover_zeros:
        separador = locale.localeconv().get('decimal_point')
        if separador in texto:
            texto = texto.rstrip('0').rstrip(separador)
    return texto
"def",
"texto_decimal",
"(",
"valor",
",",
"remover_zeros",
"=",
"True",
")",
":",
"texto",
"=",
"'{:n}'",
".",
"format",
"(",
"valor",
")",
"if",
"remover_zeros",
":",
"dp",
"=",
"locale",
".",
"localeconv",
"(",
")",
".",
"get",
"(",
"'decimal_point'",... | 43.352941 | 21.117647 |
def get_scaled_cutout_wdht(self, x1, y1, x2, y2, new_wd, new_ht,
                           method='basic'):
    """Extract the region bounded by corners (x1, y1) and (x2, y2) and
    resample it to dimensions (new_wd, new_ht).

    `method` selects the interpolation used; the default "basic" is
    nearest neighbor.
    """
    if method not in ('basic', 'view'):
        # Full interpolation path: operate on the materialized data array.
        data_np = self._get_data()
        (newdata, (scale_x, scale_y)) = \
            trcalc.get_scaled_cutout_wdht(data_np, x1, y1, x2, y2,
                                          new_wd, new_ht,
                                          interpolation=method,
                                          logger=self.logger)
    else:
        # Fast path: compute a strided view of the region and slice it.
        (view, (scale_x, scale_y)) = \
            trcalc.get_scaled_cutout_wdht_view(self.shape, x1, y1, x2, y2,
                                               new_wd, new_ht)
        newdata = self._slice(view)
    return Bunch.Bunch(data=newdata, scale_x=scale_x, scale_y=scale_y)
"def",
"get_scaled_cutout_wdht",
"(",
"self",
",",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
",",
"new_wd",
",",
"new_ht",
",",
"method",
"=",
"'basic'",
")",
":",
"if",
"method",
"in",
"(",
"'basic'",
",",
"'view'",
")",
":",
"shp",
"=",
"self",
".",... | 41.148148 | 20.666667 |
def _gcs_list_keys(bucket, pattern):
    """ List all Google Cloud Storage keys in a specified bucket that match a pattern. """
    rows = []
    for obj in _gcs_get_keys(bucket, pattern):
        meta = obj.metadata
        rows.append({'Name': meta.name,
                     'Type': meta.content_type,
                     'Size': meta.size,
                     'Updated': meta.updated_on})
    # Render as a table with a fixed column order.
    return google.datalab.utils.commands.render_dictionary(
        rows, ['Name', 'Type', 'Size', 'Updated'])
"def",
"_gcs_list_keys",
"(",
"bucket",
",",
"pattern",
")",
":",
"data",
"=",
"[",
"{",
"'Name'",
":",
"obj",
".",
"metadata",
".",
"name",
",",
"'Type'",
":",
"obj",
".",
"metadata",
".",
"content_type",
",",
"'Size'",
":",
"obj",
".",
"metadata",
... | 55 | 11.5 |
def make_reply(msgname, types, arguments, major):
    """Helper method for constructing a reply message from a list or tuple.

    Parameters
    ----------
    msgname : str
        Name of the reply message.
    types : list of kattypes
        The types of the reply message parameters (in order).
    arguments : list of objects
        The (unpacked) reply message parameters.
    major : integer
        Major version of KATCP to use when packing types
    """
    status = arguments[0]
    # "fail" replies carry only the status and a message string; "ok"
    # replies carry the status followed by the declared parameter types.
    if status == "fail":
        reply_types = (Str(), Str())
    elif status == "ok":
        reply_types = (Str(),) + types
    else:
        raise ValueError("First returned value must be 'ok' or 'fail'.")
    return Message.reply(msgname, *pack_types(reply_types, arguments, major))
"def",
"make_reply",
"(",
"msgname",
",",
"types",
",",
"arguments",
",",
"major",
")",
":",
"status",
"=",
"arguments",
"[",
"0",
"]",
"if",
"status",
"==",
"\"fail\"",
":",
"return",
"Message",
".",
"reply",
"(",
"msgname",
",",
"*",
"pack_types",
"(... | 34.173913 | 17.913043 |
def list_dms(archive, compression, cmd, verbosity, interactive):
    """List a DMS archive."""
    # 'v' is the DMS "view contents" switch; compression, verbosity and
    # interactive are part of the uniform handler signature and unused here.
    check_archive_ext(archive)
    return [cmd, 'v', archive]
"def",
"list_dms",
"(",
"archive",
",",
"compression",
",",
"cmd",
",",
"verbosity",
",",
"interactive",
")",
":",
"check_archive_ext",
"(",
"archive",
")",
"return",
"[",
"cmd",
",",
"'v'",
",",
"archive",
"]"
] | 38.5 | 11.25 |
def add_organization(db, organization):
    """Add an organization to the registry.

    Checks first whether the organization is already on the registry; when
    it is not found, the new organization is added. Otherwise an
    'AlreadyExistsError' exception is raised by the underlying call.

    :param db: database manager
    :param organization: name of the organization
    :raises InvalidValueError: raised when organization is None or an empty string
    :raises AlreadyExistsError: raised when the organization already exists
        in the registry.
    """
    with db.connect() as session:
        try:
            add_organization_db(session, organization)
        except ValueError as exc:
            # Translate low-level validation failures into the API error type.
            raise InvalidValueError(exc)
"def",
"add_organization",
"(",
"db",
",",
"organization",
")",
":",
"with",
"db",
".",
"connect",
"(",
")",
"as",
"session",
":",
"try",
":",
"add_organization_db",
"(",
"session",
",",
"organization",
")",
"except",
"ValueError",
"as",
"e",
":",
"raise",... | 38.52381 | 20.238095 |
def GetValueByPath(self, path_segments):
    """Retrieves a plist value by path.

    Args:
        path_segments (list[str]): path segment strings relative to the root
            of the plist.

    Returns:
        object: the value of the key specified by the path, or None if any
            segment cannot be resolved or the final value is falsy.
    """
    current = self.root_key
    for segment in path_segments:
        if isinstance(current, dict):
            try:
                current = current[segment]
            except KeyError:
                return None
        elif isinstance(current, list):
            # List levels are addressed by a decimal index string.
            try:
                index = int(segment, 10)
            except ValueError:
                return None
            current = current[index]
        else:
            # Scalar reached before the path was exhausted.
            return None
    return current if current else None
"def",
"GetValueByPath",
"(",
"self",
",",
"path_segments",
")",
":",
"key",
"=",
"self",
".",
"root_key",
"for",
"path_segment",
"in",
"path_segments",
":",
"if",
"isinstance",
"(",
"key",
",",
"dict",
")",
":",
"try",
":",
"key",
"=",
"key",
"[",
"pa... | 21.090909 | 21.69697 |
def site_coordination_numbers(self):
    """
    Returns a dictionary of the coordination numbers for each site label. e.g.::

        { 'A' : { 4 }, 'B' : { 2, 4 } }

    Args:
        none

    Returns:
        coordination_numbers (Dict(Str:Set(Int))): dictionary of coordination
            numbers for each site label.
    """
    coordination_numbers = {}
    for label in self.site_labels:
        # Compare labels with `==` rather than identity (`is`): the previous
        # identity check only matched when both strings happened to be
        # interned, silently dropping sites otherwise.
        coordination_numbers[label] = set(
            len(site.neighbours) for site in self.sites
            if site.label == label)
    return coordination_numbers
"def",
"site_coordination_numbers",
"(",
"self",
")",
":",
"coordination_numbers",
"=",
"{",
"}",
"for",
"l",
"in",
"self",
".",
"site_labels",
":",
"coordination_numbers",
"[",
"l",
"]",
"=",
"set",
"(",
"[",
"len",
"(",
"site",
".",
"neighbours",
")",
... | 36.647059 | 24.294118 |
def get_active_clients():
    """Get a list of all active clients and their status"""
    global drivers
    if not drivers:
        # No clients registered yet: respond with an empty JSON array.
        return jsonify([])
    return jsonify({name: get_client_info(name) for name in drivers})
"def",
"get_active_clients",
"(",
")",
":",
"global",
"drivers",
"if",
"not",
"drivers",
":",
"return",
"jsonify",
"(",
"[",
"]",
")",
"result",
"=",
"{",
"client",
":",
"get_client_info",
"(",
"client",
")",
"for",
"client",
"in",
"drivers",
"}",
"retur... | 26.777778 | 21.555556 |
def volume_down(self, delta=0.1):
    """ Decrement the volume by 0.1 (or delta) unless it is already 0.
    Returns the new volume.
    """
    if delta <= 0:
        raise ValueError(
            "volume delta must be greater than zero, not {}".format(delta))
    new_level = self.status.volume_level - delta
    return self.set_volume(new_level)
"def",
"volume_down",
"(",
"self",
",",
"delta",
"=",
"0.1",
")",
":",
"if",
"delta",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"volume delta must be greater than zero, not {}\"",
".",
"format",
"(",
"delta",
")",
")",
"return",
"self",
".",
"set_volume",
... | 42.875 | 13.5 |
def send_at(self, value):
        """A unix timestamp specifying when your email should
        be delivered.

        :param value: A unix timestamp specifying when your email should
            be delivered. May also be a SendAt object, optionally targeting a
            specific personalization by index.
        :type value: SendAt, int
        """
        if isinstance(value, SendAt):
            if value.personalization is not None:
                # The SendAt object targets a personalization by list index.
                try:
                    personalization = \
                        self._personalizations[value.personalization]
                    has_internal_personalization = True
                except IndexError:
                    # Index not present yet: build a new personalization and
                    # remember it must be inserted at the requested index.
                    personalization = Personalization()
                    has_internal_personalization = False
                personalization.send_at = value.send_at
                if not has_internal_personalization:
                    self.add_personalization(
                        personalization, index=value.personalization)
            else:
                # No personalization index: applies to the whole message.
                self._send_at = value
        else:
            # Raw timestamp: wrap it in a SendAt object.
            self._send_at = SendAt(value)
"def",
"send_at",
"(",
"self",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"SendAt",
")",
":",
"if",
"value",
".",
"personalization",
"is",
"not",
"None",
":",
"try",
":",
"personalization",
"=",
"self",
".",
"_personalizations",
"[",
... | 37.923077 | 15.461538 |
def date_suggestions():
    """
    Returns a list of relative date strings that is presented to the user
    as auto complete suggestions.
    """
    # don't use strftime, prevent locales to kick in
    days_of_week = {
        0: "Monday",
        1: "Tuesday",
        2: "Wednesday",
        3: "Thursday",
        4: "Friday",
        5: "Saturday",
        6: "Sunday",
    }
    dates = [
        'today',
        'tomorrow',
    ]
    # Show the five weekday names from the day after tomorrow up to (but
    # not including) today's weekday next week. The original expression
    # `dow + 2 % 7` relied on precedence (`%` binds tighter than `+`) and
    # was always equal to `dow + 2`; the redundant `% 7` is dropped here.
    dow = datetime.date.today().weekday()
    for i in range(dow + 2, dow + 7):
        dates.append(days_of_week[i % 7])
    # and some more relative days starting from next week
    dates += ["1w", "2w", "1m", "2m", "3m", "1y"]
    return dates
"def",
"date_suggestions",
"(",
")",
":",
"# don't use strftime, prevent locales to kick in",
"days_of_week",
"=",
"{",
"0",
":",
"\"Monday\"",
",",
"1",
":",
"\"Tuesday\"",
",",
"2",
":",
"\"Wednesday\"",
",",
"3",
":",
"\"Thursday\"",
",",
"4",
":",
"\"Friday\... | 23.2 | 19.533333 |
def prepare_docset(
    source, dest, name, index_page, enable_js, online_redirect_url
):
    """
    Create boilerplate files & directories and copy vanilla docs inside.

    Return a tuple of path to resources and connection to sqlite db.
    """
    resources = os.path.join(dest, "Contents", "Resources")
    docs = os.path.join(resources, "Documents")
    os.makedirs(resources)

    # Dash's search index lives in an SQLite database inside Resources.
    db_conn = sqlite3.connect(os.path.join(resources, "docSet.dsidx"))
    db_conn.row_factory = sqlite3.Row
    db_conn.execute(
        "CREATE TABLE searchIndex(id INTEGER PRIMARY KEY, name TEXT, "
        "type TEXT, path TEXT)"
    )
    db_conn.commit()

    # Write the docset's Info.plist; optional keys only when provided.
    plist_path = os.path.join(dest, "Contents", "Info.plist")
    plist_cfg = {
        "CFBundleIdentifier": name,
        "CFBundleName": name,
        "DocSetPlatformFamily": name.lower(),
        "DashDocSetFamily": "python",
        "isDashDocset": True,
        "isJavaScriptEnabled": enable_js,
    }
    if index_page is not None:
        plist_cfg["dashIndexFilePath"] = index_page
    if online_redirect_url is not None:
        plist_cfg["DashDocSetFallbackURL"] = online_redirect_url
    write_plist(plist_cfg, plist_path)

    shutil.copytree(source, docs)
    return DocSet(path=dest, docs=docs, plist=plist_path, db_conn=db_conn)
"def",
"prepare_docset",
"(",
"source",
",",
"dest",
",",
"name",
",",
"index_page",
",",
"enable_js",
",",
"online_redirect_url",
")",
":",
"resources",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dest",
",",
"\"Contents\"",
",",
"\"Resources\"",
")",
"doc... | 32.153846 | 19.897436 |
def match(self, messy_data, threshold=0.5, n_matches=1, generator=False):  # pragma: no cover
    """Identifies pairs of records that refer to the same entity, returns
    tuples containing a set of record ids and a confidence score as a float
    between 0 and 1. The record_ids within each set should refer to the
    same entity and the confidence score is the estimated probability that
    the records refer to the same entity.

    This method should only used for small to moderately sized datasets
    for larger data, use matchBlocks

    Arguments:
        messy_data -- Dictionary of records from messy dataset, where the
                      keys are record_ids and the values are dictionaries with
                      the keys being field names
        threshold -- Number between 0 and 1 (default is .5). We will consider
                     records as potential duplicates if the predicted
                     probability of being a duplicate is above the threshold.
                     Lowering the number will increase recall, raising it
                     will increase precision
        n_matches -- Maximum number of possible matches from the canonical
                     record set to match against each record in the messy
                     record set
    """
    blocked_pairs = self._blockData(messy_data)
    clusters = self.matchBlocks(blocked_pairs, threshold, n_matches)
    # Drop empty clusters lazily; only materialize when a list is wanted.
    nonempty = (cluster for cluster in clusters if len(cluster))
    if generator:
        return nonempty
    return list(nonempty)
"def",
"match",
"(",
"self",
",",
"messy_data",
",",
"threshold",
"=",
"0.5",
",",
"n_matches",
"=",
"1",
",",
"generator",
"=",
"False",
")",
":",
"# pragma: no cover",
"blocked_pairs",
"=",
"self",
".",
"_blockData",
"(",
"messy_data",
")",
"clusters",
"... | 44.25 | 27.194444 |
def from_query(query, engine=None, limit=None):
    """
    Execute an ORM style query and return the result as a
    :class:`prettytable.PrettyTable`.

    :param query: an ``sqlalchemy.orm.Query`` object.
    :param engine: an ``sqlalchemy.engine.base.Engine`` object
        (kept for signature compatibility; not used here).
    :param limit: int, limit rows to return.
    :return: a ``prettytable.PrettyTable`` object
    """
    if limit is not None:
        query = query.limit(limit)
    result_proxy = execute_query_return_result_proxy(query)
    return from_db_cursor(result_proxy.cursor)
"def",
"from_query",
"(",
"query",
",",
"engine",
"=",
"None",
",",
"limit",
"=",
"None",
")",
":",
"if",
"limit",
"is",
"not",
"None",
":",
"query",
"=",
"query",
".",
"limit",
"(",
"limit",
")",
"result_proxy",
"=",
"execute_query_return_result_proxy",
... | 30.157895 | 16.578947 |
def enable_cloud_integration(self, id, **kwargs):  # noqa: E501
    """Enable a specific cloud integration  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.enable_cloud_integration(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerCloudIntegration
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous mode: hand back the request thread immediately.
        return self.enable_cloud_integration_with_http_info(id, **kwargs)  # noqa: E501
    (data) = self.enable_cloud_integration_with_http_info(id, **kwargs)  # noqa: E501
    return data
"def",
"enable_cloud_integration",
"(",
"self",
",",
"id",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"ena... | 43.238095 | 20.047619 |
def H9(self):
    "Entropy."
    # Lazily compute and cache; later calls reuse the stored _H9 array.
    if not hasattr(self, '_H9'):
        plogp = self.P * np.log(self.P + self.eps)
        self._H9 = -plogp.sum(2).sum(1)
    return self._H9
"def",
"H9",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_H9'",
")",
":",
"self",
".",
"_H9",
"=",
"-",
"(",
"self",
".",
"P",
"*",
"np",
".",
"log",
"(",
"self",
".",
"P",
"+",
"self",
".",
"eps",
")",
")",
".",
"s... | 32.8 | 20.8 |
def publish(dataset_uri):
    """Return access URL to HTTP enabled (published) dataset.

    Exits with error code 1 if the dataset_uri is not a dataset.
    Exits with error code 2 if the dataset cannot be HTTP enabled.
    """
    try:
        dset = dtoolcore.DataSet.from_uri(dataset_uri)
    except dtoolcore.DtoolCoreTypeError:
        print("Not a dataset: {}".format(dataset_uri))
        sys.exit(1)
    try:
        access_uri = dset._storage_broker.http_enable()
    except AttributeError:
        # Storage brokers without an http_enable() method cannot publish.
        print(
            "Datasets of type '{}' cannot be published using HTTP".format(
                dset._storage_broker.key)
        )
        sys.exit(2)
    return access_uri
"def",
"publish",
"(",
"dataset_uri",
")",
":",
"try",
":",
"dataset",
"=",
"dtoolcore",
".",
"DataSet",
".",
"from_uri",
"(",
"dataset_uri",
")",
"except",
"dtoolcore",
".",
"DtoolCoreTypeError",
":",
"print",
"(",
"\"Not a dataset: {}\"",
".",
"format",
"(",... | 29.304348 | 22.086957 |
def get_uint16(self):
    """Read the next token and interpret it as a 16-bit unsigned
    integer.

    @raises dns.exception.SyntaxError:
    @rtype: int
    """
    value = self.get_int()
    # Reject anything outside the unsigned 16-bit range [0, 65535].
    if not 0 <= value <= 65535:
        raise dns.exception.SyntaxError('%d is not an unsigned 16-bit integer' % value)
    return value
"def",
"get_uint16",
"(",
"self",
")",
":",
"value",
"=",
"self",
".",
"get_int",
"(",
")",
"if",
"value",
"<",
"0",
"or",
"value",
">",
"65535",
":",
"raise",
"dns",
".",
"exception",
".",
"SyntaxError",
"(",
"'%d is not an unsigned 16-bit integer'",
"%",... | 29.666667 | 19.083333 |
def mousePressEvent(self, event):
        """
        Overloads when a mouse press occurs. If in editable mode, and the
        click occurs on a selected index, then the editor will be created
        and no selection change will occur.

        :param event | <QMousePressEvent>
        """
        # Resolve the item and column under the cursor (item is None when the
        # click lands on empty space; column is -1 outside any column).
        item = self.itemAt(event.pos())
        column = self.columnAt(event.pos().x())
        mid_button = event.button() == QtCore.Qt.MidButton
        ctrl_click = event.button() == QtCore.Qt.LeftButton and \
                     event.modifiers() == QtCore.Qt.ControlModifier
        if item and column != -1:
            # Remember the pressed cell and its check state; presumably
            # consumed by a matching release handler — TODO confirm.
            self._downItem = weakref.ref(item)
            self._downColumn = column
            self._downState = item.checkState(column)
        elif not item:
            # Clicked empty space: clear the current item and selection.
            self.setCurrentItem(None)
            self.clearSelection()
        # Middle click and ctrl+left-click both emit itemMiddleClicked.
        if (mid_button or ctrl_click) and item and column != -1:
            self.itemMiddleClicked.emit(item, column)
        index = self.indexAt(event.pos())
        sel_model = self.selectionModel()
        if self.isEditable() and index and sel_model.isSelected(index):
            # Clicking an already-selected cell in editable mode starts the
            # editor in place, without changing the selection.
            sel_model.setCurrentIndex(index, sel_model.NoUpdate)
            self.edit(index, self.SelectedClicked, event)
            event.accept()
        else:
            super(XTreeWidget, self).mousePressEvent(event)
"def",
"mousePressEvent",
"(",
"self",
",",
"event",
")",
":",
"item",
"=",
"self",
".",
"itemAt",
"(",
"event",
".",
"pos",
"(",
")",
")",
"column",
"=",
"self",
".",
"columnAt",
"(",
"event",
".",
"pos",
"(",
")",
".",
"x",
"(",
")",
")",
"mi... | 37.638889 | 18.694444 |
def format(self, record):
    """Space debug messages for more legibility."""
    # Indent DEBUG-level messages so they stand out from other levels.
    if record.levelno == logging.DEBUG:
        record.msg = ' {}'.format(record.msg)
    return super(AuditLogFormatter, self).format(record)
"def",
"format",
"(",
"self",
",",
"record",
")",
":",
"if",
"record",
".",
"levelno",
"==",
"logging",
".",
"DEBUG",
":",
"record",
".",
"msg",
"=",
"' {}'",
".",
"format",
"(",
"record",
".",
"msg",
")",
"return",
"super",
"(",
"AuditLogFormatter",
... | 46.4 | 9.4 |
def transform(self, buffer, mode=None, vertices=-1, *, first=0, instances=1) -> None:
    '''
    Transform vertices, storing the output in a single buffer.

    The transform primitive (mode) must be the same as
    the input primitive of the GeometryShader.

    Args:
        buffer (Buffer): The buffer to store the output.
        mode (int): By default :py:data:`POINTS` will be used.
        vertices (int): The number of vertices to transform.

    Keyword Args:
        first (int): The index of the first vertex to start with.
        instances (int): The number of instances.
    '''
    effective_mode = POINTS if mode is None else mode
    self.mglo.transform(buffer.mglo, effective_mode, vertices, first, instances)
"def",
"transform",
"(",
"self",
",",
"buffer",
",",
"mode",
"=",
"None",
",",
"vertices",
"=",
"-",
"1",
",",
"*",
",",
"first",
"=",
"0",
",",
"instances",
"=",
"1",
")",
"->",
"None",
":",
"if",
"mode",
"is",
"None",
":",
"mode",
"=",
"POINT... | 38.095238 | 26.285714 |
def _compute_k(self, tau):
r"""Evaluate the kernel directly at the given values of `tau`.
Parameters
----------
tau : :py:class:`Matrix`, (`M`, `D`)
`M` inputs with dimension `D`.
Returns
-------
k : :py:class:`Array`, (`M`,)
:math:`k(\tau)` (less the :math:`\sigma^2` prefactor).
"""
y = self._compute_y(tau)
return y**(-self.params[1]) | [
"def",
"_compute_k",
"(",
"self",
",",
"tau",
")",
":",
"y",
"=",
"self",
".",
"_compute_y",
"(",
"tau",
")",
"return",
"y",
"**",
"(",
"-",
"self",
".",
"params",
"[",
"1",
"]",
")"
] | 30.2 | 14.8 |
def list_subdirs(self, marker=None, limit=None, prefix=None, delimiter=None,
                 full_listing=False):
    """
    Return a list of the names representing the pseudo-subdirectories in
    this container. The marker and limit params handle pagination, and the
    prefix param filters the objects returned. The delimiter param exists
    for backwards compatibility only, as the call requires the delimiter
    to be '/'.
    """
    # Pure delegation to the container manager.
    return self.manager.list_subdirs(
        self, marker=marker, limit=limit, prefix=prefix,
        delimiter=delimiter, full_listing=full_listing)
"def",
"list_subdirs",
"(",
"self",
",",
"marker",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"prefix",
"=",
"None",
",",
"delimiter",
"=",
"None",
",",
"full_listing",
"=",
"False",
")",
":",
"return",
"self",
".",
"manager",
".",
"list_subdirs",
"(... | 56.818182 | 23.545455 |
def _send_commit_request(self, retry_delay=None, attempt=None):
        """Send a commit request with our last_processed_offset.

        :param retry_delay: seconds to wait before a retry on failure;
            defaults to ``self.retry_init_delay``.
        :param attempt: 1-based retry counter; defaults to 1.
        :raises OperationInProgress: if a commit request is already in flight.
        """
        # If there's a _commit_call, and it's not active, clear it, it probably
        # just called us...
        if self._commit_call and not self._commit_call.active():
            self._commit_call = None
        # Make sure we only have one outstanding commit request at a time
        if self._commit_req is not None:
            raise OperationInProgress(self._commit_req)
        # Handle defaults
        if retry_delay is None:
            retry_delay = self.retry_init_delay
        if attempt is None:
            attempt = 1
        # Create new OffsetCommitRequest with the latest processed offset
        commit_offset = self._last_processed_offset
        commit_request = OffsetCommitRequest(
            self.topic, self.partition, commit_offset,
            TIMESTAMP_INVALID, self.commit_metadata)
        log.debug("Committing off=%d grp=%s tpc=%s part=%s req=%r",
                  self._last_processed_offset, self.consumer_group,
                  self.topic, self.partition, commit_request)
        # Send the request, add our callbacks. _clear_commit_req runs on both
        # success and failure; the errback also receives the retry state so a
        # failed commit can be retried with backoff.
        self._commit_req = d = self.client.send_offset_commit_request(
            self.consumer_group, [commit_request])
        d.addBoth(self._clear_commit_req)
        d.addCallbacks(
            self._update_committed_offset, self._handle_commit_error,
            callbackArgs=(commit_offset,),
            errbackArgs=(retry_delay, attempt))
"def",
"_send_commit_request",
"(",
"self",
",",
"retry_delay",
"=",
"None",
",",
"attempt",
"=",
"None",
")",
":",
"# If there's a _commit_call, and it's not active, clear it, it probably",
"# just called us...",
"if",
"self",
".",
"_commit_call",
"and",
"not",
"self",
... | 43.228571 | 18.942857 |
def save(self, filename):
    """Save metadata to XML file"""
    # Serialize via self.xml() and write the document as UTF-8 text.
    with io.open(filename, 'w', encoding='utf-8') as outfile:
        outfile.write(self.xml())
"def",
"save",
"(",
"self",
",",
"filename",
")",
":",
"with",
"io",
".",
"open",
"(",
"filename",
",",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"self",
".",
"xml",
"(",
")",
")"
] | 38 | 10.25 |
def get_hash(self, ireq, ireq_hashes=None):
        """
        Retrieve hashes for a specific ``InstallRequirement`` instance.

        :param ireq: An ``InstallRequirement`` to retrieve hashes for
        :type ireq: :class:`~pip_shims.InstallRequirement`
        :param ireq_hashes: Previously collected hashes to merge with, if any
        :type ireq_hashes: Set or None
        :return: A set of hashes.
        :rtype: Set
        """
        # We _ALWAYS MUST PRIORITIZE_ the inclusion of hashes from local sources
        # PLEASE *DO NOT MODIFY THIS* TO CHECK WHETHER AN IREQ ALREADY HAS A HASH
        # RESOLVED. The resolver will pull hashes from PyPI and only from PyPI.
        # The entire purpose of this approach is to include missing hashes.
        # This fixes a race condition in resolution for missing dependency caches
        # see pypa/pipenv#3289
        if not self._should_include_hash(ireq):
            # Hashes are not wanted for this requirement at all.
            return set()
        elif self._should_include_hash(ireq) and (
            not ireq_hashes or ireq.link.scheme == "file"
        ):
            # Local (file://) links, or no hashes supplied yet: compute the
            # hash of the linked artifact and merge it in.
            if not ireq_hashes:
                ireq_hashes = set()
            new_hashes = self.resolver.repository._hash_cache.get_hash(ireq.link)
            ireq_hashes = add_to_set(ireq_hashes, new_hashes)
        else:
            ireq_hashes = set(ireq_hashes)
        # The _ONLY CASE_ where we flat out set the value is if it isn't present
        # It's a set, so otherwise we *always* need to do a union update
        if ireq not in self.hashes:
            return ireq_hashes
        else:
            return self.hashes[ireq] | ireq_hashes
"def",
"get_hash",
"(",
"self",
",",
"ireq",
",",
"ireq_hashes",
"=",
"None",
")",
":",
"# We _ALWAYS MUST PRIORITIZE_ the inclusion of hashes from local sources",
"# PLEASE *DO NOT MODIFY THIS* TO CHECK WHETHER AN IREQ ALREADY HAS A HASH",
"# RESOLVED. The resolver will pull hashes from... | 44.606061 | 21.333333 |
def encode(self, word):
        """Return the Norphone code.

        Parameters
        ----------
        word : str
            The word to transform

        Returns
        -------
        str
            The Norphone code

        Examples
        --------
        >>> pe = Norphone()
        >>> pe.encode('Hansen')
        'HNSN'
        >>> pe.encode('Larsen')
        'LRSN'
        >>> pe.encode('Aagaard')
        'ÅKRT'
        >>> pe.encode('Braaten')
        'BRTN'
        >>> pe.encode('Sandvik')
        'SNVK'
        """
        word = word.upper()
        code = ''
        # `skip` counts characters already consumed by a multi-character rule.
        skip = 0
        # Initial-position rules: map the leading character(s) directly and
        # mark them as consumed.
        if word[0:2] == 'AA':
            code = 'Å'
            skip = 2
        elif word[0:2] == 'GI':
            code = 'J'
            skip = 2
        elif word[0:3] == 'SKY':
            code = 'X'
            skip = 3
        elif word[0:2] == 'EI':
            code = 'Æ'
            skip = 2
        elif word[0:2] == 'KY':
            code = 'X'
            skip = 2
        elif word[:1] == 'C':
            code = 'K'
            skip = 1
        elif word[:1] == 'Ä':
            code = 'Æ'
            skip = 1
        elif word[:1] == 'Ö':
            code = 'Ø'
            skip = 1
        # Final-position rules: trailing 'DT' collapses to 'T'; a vowel + 'D'
        # ending drops both characters.
        if word[-2:] == 'DT':
            word = word[:-2] + 'T'
        # Though the rules indicate this rule applies in all positions, the
        # reference implementation indicates it applies only in final position.
        elif word[-2:-1] in self._uc_v_set and word[-1:] == 'D':
            word = word[:-2]
        # Main pass: apply the longest matching replacement at each position;
        # unmatched consonants (and a leading vowel) are copied through.
        for pos, char in enumerate(word):
            if skip:
                skip -= 1
            else:
                for length in sorted(self._replacements, reverse=True):
                    if word[pos : pos + length] in self._replacements[length]:
                        code += self._replacements[length][
                            word[pos : pos + length]
                        ]
                        skip = length - 1
                        break
                else:
                    # Vowels are only kept in initial position.
                    if not pos or char not in self._uc_v_set:
                        code += char
        code = self._delete_consecutive_repeats(code)
        return code
"def",
"encode",
"(",
"self",
",",
"word",
")",
":",
"word",
"=",
"word",
".",
"upper",
"(",
")",
"code",
"=",
"''",
"skip",
"=",
"0",
"if",
"word",
"[",
"0",
":",
"2",
"]",
"==",
"'AA'",
":",
"code",
"=",
"'Å'",
"skip",
"=",
"2",
"elif",
"... | 25.39759 | 19.843373 |
def _fetchall(self, query, vars, limit=None, offset=0):
"""
Return multiple rows.
"""
if limit is None:
limit = current_app.config['DEFAULT_PAGE_SIZE']
query += ' LIMIT %s OFFSET %s''' % (limit, offset)
cursor = self.get_db().cursor()
self._log(cursor, query, vars)
cursor.execute(query, vars)
return cursor.fetchall() | [
"def",
"_fetchall",
"(",
"self",
",",
"query",
",",
"vars",
",",
"limit",
"=",
"None",
",",
"offset",
"=",
"0",
")",
":",
"if",
"limit",
"is",
"None",
":",
"limit",
"=",
"current_app",
".",
"config",
"[",
"'DEFAULT_PAGE_SIZE'",
"]",
"query",
"+=",
"'... | 35.636364 | 8.545455 |
def get_logged_in_account(token_manager=None,
                          app_url=defaults.APP_URL):
    """
    get the account details for logged in account of the auth token_manager
    """
    # NOTE(review): this function unconditionally calls *itself*, so any
    # invocation recurses until RecursionError. It presumably was meant to
    # delegate to a same-named API helper from another module (now shadowed
    # by this def) and pluck its 'id' field — confirm the intended callee
    # and reference it explicitly.
    return get_logged_in_account(token_manager=token_manager,
                                 app_url=app_url)['id']
"def",
"get_logged_in_account",
"(",
"token_manager",
"=",
"None",
",",
"app_url",
"=",
"defaults",
".",
"APP_URL",
")",
":",
"return",
"get_logged_in_account",
"(",
"token_manager",
"=",
"token_manager",
",",
"app_url",
"=",
"app_url",
")",
"[",
"'id'",
"]"
] | 37.75 | 16 |
def merge_global_settings(config, cli_options):
    # type: (dict, dict) -> None
    """Merge "global" CLI options into main config
    :param dict config: config dict (mutated in place)
    :param dict cli_options: cli options
    :raises ValueError: if the YAML config version is missing or
        unsupported, the action is unknown, or no azure storage
        settings are present after merging
    """
    # check for valid version from YAML
    if (not blobxfer.util.is_none_or_empty(config) and
            ('version' not in config or
             config['version'] not in _SUPPORTED_YAML_CONFIG_VERSIONS)):
        raise ValueError('"version" not specified in YAML config or invalid')
    # get action
    action = cli_options['_action']
    if (action != TransferAction.Upload.name.lower() and
            action != TransferAction.Download.name.lower() and
            action != TransferAction.Synccopy.name.lower()):
        raise ValueError('invalid action: {}'.format(action))
    # merge credentials
    if 'azure_storage' in cli_options:
        if 'azure_storage' not in config:
            config['azure_storage'] = {}
        config['azure_storage'] = blobxfer.util.merge_dict(
            config['azure_storage'], cli_options['azure_storage'])
    if ('azure_storage' not in config or
            blobxfer.util.is_none_or_empty(config['azure_storage'])):
        raise ValueError('azure storage settings not specified')
    # create action options
    if action not in config:
        config[action] = []
    # append full specs, if they exist
    # NOTE: source/destination are popped from cli_options so they are not
    # merged a second time further down.
    if action in cli_options:
        if 'source' in cli_options[action]:
            srcdst = {
                'source': cli_options[action]['source'],
                'destination': cli_options[action]['destination'],
            }
            cli_options[action].pop('source')
            cli_options[action].pop('destination')
            config[action].append(srcdst)
    # merge general and concurrency options
    # (ensure the nested dicts exist before _merge_setting reads them)
    if 'options' not in config:
        config['options'] = {}
    if 'concurrency' not in config['options']:
        config['options']['concurrency'] = {}
    if 'timeout' not in config['options']:
        config['options']['timeout'] = {}
    if 'proxy' not in config['options']:
        config['options']['proxy'] = {}
    # assemble the fully-merged options mapping; CLI values take precedence
    # over YAML via _merge_setting, with the listed defaults as the final
    # fallback
    options = {
        'enable_azure_storage_logger': _merge_setting(
            cli_options, config['options'], 'enable_azure_storage_logger'),
        'log_file': _merge_setting(cli_options, config['options'], 'log_file'),
        'progress_bar': _merge_setting(
            cli_options, config['options'], 'progress_bar', default=True),
        'resume_file': _merge_setting(
            cli_options, config['options'], 'resume_file'),
        'timeout': {
            # TODO deprecated timeout setting
            'timeout': _merge_setting(
                cli_options, config['options']['timeout'], 'timeout',
                name_cli='timeout'),
            'connect': _merge_setting(
                cli_options, config['options']['timeout'], 'connect',
                name_cli='connect_timeout'),
            'read': _merge_setting(
                cli_options, config['options']['timeout'], 'read',
                name_cli='read_timeout'),
            'max_retries': _merge_setting(
                cli_options, config['options']['timeout'], 'max_retries',
                default=1000),
        },
        'verbose': _merge_setting(
            cli_options, config['options'], 'verbose', default=False),
        'quiet': _merge_setting(
            cli_options, config['options'], 'quiet', default=False),
        'dry_run': _merge_setting(
            cli_options, config['options'], 'dry_run', default=False),
        'concurrency': {
            'crypto_processes': _merge_setting(
                cli_options, config['options']['concurrency'],
                'crypto_processes', default=0),
            'disk_threads': _merge_setting(
                cli_options, config['options']['concurrency'],
                'disk_threads', default=0),
            'md5_processes': _merge_setting(
                cli_options, config['options']['concurrency'],
                'md5_processes', default=0),
            'transfer_threads': _merge_setting(
                cli_options, config['options']['concurrency'],
                'transfer_threads', default=0),
        },
        'proxy': {
            'host': _merge_setting(
                cli_options, config['options']['proxy'], 'host',
                name_cli='proxy_host'),
            'username': _merge_setting(
                cli_options, config['options']['proxy'], 'username',
                name_cli='proxy_username'),
            'password': _merge_setting(
                cli_options, config['options']['proxy'], 'password',
                name_cli='proxy_password'),
        }
    }
    config['options'] = options
"def",
"merge_global_settings",
"(",
"config",
",",
"cli_options",
")",
":",
"# type: (dict, dict) -> None",
"# check for valid version from YAML",
"if",
"(",
"not",
"blobxfer",
".",
"util",
".",
"is_none_or_empty",
"(",
"config",
")",
"and",
"(",
"'version'",
"not",
... | 44.288462 | 13.182692 |
def make_curve(report, success_name, fail_names):
  """
  Make a success-failure curve.
  :param report: A confidence report
    (the type of object saved by make_confidence_report.py)
  :param success_name: see plot_report_from_path
  :param fail_names: see plot_report_from_path
  :returns:
    fail_optimal: list of failure rates on adversarial data for the optimal
      (t >= .5) part of the curve. Each entry corresponds to a different
      threshold. Thresholds are chosen to make the smoothest possible curve
      from the available data, e.g. one threshold between each unique
      confidence value observed in the data. To make sure that linear
      interpolation between points in the curve never overestimates the
      failure rate for a specific success rate, the curve also includes
      extra points that increment the failure rate prior to any point
      that increments the success rate, so the curve moves up and to the
      right in a series of backwards "L" shapes rather than moving up
      and to the right along diagonal lines. For large datasets these
      maximally pessimistic points will usually not be visible and the
      curve will appear smooth.
    success_optimal: list of success rates on clean data on the optimal
      part of the curve. Matches up with `fail_optimal`.
    fail_lower_bound: list of observed failure rates on the t < .5 portion
      of the curve where MaxConfidence is not optimal.
    fail_upper_bound: list of upper bounds (assuming good enough optimization,
      so not a true upper bound) on the failure rates on the t < .5 portion
      of the curve where MaxConfidence is not optimal. Matches up with
      `fail_lower_bound`.
    success_bounded: success rates on the non-optimal part of the curve.
      Matches up with `fail_lower_bound` and `fail_upper_bound`.
  """
  success_results = report[success_name]
  fail_name = None # pacify pylint
  found = False
  for fail_name in fail_names:
    if fail_name in report:
      found = True
      break
  if not found:
    raise ValueError(fail_name + " not in report."
                     "Available keys: " + str(report.keys()))
  fail_results = report[fail_name]
  # "good" means drawn from the distribution where we measure success rate.
  # "bad" means drawn from the distribution where we measure failure rate.
  # From here on out we use those terms, to avoid confusion between examples
  # that actually failed and examples that were drawn from the distribution
  # where we measured failure rate.
  old_all_probs_version = False
  if isinstance(success_results, dict):
    # This dictionary key lookup will trigger a deprecation warning if `success_results` is not the old dictionary
    # style of report, so we don't want to do a dictionary lookup unless we really are using the old version.
    old_all_probs_version = 'all_probs' in success_results
  if old_all_probs_version:
    warnings.warn("The 'all_probs' key is included only to support "
                  " old files from a private development codebase. "
                  "Support for this key can be dropped at any time "
                  " without warning.")
    good_probs = success_results['all_probs']
    bad_probs = fail_results['all_probs']
    bad_corrects = fail_results['correctness_mask']
    good_corrects = success_results['correctness_mask']
  else:
    if isinstance(success_results, dict):
      # Still using dict, but using newer key names
      warnings.warn("Support for dictionary confidence reports is deprecated. Switch to using the classes in "
                    "cleverhans.confidence_report. Support for old dictionary-style reports may be removed "
                    "on or after 2019-07-19.")
      good_probs = success_results['confidence']
      bad_probs = fail_results['confidence']
      good_corrects = success_results['correctness']
      bad_corrects = fail_results['correctness']
    else:
      # current version
      good_probs = success_results.confidence
      bad_probs = fail_results.confidence
      good_corrects = success_results.correctness
      bad_corrects = fail_results.correctness
  # Each triplet is (confidence, correctness, is_from_success_distribution).
  good_triplets = [(prob, correct, True) for prob, correct
                   in safe_zip(good_probs, good_corrects)]
  bad_triplets = [(prob, correct, False) for prob, correct
                  in safe_zip(bad_probs, bad_corrects)]
  total_good = len(good_triplets)
  total_bad = len(bad_triplets)
  if total_good != 10000:
    warnings.warn("Not using full test set? Found " + str(total_good) +
                  " examples for measuring success rate")
  if total_bad != 10000:
    warnings.warn("Not using full test set for adversarial examples?")
  all_triplets = good_triplets + bad_triplets
  # Sort by descending confidence so lowering the threshold corresponds to
  # walking the list from left to right.
  all_triplets = sorted(all_triplets, key=lambda x: -x[0])
  # Start with the case for threshold t = 1.
  # Examples are covered only if prob > t (strict inequality)
  # So initially nothing is covered
  good_covered_and_correct = 0
  bad_covered_and_incorrect = 0
  # Number of examples that are bad, incorrect, and covered by
  # a t >= 0.5, or that were merely covered by a t < 0.5
  failure_opportunities = 0
  next_idx = 0
  fail_optimal = []
  success_optimal = []
  fail_upper_bound = []
  fail_lower_bound = []
  success_bounded = []
  bounded = False
  # NOTE: the loop always exits via an internal break statement.
  # Copied the termination condition to the while statement for ease
  # of reading.
  while next_idx < len(all_triplets):
    gs = float(good_covered_and_correct) / total_good
    bf = float(bad_covered_and_incorrect) / total_bad
    # Add results for current threshold to the list
    if not bounded:
      # Sometimes when there are big jumps the failure rate it makes
      # artifacts in the plot, where there's a long linear track.
      # This implies the real success-fail curve is linear when
      # actually it just isn't sampled by the data.
      # To avoid implying that the model reaches a higher success
      # rate than it actually does, we avoid these plotting artifacts
      # by introducing extra points that make the graph move horizontally
      # to the right first, then vertically.
      if len(fail_optimal) > 0:
        prev_bf = fail_optimal[-1]
        prev_gs = success_optimal[-1]
        if gs > prev_gs and bf > prev_bf:
          fail_optimal.append(bf)
          success_optimal.append(prev_gs)
      success_optimal.append(gs)
      fail_optimal.append(bf)
    else:
      success_bounded.append(gs)
      fail_lower_bound.append(bf)
      fail_upper_bound.append(float(failure_opportunities) / total_bad)
    if next_idx == len(all_triplets):
      break
    # next_prob_to_include is not quite the same thing as the threshold.
    # The threshold is infinitesimally smaller than this value.
    next_prob_to_include = all_triplets[next_idx][0]
    # Process all ties
    while next_prob_to_include == all_triplets[next_idx][0]:
      _prob, correct, is_good = all_triplets[next_idx]
      if is_good:
        good_covered_and_correct += correct
      else:
        if next_prob_to_include <= .5:
          failure_opportunities += 1
        else:
          failure_opportunities += 1 - correct
        bad_covered_and_incorrect += 1 - correct
      next_idx += 1
      if next_idx == len(all_triplets):
        break
    if next_prob_to_include <= .5:
      bounded = True
  out = (fail_optimal, success_optimal, fail_lower_bound, fail_upper_bound,
         success_bounded)
  return out
"def",
"make_curve",
"(",
"report",
",",
"success_name",
",",
"fail_names",
")",
":",
"success_results",
"=",
"report",
"[",
"success_name",
"]",
"fail_name",
"=",
"None",
"# pacify pylint",
"found",
"=",
"False",
"for",
"fail_name",
"in",
"fail_names",
":",
"... | 41.885714 | 20.674286 |
def matches(self, properties):
    """
    Tests if the given criterion matches this LDAP criterion
    :param properties: A dictionary of properties
    :return: True if the properties matches this criterion, else False
    """
    try:
        # Look up the property value and let the configured comparator
        # decide whether it satisfies this criterion (EAFP: a missing
        # key raises KeyError rather than being pre-checked).
        return self.comparator(self.value, properties[self.name])
    except KeyError:
        # The criterion's key is absent from the given properties
        return False
"def",
"matches",
"(",
"self",
",",
"properties",
")",
":",
"try",
":",
"# Use the comparator",
"return",
"self",
".",
"comparator",
"(",
"self",
".",
"value",
",",
"properties",
"[",
"self",
".",
"name",
"]",
")",
"except",
"KeyError",
":",
"# Criterion k... | 35.076923 | 17.692308 |
def fixed_vectors_encoding(index_encoded_sequences, letter_to_vector_df):
    """
    Given a `n` x `k` matrix of integers such as that returned by `index_encoding()` and
    a dataframe mapping each index to an arbitrary vector, return a `n * k * m`
    array where the (`i`, `j`)'th element is `letter_to_vector_df.iloc[sequence[i][j]]`.
    The dataframe index and columns names are ignored here; the indexing is done
    entirely by integer position in the dataframe.
    Parameters
    ----------
    index_encoded_sequences : `n` x `k` array of integers
    letter_to_vector_df : pandas.DataFrame of shape (`alphabet size`, `m`)
    Returns
    -------
    numpy.array of integers with shape (`n`, `k`, `m`)
    """
    (num_sequences, sequence_length) = index_encoded_sequences.shape
    # The per-letter vector length `m` is the number of dataframe *columns*
    # (shape[1]); the previous use of shape[0] (alphabet size / rows) only
    # worked by accident for square dataframes and made reshape fail
    # otherwise.
    vector_size = letter_to_vector_df.shape[1]
    target_shape = (num_sequences, sequence_length, vector_size)
    # Positional lookup of all indices in one vectorized .iloc call, then
    # restore the (n, k, m) shape.
    result = letter_to_vector_df.iloc[
        index_encoded_sequences.flatten()
    ].values.reshape(target_shape)
    return result
"def",
"fixed_vectors_encoding",
"(",
"index_encoded_sequences",
",",
"letter_to_vector_df",
")",
":",
"(",
"num_sequences",
",",
"sequence_length",
")",
"=",
"index_encoded_sequences",
".",
"shape",
"target_shape",
"=",
"(",
"num_sequences",
",",
"sequence_length",
","... | 38.230769 | 25.461538 |
def _numpy24to32bit(data:np.ndarray, bigendian:bool=False) -> np.ndarray:
"""
data is a ubyte array of shape = (size,)
(interleaved channels if multichannel)
"""
target = np.zeros((data.shape[0] * 4 / 3,), dtype=np.ubyte)
if not bigendian:
target[3::4] = data[2::3]
target[2::4] = data[1::3]
target[1::4] = data[0::3]
else:
target[1::4] = data[2::3]
target[2::4] = data[1::3]
target[3::4] = data[0::3]
del data
targetraw = target.tostring()
del target
data = np.fromstring(targetraw, dtype=np.int32)
return data | [
"def",
"_numpy24to32bit",
"(",
"data",
":",
"np",
".",
"ndarray",
",",
"bigendian",
":",
"bool",
"=",
"False",
")",
"->",
"np",
".",
"ndarray",
":",
"target",
"=",
"np",
".",
"zeros",
"(",
"(",
"data",
".",
"shape",
"[",
"0",
"]",
"*",
"4",
"/",
... | 31.052632 | 13.263158 |
def _initialize_slots(self, seed, hashvalues):
'''Initialize the slots of the LeanMinHash.
Args:
seed (int): The random seed controls the set of random
permutation functions generated for this LeanMinHash.
hashvalues: The hash values is the internal state of the LeanMinHash.
'''
self.seed = seed
self.hashvalues = self._parse_hashvalues(hashvalues) | [
"def",
"_initialize_slots",
"(",
"self",
",",
"seed",
",",
"hashvalues",
")",
":",
"self",
".",
"seed",
"=",
"seed",
"self",
".",
"hashvalues",
"=",
"self",
".",
"_parse_hashvalues",
"(",
"hashvalues",
")"
] | 42.1 | 24.5 |
def create_deep_linking_urls(self, url_params):
    """
    Bulk Creates Deep Linking URLs
    See the URL https://dev.branch.io/references/http_api/#bulk-creating-deep-linking-urls
    :param url_params: Array of values returned from "create_deep_link_url(..., skip_api_call=True)"
    :return: The response
    """
    endpoint = "/v1/url/bulk/%s" % self.branch_key
    # Validate that url_params is a non-optional list of dicts before
    # dispatching the request.
    self._check_param(value=url_params, type=list, sub_type=dict, optional=False)
    return self.make_api_call("POST", endpoint, json_params=url_params)
"def",
"create_deep_linking_urls",
"(",
"self",
",",
"url_params",
")",
":",
"url",
"=",
"\"/v1/url/bulk/%s\"",
"%",
"self",
".",
"branch_key",
"method",
"=",
"\"POST\"",
"# Checks params",
"self",
".",
"_check_param",
"(",
"value",
"=",
"url_params",
",",
"type... | 34.352941 | 26.823529 |
def get_resource_usage(self):
    """GetResourceUsage.
    [Preview API] Gets information about build resources in the system.
    :rtype: :class:`<BuildResourceUsage> <azure.devops.v5_0.build.models.BuildResourceUsage>`
    """
    # Issue the GET request, then deserialize the raw response into the
    # BuildResourceUsage model.
    raw_response = self._send(
        http_method='GET',
        location_id='3813d06c-9e36-4ea1-aac3-61a485d60e3d',
        version='5.0-preview.2')
    return self._deserialize('BuildResourceUsage', raw_response)
"def",
"get_resource_usage",
"(",
"self",
")",
":",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'3813d06c-9e36-4ea1-aac3-61a485d60e3d'",
",",
"version",
"=",
"'5.0-preview.2'",
")",
"return",
"self",
".",
"_... | 54.111111 | 21.111111 |
def sortino_ratio(returns,
                  required_return=0,
                  period=DAILY,
                  annualization=None,
                  out=None,
                  _downside_risk=None):
    """
    Determines the Sortino ratio of a strategy.
    Parameters
    ----------
    returns : pd.Series or np.ndarray or pd.DataFrame
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    required_return: float / series
        minimum acceptable return
    period : str, optional
        Defines the periodicity of the 'returns' data for purposes of
        annualizing. Value ignored if `annualization` parameter is specified.
        Defaults are::
            'monthly':12
            'weekly': 52
            'daily': 252
    annualization : int, optional
        Used to suppress default values available in `period` to convert
        returns into annual returns. Value should be the annual frequency of
        `returns`.
    _downside_risk : float, optional
        The downside risk of the given inputs, if known. Will be calculated if
        not provided.
    out : array-like, optional
        Array to use as output buffer.
        If not passed, a new array will be created.
    Returns
    -------
    sortino_ratio : float or pd.Series
        depends on input type
        series ==> float
        DataFrame ==> pd.Series
    Note
    -----
    See `<https://www.sunrisecapital.com/wp-content/uploads/2014/06/Futures_
    Mag_Sortino_0213.pdf>`__ for more details.
    """
    # Allocate an output buffer matching the non-time dimensions when the
    # caller did not supply one.
    allocated_output = out is None
    if allocated_output:
        out = np.empty(returns.shape[1:])
    return_1d = returns.ndim == 1
    # Fewer than two observations: the ratio is undefined, return NaN.
    if len(returns) < 2:
        out[()] = np.nan
        if return_1d:
            out = out.item()
        return out
    adj_returns = np.asanyarray(_adjust_returns(returns, required_return))
    ann_factor = annualization_factor(period, annualization)
    average_annual_return = nanmean(adj_returns, axis=0) * ann_factor
    # Use the caller-provided downside risk when available to avoid
    # recomputing it.
    annualized_downside_risk = (
        _downside_risk
        if _downside_risk is not None else
        downside_risk(returns, required_return, period, annualization)
    )
    np.divide(average_annual_return, annualized_downside_risk, out=out)
    if return_1d:
        out = out.item()
    elif isinstance(returns, pd.DataFrame):
        out = pd.Series(out)
    return out
"def",
"sortino_ratio",
"(",
"returns",
",",
"required_return",
"=",
"0",
",",
"period",
"=",
"DAILY",
",",
"annualization",
"=",
"None",
",",
"out",
"=",
"None",
",",
"_downside_risk",
"=",
"None",
")",
":",
"allocated_output",
"=",
"out",
"is",
"None",
... | 29.746835 | 20.531646 |
def select_neighbors_by_layer(docgraph, node, layer, data=False):
"""
Get all neighboring nodes belonging to (any of) the given layer(s),
A neighboring node is a node that the given node connects to with an
outgoing edge.
Parameters
----------
docgraph : DiscourseDocumentGraph
document graph from which the nodes will be extracted
layer : str or collection of str
name(s) of the layer(s)
data : bool
If True, results will include node attributes.
Yields
------
nodes : generator of str or generator of (str, dict) tuple
If data is False (default), a generator of neighbor node IDs
that are present in the given layer. If data is True,
a generator of (node ID, node attrib dict) tuples.
"""
for node_id in docgraph.neighbors_iter(node):
node_layers = docgraph.node[node_id]['layers']
if isinstance(layer, (str, unicode)):
condition = layer in node_layers
else: # ``layer`` is a list/set/dict of layers
condition = any(l in node_layers for l in layer)
if condition:
yield (node_id, docgraph.node[node_id]) if data else (node_id) | [
"def",
"select_neighbors_by_layer",
"(",
"docgraph",
",",
"node",
",",
"layer",
",",
"data",
"=",
"False",
")",
":",
"for",
"node_id",
"in",
"docgraph",
".",
"neighbors_iter",
"(",
"node",
")",
":",
"node_layers",
"=",
"docgraph",
".",
"node",
"[",
"node_i... | 37.83871 | 20.225806 |
def scheme_specification(cls):
    """ :meth:`.WSchemeHandler.scheme_specification` method implementation
    """
    # username/password/path are optional for FTP URIs; only the host is
    # mandatory.
    optional = WURIComponentVerifier.Requirement.optional
    required = WURIComponentVerifier.Requirement.required
    verifiers = (
        WURIComponentVerifier(WURI.Component.username, optional),
        WURIComponentVerifier(WURI.Component.password, optional),
        WURIComponentVerifier(WURI.Component.hostname, required),
        WURIComponentVerifier(WURI.Component.path, optional),
    )
    return WSchemeSpecification('ftp', *verifiers)
"def",
"scheme_specification",
"(",
"cls",
")",
":",
"return",
"WSchemeSpecification",
"(",
"'ftp'",
",",
"WURIComponentVerifier",
"(",
"WURI",
".",
"Component",
".",
"username",
",",
"WURIComponentVerifier",
".",
"Requirement",
".",
"optional",
")",
",",
"WURICom... | 52 | 29.9 |
def script_item_encode(self, target_system, target_component, seq, name):
    '''
    Message encoding a mission script item. This message is emitted upon a
    request for the next script item.

    target_system    : System ID (uint8_t)
    target_component : Component ID (uint8_t)
    seq              : Sequence (uint16_t)
    name             : The name of the mission script, NULL terminated. (char)

    Returns the encoded MAVLink_script_item_message, ready to be sent.
    '''
    return MAVLink_script_item_message(target_system, target_component, seq, name)
"def",
"script_item_encode",
"(",
"self",
",",
"target_system",
",",
"target_component",
",",
"seq",
",",
"name",
")",
":",
"return",
"MAVLink_script_item_message",
"(",
"target_system",
",",
"target_component",
",",
"seq",
",",
"name",
")"
] | 52.583333 | 32.916667 |
def to_color(self, value, maxvalue, scale, minvalue=0.0):
"""
convert continuous values into colors using matplotlib colorscales
:param value: value to be converted
:param maxvalue: max value in the colorscale
:param scale: lin, log, sqrt
:param minvalue: minimum of the input values in linear scale (default is 0)
:return: the color corresponding to the value
"""
if scale == 'lin':
if minvalue >= maxvalue:
raise Exception('minvalue must be less than maxvalue')
else:
value = 1.*(value-minvalue) / (maxvalue-minvalue)
elif scale == 'log':
if value < 1 or maxvalue <= 1:
raise Exception('value and maxvalue must be >= 1')
else:
value = math.log(value) / math.log(maxvalue)
elif scale == 'sqrt':
if value < 0 or maxvalue <= 0:
raise Exception('value and maxvalue must be greater than 0')
else:
value = math.sqrt(value) / math.sqrt(maxvalue)
else:
raise Exception('scale must be "lin", "log", or "sqrt"')
if value < 0:
value = 0
elif value > 1:
value = 1
value = int(1.*self.levels*value)*1./(self.levels-1)
if value not in self.mapping:
self.mapping[value] = _convert_color_format(self.cmap(value), self.alpha)
return self.mapping[value] | [
"def",
"to_color",
"(",
"self",
",",
"value",
",",
"maxvalue",
",",
"scale",
",",
"minvalue",
"=",
"0.0",
")",
":",
"if",
"scale",
"==",
"'lin'",
":",
"if",
"minvalue",
">=",
"maxvalue",
":",
"raise",
"Exception",
"(",
"'minvalue must be less than maxvalue'"... | 38.526316 | 19.842105 |
def untldict2py(untl_dict):
    """Convert a UNTL dictionary into a Python object.

    :param untl_dict: mapping of element name -> list of element dicts;
        each element dict may carry 'qualifier' and 'content' keys, and
        'content' may itself be a dict of child-element name -> value.
    :return: the root ``metadata`` element with all parsed children
        attached.
    """
    # Create the root element.
    untl_root = PYUNTL_DISPATCH['metadata']()
    untl_py_list = []
    for element_name, element_list in untl_dict.items():
        # Loop through the element dictionaries in the element list.
        for element_dict in element_list:
            qualifier = element_dict.get('qualifier', None)
            content = element_dict.get('content', None)
            child_list = []
            # Handle content that is children elements.
            if isinstance(content, dict):
                for key, value in content.items():
                    child_list.append(
                        PYUNTL_DISPATCH[key](content=value),
                    )
                # Create the UNTL element that will have children elements
                # added to it.
                if qualifier is not None:
                    untl_element = PYUNTL_DISPATCH[element_name](
                        qualifier=qualifier
                    )
                else:
                    untl_element = PYUNTL_DISPATCH[element_name]()
                # Add the element's children to the element.
                for child in child_list:
                    untl_element.add_child(child)
            # If not child element, create the element and
            # add qualifier and content as available.
            elif content is not None and qualifier is not None:
                untl_element = PYUNTL_DISPATCH[element_name](
                    qualifier=qualifier,
                    content=content,
                )
            elif qualifier is not None:
                untl_element = PYUNTL_DISPATCH[element_name](
                    qualifier=qualifier,
                )
            elif content is not None:
                untl_element = PYUNTL_DISPATCH[element_name](
                    content=content,
                )
            # Create element that only has children.
            elif len(child_list) > 0:
                untl_element = PYUNTL_DISPATCH[element_name]()
            # Add the UNTL element to the Python element list.
            untl_py_list.append(untl_element)
    # Add the UNTL elements to the root element.
    for untl_element in untl_py_list:
        untl_root.add_child(untl_element)
    return untl_root
"def",
"untldict2py",
"(",
"untl_dict",
")",
":",
"# Create the root element.",
"untl_root",
"=",
"PYUNTL_DISPATCH",
"[",
"'metadata'",
"]",
"(",
")",
"untl_py_list",
"=",
"[",
"]",
"for",
"element_name",
",",
"element_list",
"in",
"untl_dict",
".",
"items",
"("... | 44.25 | 12.807692 |
def _combine_results(self, match_as_dict):
'''Combine results from different parsed parts:
we look for non-empty results in values like
'postal_code_b' or 'postal_code_c' and store
them as main value.
So 'postal_code_b':'123456'
becomes:
'postal_code' :'123456'
'''
keys = []
vals = []
for k, v in six.iteritems(match_as_dict):
if k[-2:] in '_a_b_c_d_e_f_g_h_i_j_k_l_m':
if v:
# strip last 2 chars: '..._b' -> '...'
keys.append(k[:-2])
vals.append(v)
else:
if k not in keys:
keys.append(k)
vals.append(v)
return dict(zip(keys, vals)) | [
"def",
"_combine_results",
"(",
"self",
",",
"match_as_dict",
")",
":",
"keys",
"=",
"[",
"]",
"vals",
"=",
"[",
"]",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"match_as_dict",
")",
":",
"if",
"k",
"[",
"-",
"2",
":",
"]",
"in",
... | 37.217391 | 12.26087 |
def goodnode(self, nodelist):
    ''' Goes through the provided list
        and returns the first server node
        that does not return an error.

        NOTE(review): implicitly returns None when every node in the
        scanned range fails -- callers must handle that case. Also,
        ``self.currentnode`` is incremented via ``int(self.currentnode)``,
        which suggests it may arrive as a string -- confirm its type at
        the call sites.
    '''
    l = len(nodelist)
    # Resume scanning from the last known position rather than index 0.
    for n in range(self.current_node(l), l):
        self.msg.message("Trying node " + str(n) + ": " + nodelist[n])
        try:
            # A successful open (no HTTP error) marks the node as good.
            req = urllib.request.Request(url=nodelist[n])
            urllib.request.urlopen(req)
        except HTTPError as e:
            self.msg.error_message(e)
            self.currentnode = int(self.currentnode) + 1
        else:
            self.msg.message("Using " + nodelist[n])
            return nodelist[n]
"def",
"goodnode",
"(",
"self",
",",
"nodelist",
")",
":",
"l",
"=",
"len",
"(",
"nodelist",
")",
"for",
"n",
"in",
"range",
"(",
"self",
".",
"current_node",
"(",
"l",
")",
",",
"l",
")",
":",
"self",
".",
"msg",
".",
"message",
"(",
"\"Trying n... | 39.411765 | 13.058824 |
def connect_socket(root_dir):
    """Connect to a daemon's socket.
    Args:
        root_dir (str): The directory that used as root by the daemon.
    Returns:
        socket.socket: A socket that is connected to the daemon.
    Raises:
        SystemExit: exits with status 1 when the socket file is missing
            or the connection attempt fails.
    """
    # Get config directory where the daemon socket is located
    config_dir = os.path.join(root_dir, '.config/pueue')
    socket_path = os.path.join(config_dir, 'pueue.sock')
    # Create Socket and exit with 1, if socket can't be created.
    # Only OSError is caught (socket and filesystem failures); the previous
    # bare `except:` also swallowed KeyboardInterrupt and programming errors.
    try:
        client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        if not os.path.exists(socket_path):
            print("Socket doesn't exist")
            raise FileNotFoundError(socket_path)
        client.connect(socket_path)
    except OSError:
        print("Error connecting to socket. Make sure the daemon is running")
        sys.exit(1)
    return client
"def",
"connect_socket",
"(",
"root_dir",
")",
":",
"# Get config directory where the daemon socket is located",
"config_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"'.config/pueue'",
")",
"# Create Socket and exit with 1, if socket can't be created",
"tr... | 32.92 | 21.36 |
def Decode(self, encoded_data):
    """Decode the encoded data.
    Args:
      encoded_data (byte): encoded data.
    Returns:
      tuple(bytes, bytes): decoded data and remaining encoded data.
    Raises:
      BackEndError: if the base32 stream cannot be decoded.
    """
    try:
        decoded_data = base64.b32decode(encoded_data, casefold=False)
    except (TypeError, binascii.Error) as exception:
        # Wrap the low-level decoding failure in the back-end error type.
        error_message = (
            'Unable to decode base32 stream with error: {0!s}.'.format(
                exception))
        raise errors.BackEndError(error_message)
    else:
        # base32 decoding consumes the full input; nothing remains.
        return decoded_data, b''
"def",
"Decode",
"(",
"self",
",",
"encoded_data",
")",
":",
"try",
":",
"decoded_data",
"=",
"base64",
".",
"b32decode",
"(",
"encoded_data",
",",
"casefold",
"=",
"False",
")",
"except",
"(",
"TypeError",
",",
"binascii",
".",
"Error",
")",
"as",
"exce... | 27.4 | 21.9 |
def start(self, retry_limit=None):
    """
    Try to connect to Twitter's streaming API.
    :param retry_limit: The maximum number of retries in case of failures. Default is None (unlimited)
    :raises :class:`~tweepy.error.TweepyError`: If there's some critical API error
    """
    # Run tweepy stream
    wrapper_listener = TweepyWrapperListener(listener=self.listener)
    stream = tweepy.Stream(auth=self.client.tweepy_api.auth, listener=wrapper_listener)
    retry_counter = 0
    # Reconnect loop: one iteration per (re)connection attempt; the
    # stream.filter/userstream calls block until the stream drops.
    while retry_limit is None or retry_counter <= retry_limit:
        try:
            retry_counter += 1
            if not self.client.config.get('user_stream'):
                logging.info('Listening to public stream')
                stream.filter(follow=self.filter.follow, track=self.filter.track)
            else:
                if self.filter.follow:
                    logging.warning('Follow filters won\'t be used in user stream')
                logging.info('Listening to user stream')
                stream.userstream(track=self.filter.track)
        except AttributeError as e:
            # Known Tweepy's issue https://github.com/tweepy/tweepy/issues/576
            # -- only this specific AttributeError is ignored; anything else
            # is re-raised.
            if "'NoneType' object has no attribute 'strip'" in str(e):
                pass
            else:
                raise
"def",
"start",
"(",
"self",
",",
"retry_limit",
"=",
"None",
")",
":",
"# Run tweepy stream",
"wrapper_listener",
"=",
"TweepyWrapperListener",
"(",
"listener",
"=",
"self",
".",
"listener",
")",
"stream",
"=",
"tweepy",
".",
"Stream",
"(",
"auth",
"=",
"se... | 45.8 | 24.666667 |
def get_pallete_length(grid):
    """
    Takes a 2d grid and figures out how many different elements are in it, so
    that we know how big to make the palette. Also avoids the unfortunate
    red/green palette that results from too few elements.

    :param grid: 2d grid of hashable elements (strings or numbers).
    :return: int indicating the length the palette should have.
    """
    elements = list(set(flatten_array(grid)))
    length = len(elements)
    # Only string grids get the red/green work-around. isinstance (instead
    # of the `type(...) is str` comparison) also covers str subclasses, and
    # the `elements` guard avoids an IndexError on an empty grid.
    if elements and isinstance(elements[0], str):
        lengths = [len(el) for el in elements if not el.startswith("-")]
        # Guard against an empty list (every element '-'-prefixed), which
        # previously made max() raise ValueError.
        if lengths and max(lengths) < 5:  # Mixing red and green
            length += 2  # is not pretty so let's avoid it
    return length
"def",
"get_pallete_length",
"(",
"grid",
")",
":",
"elements",
"=",
"list",
"(",
"set",
"(",
"flatten_array",
"(",
"grid",
")",
")",
")",
"length",
"=",
"len",
"(",
"elements",
")",
"if",
"type",
"(",
"elements",
"[",
"0",
"]",
")",
"is",
"str",
"... | 38.375 | 19.5 |
def _run(self):
"""Internal function to run the impact function with profiling."""
LOGGER.info('ANALYSIS : The impact function is starting.')
step_count = len(analysis_steps)
self.callback(0, step_count, analysis_steps['initialisation'])
# Set a unique name for this impact
self._unique_name = self._name.replace(' ', '')
self._unique_name = replace_accentuated_characters(self._unique_name)
now = datetime.now()
date = now.strftime('%d%B%Y')
# We need to add milliseconds to be sure to have a unique name.
# Some tests are executed in less than a second.
time = now.strftime('%Hh%M-%S.%f')
self._unique_name = '%s_%s_%s' % (self._unique_name, date, time)
if not self._datastore:
# By default, results will go in a temporary folder.
# Users are free to set their own datastore with the setter.
self.callback(1, step_count, analysis_steps['data_store'])
default_user_directory = setting(
'defaultUserDirectory', default='')
if default_user_directory:
path = join(default_user_directory, self._unique_name)
if not exists(path):
makedirs(path)
self._datastore = Folder(path)
else:
self._datastore = Folder(temp_dir(sub_dir=self._unique_name))
self._datastore.default_vector_format = 'geojson'
LOGGER.info('Datastore : %s' % self.datastore.uri_path)
if self.debug_mode:
self._datastore.use_index = True
self.datastore.add_layer(self.exposure, 'exposure')
self.datastore.add_layer(self.hazard, 'hazard')
if self.aggregation:
self.datastore.add_layer(self.aggregation, 'aggregation')
self._performance_log = profiling_log()
self.callback(2, step_count, analysis_steps['pre_processing'])
self.pre_process()
self.callback(3, step_count, analysis_steps['aggregation_preparation'])
self.aggregation_preparation()
# Special case for earthquake hazard on population. We need to remove
# the fatality model.
earthquake_on_population = False
if self.hazard.keywords.get('hazard') == hazard_earthquake['key']:
if self.exposure.keywords.get('exposure') == \
exposure_population['key']:
earthquake_on_population = True
if not earthquake_on_population:
# This is not a EQ raster on raster population. We need to set it
# to None as we don't want notes specific to EQ raster on
# population.
self._earthquake_function = None
set_provenance(
self._provenance,
provenance_earthquake_function,
self._earthquake_function)
step_count = len(analysis_steps)
self._performance_log = profiling_log()
self.callback(4, step_count, analysis_steps['hazard_preparation'])
self.hazard_preparation()
self._performance_log = profiling_log()
self.callback(
5, step_count, analysis_steps['aggregate_hazard_preparation'])
self.aggregate_hazard_preparation()
self._performance_log = profiling_log()
self.callback(6, step_count, analysis_steps['exposure_preparation'])
self.exposure_preparation()
self._performance_log = profiling_log()
self.callback(7, step_count, analysis_steps['combine_hazard_exposure'])
self.intersect_exposure_and_aggregate_hazard()
self._performance_log = profiling_log()
self.callback(8, step_count, analysis_steps['post_processing'])
if is_vector_layer(self._exposure_summary):
# We post process the exposure summary
self.post_process(self._exposure_summary)
else:
# We post process the aggregate hazard.
# Raster continuous exposure.
self.post_process(self._aggregate_hazard_impacted)
# Quick hack if EQ on places, we do some ordering on the distance.
if self.exposure.keywords.get('exposure') == exposure_place['key']:
if self.hazard.keywords.get('hazard') == hazard_earthquake['key']:
if is_vector_layer(self._exposure_summary):
field = distance_field['field_name']
if self._exposure_summary.fields().lookupField(field) \
!= -1:
layer = create_memory_layer(
'ordered',
self._exposure_summary.geometryType(),
self._exposure_summary.crs(),
self._exposure_summary.fields())
layer.startEditing()
layer.keywords = copy_layer_keywords(
self._exposure_summary.keywords)
request = QgsFeatureRequest()
request.addOrderBy('"%s"' % field, True, False)
iterator = self._exposure_summary.getFeatures(request)
for feature in iterator:
layer.addFeature(feature)
layer.commitChanges()
self._exposure_summary = layer
self.debug_layer(self._exposure_summary)
self._performance_log = profiling_log()
self.callback(9, step_count, analysis_steps['summary_calculation'])
self.summary_calculation()
self._end_datetime = datetime.now()
set_provenance(
self._provenance, provenance_start_datetime, self.start_datetime)
set_provenance(
self._provenance, provenance_end_datetime, self.end_datetime)
set_provenance(
self._provenance, provenance_duration, self.duration)
self._generate_provenance()
# Update provenance with output layer path
output_layer_provenance = {
provenance_layer_exposure_summary['provenance_key']: None,
provenance_layer_aggregate_hazard_impacted['provenance_key']: None,
provenance_layer_aggregation_summary['provenance_key']: None,
provenance_layer_analysis_impacted['provenance_key']: None,
provenance_layer_exposure_summary_table['provenance_key']: None,
provenance_layer_profiling['provenance_key']: None,
provenance_layer_exposure_summary_id['provenance_key']: None,
provenance_layer_aggregate_hazard_impacted_id[
'provenance_key']: None,
provenance_layer_aggregation_summary_id['provenance_key']: None,
provenance_layer_analysis_impacted_id['provenance_key']: None,
provenance_layer_exposure_summary_table_id['provenance_key']: None,
}
# End of the impact function, we can add layers to the datastore.
# We replace memory layers by the real layer from the datastore.
# Exposure summary
if self._exposure_summary:
self._exposure_summary.keywords[
'provenance_data'] = self.provenance
append_ISO19115_keywords(
self._exposure_summary.keywords)
result, name = self.datastore.add_layer(
self._exposure_summary,
layer_purpose_exposure_summary['key'])
if not result:
raise Exception(
tr('Something went wrong with the datastore : '
'{error_message}').format(error_message=name))
self._exposure_summary = self.datastore.layer(name)
self.debug_layer(self._exposure_summary, add_to_datastore=False)
output_layer_provenance[provenance_layer_exposure_summary[
'provenance_key']] = full_layer_uri(self._exposure_summary)
output_layer_provenance[provenance_layer_exposure_summary_id[
'provenance_key']] = self._exposure_summary.id()
# Aggregate hazard impacted
if self.aggregate_hazard_impacted:
self.aggregate_hazard_impacted.keywords[
'provenance_data'] = self.provenance
append_ISO19115_keywords(
self.aggregate_hazard_impacted.keywords)
result, name = self.datastore.add_layer(
self._aggregate_hazard_impacted,
layer_purpose_aggregate_hazard_impacted['key'])
if not result:
raise Exception(
tr('Something went wrong with the datastore : '
'{error_message}').format(error_message=name))
self._aggregate_hazard_impacted = self.datastore.layer(name)
self.debug_layer(
self._aggregate_hazard_impacted, add_to_datastore=False)
output_layer_provenance[
provenance_layer_aggregate_hazard_impacted['provenance_key']
] = full_layer_uri(self.aggregate_hazard_impacted)
output_layer_provenance[
provenance_layer_aggregate_hazard_impacted_id['provenance_key']
] = self.aggregate_hazard_impacted.id()
# Exposure summary table
if self._exposure.keywords.get('classification'):
self._exposure_summary_table.keywords[
'provenance_data'] = self.provenance
append_ISO19115_keywords(
self._exposure_summary_table.keywords)
result, name = self.datastore.add_layer(
self._exposure_summary_table,
layer_purpose_exposure_summary_table['key'])
if not result:
raise Exception(
tr('Something went wrong with the datastore : '
'{error_message}').format(error_message=name))
self._exposure_summary_table = self.datastore.layer(name)
self.debug_layer(
self._exposure_summary_table, add_to_datastore=False)
output_layer_provenance[
provenance_layer_exposure_summary_table['provenance_key']
] = full_layer_uri(self._exposure_summary_table)
output_layer_provenance[
provenance_layer_exposure_summary_table_id['provenance_key']
] = self._exposure_summary_table.id()
# Aggregation summary
self.aggregation_summary.keywords['provenance_data'] = self.provenance
append_ISO19115_keywords(self.aggregation_summary.keywords)
result, name = self.datastore.add_layer(
self._aggregation_summary,
layer_purpose_aggregation_summary['key'])
if not result:
raise Exception(
tr('Something went wrong with the datastore : '
'{error_message}').format(error_message=name))
self._aggregation_summary = self.datastore.layer(name)
self.debug_layer(self._aggregation_summary, add_to_datastore=False)
output_layer_provenance[provenance_layer_aggregation_summary[
'provenance_key']] = full_layer_uri(self._aggregation_summary)
output_layer_provenance[provenance_layer_aggregation_summary_id[
'provenance_key']] = self._aggregation_summary.id()
# Analysis impacted
self.analysis_impacted.keywords['provenance_data'] = self.provenance
append_ISO19115_keywords(self.analysis_impacted.keywords)
result, name = self.datastore.add_layer(
self._analysis_impacted, layer_purpose_analysis_impacted['key'])
if not result:
raise Exception(
tr('Something went wrong with the datastore : '
'{error_message}').format(error_message=name))
self._analysis_impacted = self.datastore.layer(name)
self.debug_layer(self._analysis_impacted, add_to_datastore=False)
output_layer_provenance[provenance_layer_analysis_impacted[
'provenance_key']] = full_layer_uri(self._analysis_impacted)
output_layer_provenance[provenance_layer_analysis_impacted_id[
'provenance_key']] = self._analysis_impacted.id()
# Put profiling file path to the provenance
# FIXME(IS): Very hacky
if not self.debug_mode:
profiling_path = join(dirname(
self._analysis_impacted.source()),
layer_purpose_profiling['name'] + '.csv')
output_layer_provenance[
provenance_layer_profiling['provenance_key']] = profiling_path
# Update provenance data with output layers URI
self._provenance.update(output_layer_provenance)
if self._exposure_summary:
self._exposure_summary.keywords[
'provenance_data'] = self.provenance
write_iso19115_metadata(
self._exposure_summary.source(),
self._exposure_summary.keywords)
if self._aggregate_hazard_impacted:
self._aggregate_hazard_impacted.keywords[
'provenance_data'] = self.provenance
write_iso19115_metadata(
self._aggregate_hazard_impacted.source(),
self._aggregate_hazard_impacted.keywords)
if self._exposure_summary_table:
self._exposure_summary_table.keywords[
'provenance_data'] = self.provenance
write_iso19115_metadata(
self._exposure_summary_table.source(),
self._exposure_summary_table.keywords)
self.aggregation_summary.keywords['provenance_data'] = self.provenance
write_iso19115_metadata(
self.aggregation_summary.source(),
self.aggregation_summary.keywords)
self.analysis_impacted.keywords['provenance_data'] = self.provenance
write_iso19115_metadata(
self.analysis_impacted.source(),
self.analysis_impacted.keywords) | [
"def",
"_run",
"(",
"self",
")",
":",
"LOGGER",
".",
"info",
"(",
"'ANALYSIS : The impact function is starting.'",
")",
"step_count",
"=",
"len",
"(",
"analysis_steps",
")",
"self",
".",
"callback",
"(",
"0",
",",
"step_count",
",",
"analysis_steps",
"[",
"'in... | 46.525424 | 20.166102 |
def reset_all_to_coefficients(self):
""" Resets the IOSystem and all extensions to coefficients.
This method calls reset_to_coefficients for the IOSystem and for
all Extensions in the system
Note
-----
The system can not be reconstructed after this steps
because all absolute data is removed. Save the Y data in case
a reconstruction might be necessary.
"""
self.reset_to_coefficients()
[ee.reset_to_coefficients() for ee in self.get_extensions(data=True)]
self.meta._add_modify("Reset full system to coefficients")
return self | [
"def",
"reset_all_to_coefficients",
"(",
"self",
")",
":",
"self",
".",
"reset_to_coefficients",
"(",
")",
"[",
"ee",
".",
"reset_to_coefficients",
"(",
")",
"for",
"ee",
"in",
"self",
".",
"get_extensions",
"(",
"data",
"=",
"True",
")",
"]",
"self",
".",... | 34.333333 | 22 |
def insert(self, table, kwargs, execute=True):
""".. :py:method::
Usage::
>>> insert('hospital', {'id': '12de3wrv', 'province': 'shanghai'})
insert into hospital (id, province) values ('12de3wrv', 'shanghai');
:param string table: table name
:param dict kwargs: name and value
:param bool execute: if not execute, return sql and variables
:rtype: tuple
"""
sql = "insert into " + table + " ({}) values ({});"
keys, values = [], []
[ (keys.append(k), values.append(v)) for k, v in kwargs.iteritems() ]
sql = sql.format(', '.join(keys), ', '.join(['%s']*len(values)))
if execute:
super(PGWrapper, self).execute(sql, values, result=False)
else:
return sql, values | [
"def",
"insert",
"(",
"self",
",",
"table",
",",
"kwargs",
",",
"execute",
"=",
"True",
")",
":",
"sql",
"=",
"\"insert into \"",
"+",
"table",
"+",
"\" ({}) values ({});\"",
"keys",
",",
"values",
"=",
"[",
"]",
",",
"[",
"]",
"[",
"(",
"keys",
".",... | 36.136364 | 23 |
def resolve_operands(self, encoding_map, operation, pc):
"""
Converts generic register references (such as $t0, $t1, etc), immediate values, and jump addresses
to their binary equivalents.
"""
convert = Encoder.to_binary
branch_replace = False
jump_replace = False
for operand, value in encoding_map.iteritems():
if (operand == 'rs' or operand == 'rt' or operand == 'rd'):
encoding_map[operand] = MIPS.registers[value]
elif (operand == 'imm'):
encoding_map[operand] = convert(int(value), MIPS.IMMEDIATE_SIZE)
elif (operand == 'addr'):
encoding_map[operand] = convert(int(value), MIPS.ADDRESS_SIZE)
elif (operand == 'shamt'):
encoding_map[operand] = convert(int(value), MIPS.SHAMT_SIZE)
elif (operand == 'label'):
label = encoding_map[operand]
hit, index = self.label_cache.query(label)
if not hit:
raise RuntimeError('No address found for label: {}'.format(label))
if ((operation == 'beq') or (operation == 'bne')):
# Calculate the relative instruction offset. The MIPS ISA uses
# PC + 4 + (branch offset) to resolve branch targets.
if index > pc:
encoding_map[operand] = convert(index - pc - 1, MIPS.IMMEDIATE_SIZE)
elif index < pc:
encoding_map[operand] = convert((pc + 1) - index, MIPS.IMMEDIATE_SIZE)
else:
# Not sure why a branch would resolve to itself, but ok
# (PC + 4) - 4 =
encoding_map[operand] = convert(-1, MIPS.IMMEDIATE_SIZE)
branch_replace = True
elif ((operation == 'j') or (operation == 'jal')):
# Jump addresses are absolute
encoding_map[operand] = convert(index, MIPS.ADDRESS_SIZE)
jump_replace = True
# Need to convert references to 'label' back to references the instruction
# encoding string recognizes, otherwise we end up with the default value (zero)
# This doesn't feel very clean, but working on a fix.
if branch_replace:
encoding_map['imm'] = encoding_map['label']
elif jump_replace:
encoding_map['addr'] = encoding_map['label'] | [
"def",
"resolve_operands",
"(",
"self",
",",
"encoding_map",
",",
"operation",
",",
"pc",
")",
":",
"convert",
"=",
"Encoder",
".",
"to_binary",
"branch_replace",
"=",
"False",
"jump_replace",
"=",
"False",
"for",
"operand",
",",
"value",
"in",
"encoding_map",... | 44.836364 | 24.4 |
async def Set(self, annotations):
'''
annotations : typing.Sequence[~EntityAnnotations]
Returns -> typing.Sequence[~ErrorResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Annotations',
request='Set',
version=2,
params=_params)
_params['annotations'] = annotations
reply = await self.rpc(msg)
return reply | [
"async",
"def",
"Set",
"(",
"self",
",",
"annotations",
")",
":",
"# map input types to rpc msg",
"_params",
"=",
"dict",
"(",
")",
"msg",
"=",
"dict",
"(",
"type",
"=",
"'Annotations'",
",",
"request",
"=",
"'Set'",
",",
"version",
"=",
"2",
",",
"param... | 32.357143 | 11.785714 |
def FIR_fix_header(fname_out, h):
"""
Write FIR Fixed-Point Filter Header Files
Mark Wickert February 2015
"""
M = len(h)
hq = int16(rint(h * 2 ** 15))
N = 8 # Coefficients per line
f = open(fname_out, 'wt')
f.write('//define a FIR coefficient Array\n\n')
f.write('#include <stdint.h>\n\n')
f.write('#ifndef M_FIR\n')
f.write('#define M_FIR %d\n' % M)
f.write('#endif\n')
f.write('/************************************************************************/\n');
f.write('/* FIR Filter Coefficients */\n');
f.write('int16_t h_FIR[M_FIR] = {')
kk = 0;
for k in range(M):
# k_mod = k % M
if (kk < N - 1) and (k < M - 1):
f.write('%5d,' % hq[k])
kk += 1
elif (kk == N - 1) & (k < M - 1):
f.write('%5d,\n' % hq[k])
if k < M:
f.write(' ')
kk = 0
else:
f.write('%5d' % hq[k])
f.write('};\n')
f.write('/************************************************************************/\n')
f.close() | [
"def",
"FIR_fix_header",
"(",
"fname_out",
",",
"h",
")",
":",
"M",
"=",
"len",
"(",
"h",
")",
"hq",
"=",
"int16",
"(",
"rint",
"(",
"h",
"*",
"2",
"**",
"15",
")",
")",
"N",
"=",
"8",
"# Coefficients per line\r",
"f",
"=",
"open",
"(",
"fname_ou... | 34.235294 | 15.176471 |
def __get_query_agg_ts(cls, field, time_field, interval=None,
time_zone=None, start=None, end=None,
agg_type='count', offset=None):
"""
Create an es_dsl aggregation object for getting the time series values for a field.
:param field: field to get the time series values
:param time_field: field with the date
:param interval: interval to be used to generate the time series values, such as:(year(y),
quarter(q), month(M), week(w), day(d), hour(h), minute(m), second(s))
:param time_zone: time zone for the time_field
:param start: date from for the time series, should be a datetime.datetime object
:param end: date to for the time series, should be a datetime.datetime object
:param agg_type: kind of aggregation for the field (cardinality, avg, percentiles)
:param offset: offset to be added to the time_field in days
:return: a aggregation object to calculate timeseries values of a field
"""
""" Time series for an aggregation metric """
if not interval:
interval = '1M'
if not time_zone:
time_zone = 'UTC'
if not field:
field_agg = ''
else:
if agg_type == "cardinality":
agg_id, field_agg = cls.__get_query_agg_cardinality(field, agg_id=cls.AGGREGATION_ID + 1)
elif agg_type == "avg":
agg_id, field_agg = cls.__get_query_agg_avg(field, agg_id=cls.AGGREGATION_ID + 1)
elif agg_type == "percentiles":
agg_id, field_agg = cls.__get_query_agg_percentiles(field, agg_id=cls.AGGREGATION_ID + 1)
else:
raise RuntimeError("Aggregation of %s in ts not supported" % agg_type)
bounds = {}
if start or end:
if not offset:
# With offset and quarter interval bogus buckets are added
# to the start and to the end if extended_bounds is used
# https://github.com/elastic/elasticsearch/issues/23776
bounds = cls.__get_bounds(start, end)
else:
bounds = {'offset': offset}
query_agg = A("date_histogram", field=time_field, interval=interval,
time_zone=time_zone, min_doc_count=0, **bounds)
agg_dict = field_agg.to_dict()[field_agg.name]
query_agg.bucket(agg_id, field_agg.name, **agg_dict)
return (cls.AGGREGATION_ID, query_agg) | [
"def",
"__get_query_agg_ts",
"(",
"cls",
",",
"field",
",",
"time_field",
",",
"interval",
"=",
"None",
",",
"time_zone",
"=",
"None",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"agg_type",
"=",
"'count'",
",",
"offset",
"=",
"None",
")",... | 48.192308 | 26.634615 |
def fun_wv(xchannel, crpix1, crval1, cdelt1):
"""Compute wavelengths from channels.
The wavelength calibration is provided through the usual parameters
CRPIX1, CRVAL1 and CDELT1.
Parameters
----------
xchannel : numpy array
Input channels where the wavelengths will be evaluated.
crpix1: float
CRPIX1 keyword.
crval1: float
CRVAL1 keyword.
cdelt1: float
CDELT1 keyword.
Returns
-------
wv : numpy array
Computed wavelengths
"""
wv = crval1 + (xchannel - crpix1) * cdelt1
return wv | [
"def",
"fun_wv",
"(",
"xchannel",
",",
"crpix1",
",",
"crval1",
",",
"cdelt1",
")",
":",
"wv",
"=",
"crval1",
"+",
"(",
"xchannel",
"-",
"crpix1",
")",
"*",
"cdelt1",
"return",
"wv"
] | 22.4 | 21.52 |
def NewFromJSON(data):
"""
Create a new SharedFile instance from a JSON dict.
Args:
data (dict): JSON dictionary representing a SharedFile.
Returns:
A SharedFile instance.
"""
return SharedFile(
sharekey=data.get('sharekey', None),
name=data.get('name', None),
user=User.NewFromJSON(data.get('user', None)),
title=data.get('title', None),
description=data.get('description', None),
posted_at=data.get('posted_at', None),
permalink=data.get('permalink', None),
width=data.get('width', None),
height=data.get('height', None),
views=data.get('views', 0),
likes=data.get('likes', 0),
saves=data.get('saves', 0),
comments=data.get('comments', None),
nsfw=data.get('nsfw', False),
image_url=data.get('image_url', None),
source_url=data.get('source_url', None),
saved=data.get('saved', False),
liked=data.get('liked', False),
) | [
"def",
"NewFromJSON",
"(",
"data",
")",
":",
"return",
"SharedFile",
"(",
"sharekey",
"=",
"data",
".",
"get",
"(",
"'sharekey'",
",",
"None",
")",
",",
"name",
"=",
"data",
".",
"get",
"(",
"'name'",
",",
"None",
")",
",",
"user",
"=",
"User",
"."... | 36.3 | 11.766667 |
def dumpBlock(self, block_name):
"""
API the list all information related with the block_name
:param block_name: Name of block to be dumped (Required)
:type block_name: str
"""
try:
return self.dbsBlock.dumpBlock(block_name)
except HTTPError as he:
raise he
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/dumpBlock. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', ex.message, self.logger.exception, sError) | [
"def",
"dumpBlock",
"(",
"self",
",",
"block_name",
")",
":",
"try",
":",
"return",
"self",
".",
"dbsBlock",
".",
"dumpBlock",
"(",
"block_name",
")",
"except",
"HTTPError",
"as",
"he",
":",
"raise",
"he",
"except",
"dbsException",
"as",
"de",
":",
"dbsE... | 39.833333 | 22.055556 |
def _zero_many(self, i, j):
"""Sets value at each (i, j) to zero, preserving sparsity structure.
Here (i,j) index major and minor respectively.
"""
i, j, M, N = self._prepare_indices(i, j)
n_samples = len(i)
offsets = np.empty(n_samples, dtype=self.indices.dtype)
ret = _sparsetools.csr_sample_offsets(M, N, self.indptr, self.indices,
n_samples, i, j, offsets)
if ret == 1:
# rinse and repeat
self.sum_duplicates()
_sparsetools.csr_sample_offsets(M, N, self.indptr,
self.indices, n_samples, i, j,
offsets)
# only assign zeros to the existing sparsity structure
self.data[list(offsets[offsets > -1])] = 0 | [
"def",
"_zero_many",
"(",
"self",
",",
"i",
",",
"j",
")",
":",
"i",
",",
"j",
",",
"M",
",",
"N",
"=",
"self",
".",
"_prepare_indices",
"(",
"i",
",",
"j",
")",
"n_samples",
"=",
"len",
"(",
"i",
")",
"offsets",
"=",
"np",
".",
"empty",
"(",... | 38.65 | 18.7 |
def _update_bcbiovm():
"""Update or install a local bcbiovm install with tools and dependencies.
"""
print("## CWL support with bcbio-vm")
python_env = "python=3"
conda_bin, env_name = _add_environment("bcbiovm", python_env)
channels = _get_conda_channels(conda_bin)
base_cmd = [conda_bin, "install", "--yes", "--name", env_name] + channels
subprocess.check_call(base_cmd + [python_env, "nomkl", "bcbio-nextgen"])
extra_uptodate = ["cromwell"]
subprocess.check_call(base_cmd + [python_env, "bcbio-nextgen-vm"] + extra_uptodate) | [
"def",
"_update_bcbiovm",
"(",
")",
":",
"print",
"(",
"\"## CWL support with bcbio-vm\"",
")",
"python_env",
"=",
"\"python=3\"",
"conda_bin",
",",
"env_name",
"=",
"_add_environment",
"(",
"\"bcbiovm\"",
",",
"python_env",
")",
"channels",
"=",
"_get_conda_channels"... | 50.636364 | 17.181818 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.