content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def xround(x, divisor=1):
    """Round `x` to the nearest multiple of `divisor`.

    Parameters
    ----------
    x : float
        Number to round.
    divisor : float
        Number the result shall be a multiple of.

    Returns
    -------
    float
        `x` rounded to the closest multiple of `divisor`.
    """
    quotient = x / divisor
    return round(quotient) * divisor
def gc(s):
    """Return the percentage of the DNA sequence ``s`` composed of G+C.

    Counting is now case-insensitive, so lowercase sequences ("acgt")
    give the same result as uppercase ones; previously only uppercase
    'G'/'C' were counted.  Raises ZeroDivisionError for an empty
    sequence, as before.
    """
    sequence = s.upper()
    gc_count = sequence.count('G') + sequence.count('C')
    return gc_count * 100 / len(s)
def parse_boolean(s):
    """Convert the string *s* to the boolean value it names.

    Raises ValueError when the string names neither a true nor a false value.
    """
    normalized = s.strip().lower()
    truthy = ("yes", "true", "on", "1")
    falsy = ("no", "false", "off", "0", "none")
    if normalized in truthy:
        return True
    if normalized in falsy:
        return False
    raise ValueError("Invalid boolean value %r" % normalized)
def _format_result(result):
"""Format result into string for templating."""
# do not include decimal if it's 100
if result == 100:
return "100"
return "{:.1f}".format(result) | fd386b200dac72cd1db3c104b9dc41b774d3a7d0 | 692,178 |
import os
def get_album_list(photos_basedir):
    """
    Return album names (not absolute paths) found in *photos_basedir*,
    sorted in reverse order.  Hidden entries and plain files are skipped.
    :param photos_basedir: directory containing one sub-directory per album
    :return: list of album directory names
    """
    albums = []
    for entry in sorted(os.listdir(photos_basedir), reverse=True):
        if entry.startswith('.'):
            continue
        if os.path.isdir(os.path.join(photos_basedir, entry)):
            albums.append(entry)
    return albums
def sumfeat(part_features, accumulator_features, feat):
    """a helper method that calculates the sum of a target feature over a list of objects

    `feat` is a string suffix -- presumably an attribute or index expression
    such as ".area" or "[0]" (TODO confirm against callers) -- that is
    textually appended to the local variable names and evaluated.
    NOTE(review): `eval` on a caller-supplied string is fragile and unsafe
    if `feat` can come from untrusted input; consider getattr /
    operator.itemgetter instead.

    Returns:
        the sum of the target feature over the given list of objects
    """
    sum = 0  # NOTE(review): shadows the builtin `sum`
    sum += eval('part_features'+feat)
    # an accumulator value of -1 is treated as "unset" and skipped
    if (eval('accumulator_features'+feat) != -1):
        sum += eval('accumulator_features'+feat)
    return sum
import string
def sentence_preprocess(phrase):
    """Preprocess a sentence: strip spaces, substitute special characters,
    lowercase, and remove ASCII punctuation."""
    replacements = {
        "½": "half",
        "—": "-",
        "™": "",
        "¢": "cent",
        "ç": "c",
        "û": "u",
        "é": "e",
        "°": " degree",
        "è": "e",
        "…": "",
    }
    cleaned = phrase.strip(" ")
    for old, new in replacements.items():
        cleaned = cleaned.replace(old, new)
    punctuation_table = str.maketrans("", "", string.punctuation)
    return str(cleaned).lower().translate(punctuation_table)
def get_relation_type(tree):
    """Return the RST relation type attached to the parent node of an
    RST relation, e.g. `span`, `elaboration` or `antithesis`.

    The node label has the form ``<node>:<relation>``; the part after the
    first colon is returned.

    Parameters
    ----------
    tree : nltk.tree.ParentedTree
        a tree representing a rhetorical structure (or a part of it)

    Returns
    -------
    relation_type : str
        the type of the rhetorical relation that this (sub)tree represents
    """
    label = tree.label()
    return label.split(':')[1]
def extract_model_and_compression_states(resuming_checkpoint):
    """
    Return the (model state_dict, compression_state) pair from a checkpoint.

    The model weights are looked up under 'model', then 'state_dict'; if
    neither key exists the checkpoint itself is assumed to be a plain
    state_dict.  The compression state is fetched with .get() so a
    checkpoint without a 'compression_state' key (e.g. a plain
    state_dict) yields None instead of raising KeyError.
    """
    if 'model' in resuming_checkpoint:
        model_state_dict = resuming_checkpoint['model']
    elif 'state_dict' in resuming_checkpoint:
        model_state_dict = resuming_checkpoint['state_dict']
    else:
        model_state_dict = resuming_checkpoint
    compression_state = resuming_checkpoint.get('compression_state')
    return model_state_dict, compression_state
def mime(mime):
    """
    Decorator factory: tag the wrapped function with the preferred mime
    type to be written in the http response when returning the function
    result (stored on the function's ``mime`` attribute).
    """
    def decorator(fn):
        fn.mime = mime
        return fn
    return decorator
import os
def convert_filename(txtfilename, outdir='.'):
    """Convert a .TXT filename to a Therion .TH filename located in *outdir*."""
    joined = os.path.join(outdir, os.path.basename(txtfilename))
    stem = joined.rsplit('.', 1)[0]
    return stem + '.th'
def set_read_only_lvol_bdev(client, name):
    """Mark logical volume as read only.

    Args:
        name: name of logical volume to set as read only
    """
    return client.call('set_read_only_lvol_bdev', {'name': name})
import os
def modules_from_dir(mdir):
    """
    Return a list of absolute paths to kernel modules found in directory
    *mdir*, including modules with known compressed extensions
    (.ko / .ko.zst / .ko.xz / .ko.gz).

    Returns None (after printing a diagnostic) when *mdir* does not exist
    or is not a directory.
    """
    if not os.path.exists(mdir):
        print(f'Module directory bad : {mdir}')
        return None
    if not os.path.isdir(mdir):
        print(f'Module directory must be a directory: {mdir}')
        return None
    mod_dir = os.path.abspath(mdir)
    # str.endswith accepts a tuple, replacing the manual extension loop
    known_exts = ('.ko', '.ko.zst', '.ko.xz', '.ko.gz')
    mod_list = []
    # use scandir as a context manager so the iterator is closed promptly
    with os.scandir(mod_dir) as scan:
        for item in scan:
            if item.is_file() and item.name.endswith(known_exts):
                mod_list.append(os.path.join(mod_dir, item.name))
    return mod_list
def remove_well_known_protos(filenames):
    """Remove "well-known" protos for objc and cpp.

    On those platforms we get these for free as a part of the protobuf
    runtime; we only need them for nanopb.

    Args:
        filenames: A list of filenames, each naming a .proto file.

    Returns:
        The filenames with members of google/protobuf removed.
    """
    marker = 'protos/google/protobuf/'
    return [name for name in filenames if marker not in name]
import random
def greeting_response(text):
    """
    Return a random bot greeting if *text* contains a known user greeting,
    otherwise None.
    """
    # bot's possible greeting responses
    bot_greetings = ['hallå', 'hej', 'Hej där']
    # greetings recognised from the user
    user_greetings = ('hej', 'hejsan', 'hallå')
    for token in text.split():
        if token in user_greetings:
            return random.choice(bot_greetings)
    return None
def TargetIndex(targetRow: int, targetColumn: int) -> int:
    """Return the 1-based diagonal index of cell (row, column):

        1   2   3   4   5   6
     ---+---+---+---+---+---+---+
     1 |  1   3   6  10  15  21
     2 |  2   5   9  14  20
     3 |  4   8  13  19
     4 |  7  12  18
     5 | 11  17
     6 | 16
    """
    diagonal = targetRow + targetColumn - 1
    # last index on this diagonal is the triangular number of `diagonal`
    diagonal_end = (diagonal * diagonal + diagonal) // 2
    return diagonal_end - (targetRow - 1)
def extraction(self, request):
    """Fetch HTTP Basic Auth credentials from the request.

    Returns a {'login': ..., 'password': ...} dict, or {} when no
    credentials are present.
    """
    creds = request._authUserPW()
    if creds is None:
        return {}
    login, password = creds
    return {'login': login, 'password': password}
def task_unittest():
    """doit task: run the unit test suite (depends on the translations task)."""
    actions = ['python -m unittest -v -f test/*.py']
    return {"actions": actions, "task_dep": ["translations"]}
import os
def dir_is_session(path):
    """
    Check whether a given directory contains a Manokee session, i.e.
    holds a "session.mnk" file.

    :param path: Path to a directory.
    :return: True if this directory contains a Manokee session.
    """
    if not os.path.isdir(path):
        return False
    return os.path.isfile(os.path.join(path, "session.mnk"))
def to_label(f):
    """
    Format the float *f* as a label, with precision scaled to its magnitude.
    """
    for bound, fmt in ((0.1, "%.3f"), (1.0, "%.2f"), (10.0, "%.1f")):
        if f < bound:
            return fmt % f
    return "%d" % int(f)
import hashlib
import binascii
def hash_password(password, salt, rounds, algorithm):
    """
    Hash *password* with *salt* using *algorithm*.

    Supported algorithms: 'pbkdf2_sha256' (hashlib, hex output, requires
    *rounds*) and 'md5' (hashlib, hex output, *rounds* ignored).  Returns
    just the hash (not the full hash string); returns None on error or
    for an unknown algorithm.
    """
    if algorithm == 'pbkdf2_sha256':
        if rounds is None:
            # Rounds must be set for PBKDF2.
            return None
        digest = hashlib.pbkdf2_hmac(
            'sha256', password.encode(), salt.encode(), rounds)
        return binascii.hexlify(digest).decode()
    if algorithm == 'md5':
        salted = (salt + password).encode()
        return hashlib.md5(salted).hexdigest()
    return None
def conseq(cond, true, false):
    """
    Behaves like the ternary operator: return *true* when *cond* is
    truthy, otherwise *false*.
    """
    return true if cond else false
import configparser
def load_settings(filename='settings.ini'):
    """
    Load the settings.ini file containing the organization name and
    personal access token.

    :param str filename: location of the file
    :return: config object, or None when the file is missing/empty or the
        [org] section lacks the name/pat keys
    """
    config = configparser.ConfigParser()
    config.read(filename)
    if not config.sections():
        return None
    try:
        org = config['org']
        if org['name'] is not None and org['pat'] is not None:
            return config
    except KeyError:
        return None
from bs4 import BeautifulSoup
import re
import html
def __getCompanyMetadata(parsed: BeautifulSoup) -> dict:
    """Function to extract company Standard Industrial Classification (SIC)
    code, SIC type (i.e. description), company location, state of incorporation,
    and the end of its fiscal year.

    Searches the raw HTML of the company identification section of the page
    using regular expressions (on the stringified element, not the parse
    tree -- NOTE(review): brittle if EDGAR changes its markup; verify the
    patterns against a current filing page).

    Arguments:
        parsed {BeautifulSoup} -- Parsed HTML from company EDGAR filing.

    Returns:
        dict -- Company metadata with keys `sic`, `sic_type`, `location`,
        `incorporation_state`, and `fiscal_year_end` (mm-dd).
    """
    # Company metadata container
    metadata_container = parsed.find('p', class_='identInfo')
    # String representation of HTML (used in RegEx)
    metadata_str = str(metadata_container)
    # Dictionary for company metadata
    company_metadata = dict()
    # RegEx for extracting SIC and SIC type
    sic_re = re.compile(r'SIC.+?:.+?(\d+?)<\/a> -(.+?)<br')
    # Getting SIC and SIC type match
    sic_matches = sic_re.findall(metadata_str)
    # Saving SIC and stripped, HTML-parsed SIC type
    company_metadata['sic'] = sic_matches[0][0]
    company_metadata['sic_type'] = html.unescape(sic_matches[0][1]).strip()
    # RegEx for extracting company location (state)
    location_re = re.compile(r'State location:.+?>(\w+?)<\/a>')
    # Getting company location
    location_matches = location_re.findall(metadata_str)
    # Saving company location
    company_metadata['location'] = location_matches[0].strip()
    # RegEx for extracting state of incorporation
    incorp_state_re = re.compile(r'State of Inc\.:.+?>(\w+?)<\/strong>')
    # Getting state of incorporation (IndexError here if the section is absent)
    incorp_match = incorp_state_re.findall(metadata_str)[0]
    # Saving state of incorporation
    company_metadata['incorporation_state'] = incorp_match.strip()
    # RegEx for extracting end of fiscal year (EDGAR shows it as "mmdd")
    fiscal_year_re = re.compile(r'Fiscal Year End:.+?(\d{4})')
    # Getting end of fiscal year
    fiscal_year_match = fiscal_year_re.findall(metadata_str)[0]
    # Saving end of fiscal year (re-formatted from "mmdd" to "mm-dd")
    fy_formatted = fiscal_year_match[0:2] + '-' + fiscal_year_match[2:]
    company_metadata['fiscal_year_end'] = fy_formatted
    return company_metadata
def is_form_persisted(form):
    """
    Does the form have a model instance attached and it's not being added?

    e.g. The form is about an existing Subgoal whose data is being edited.
    """
    return bool(form.instance and not form.instance._state.adding)
def return_segments(shape, break_points):
    """Break a shape into segments between stops using break_points.

    This function can use the `break_points` outputs from
    `find_segments`, and cuts the shape-sequence into pieces
    corresponding to each stop.

    `break_points` may contain None entries (presumably stops with no
    matched shape point -- TODO confirm in `find_segments`); a None end is
    substituted with the other end of the same segment, so such segments
    degenerate to a short/empty slice rather than raising.
    """
    segs = []
    bp = 0  # previous break point; initial value is never read
    bp2 = 0  # next break point; also the fallback for a None start
    for i in range(len(break_points)-1):
        # fall back on the other end of the segment when an end is None
        bp = break_points[i] if break_points[i] is not None else bp2
        bp2 = break_points[i+1] if break_points[i+1] is not None else bp
        # slice is inclusive of both break points
        segs.append(shape[bp:bp2+1])
    # trailing empty segment keeps len(segs) == len(break_points)
    segs.append([])
    return segs
def get_version(rel_path):
    """Given a path to a Python init file, return the version string."""
    with open(rel_path, "r") as source:
        for line in source:
            if line.startswith("__version__"):
                quote = '"' if '"' in line else "'"
                return line.split(quote)[1]
    raise RuntimeError("Unable to find version string.")
import os
import re
import bisect
import json
def get_files(directory, extensions=None):
    """Collect files in a directory tree, optionally filtered by extension.

    Args:
        directory (str): path of directory
        extensions (List[str]): list of extensions to search

    Returns:
        dict with keys 'files' (relative paths, top-level files sorted
        before sub-directory files), 'count_dirs', 'count_files' and
        'count_match_files'.

    Raises:
        ValueError: if *directory* does not exist.
    """
    directory = os.path.realpath(directory)
    if not os.path.exists(directory):
        raise ValueError('Directory "{}" doesn\'t exist'.format(directory))
    match_ext = "^.+(" + '|'.join(extensions) + ")$" if extensions else None
    count_dirs = 0
    count_files = 0
    count_match_files = 0
    all_files = []
    for root, _, files in os.walk(directory):
        count_dirs += 1
        if root == directory:
            rel_dir_path = root.replace(directory, './')
            inc = 0  # sorts top-level files before sub-directory files
        else:
            rel_dir_path = root.replace(directory, '.')
            inc = 1
        for file in files:
            count_files += 1
            if match_ext is None or re.match(match_ext, file):
                count_match_files += 1
                # keep all_files sorted as we insert
                bisect.insort(
                    all_files, (inc, os.path.join(rel_dir_path, file)))
    # NOTE: the leftover debug prints (including a json.dumps of the whole
    # list) were removed -- they ran on every call.
    return {
        'files': [f[1] for f in all_files],
        'count_dirs': count_dirs,
        'count_files': count_files,
        'count_match_files': count_match_files,
    }
import lzma
def compress_lzma(data: bytes) -> bytes:
    """compresses data via lzma (unity specific)

    The current static settings may not be the best solution,
    but they are the most commonly used values and should therefore be
    enough for the time being.

    :param data: uncompressed data
    :type data: bytes
    :return: compressed data (5-byte LZMA properties header + raw stream)
    :rtype: bytes
    """
    ec = lzma.LZMACompressor(
        format=lzma.FORMAT_RAW,
        filters=[
            {"id": lzma.FILTER_LZMA1, "dict_size": 524288, "lc": 3, "lp": 0, "pb": 2, }
        ],
    )
    # BUG FIX: the return value of compress() must be kept -- the original
    # code discarded it and returned only the flush() tail, silently
    # dropping most of the compressed payload for non-trivial inputs.
    compressed = ec.compress(data) + ec.flush()
    # 5-byte header: LZMA properties byte (0x5d) + little-endian dict size
    return b"]\x00\x00\x08\x00" + compressed
def relatice_percent_difference(x, y):
    """
    Calculate the relative percent difference of two numbers:
    |x - y| / avg(x, y) * 100, where avg(x, y) = |x + y| / 2.
    """
    mean = abs(x + y) / 2
    return abs(x - y) / mean * 100
def get_linear_lambda_with_warmup(num_warmup_steps, epoch_steps, last_epoch=-1):
    """ Create a schedule with a learning rate that decreases linearly
    after linearly increasing during a warmup period.

    The multiplier is zero for the whole first epoch, ramps linearly up to
    *num_warmup_steps*, then decays linearly to zero over 80 epochs.
    """
    num_training_steps = epoch_steps * 80

    def lr_lambda(current_step):
        if current_step < epoch_steps:
            # hold the LR at zero for the first epoch
            return 0
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        remaining = float(num_training_steps - current_step)
        span = float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, remaining / span)

    return lr_lambda
import os
def can_load(filename):
    """Returns 100 if this module can do a lossless load (.xbm files),
    0 if it can't load the file, and something in between for lossy loads."""
    extension = os.path.splitext(filename)[1].lower()
    if extension == ".xbm":
        return 100
    return 0
import re
def unixify_string(string):
    """
    Sanitize a string for unix use: drop the characters ()!?, then turn
    apostrophes, slashes and spaces into underscores.

    Parameters
    ----------
    string : str
        the string to sanitize

    Returns
    -------
    str
        the sanitized string
    """
    without_specials = re.sub('[()!?,]', '', string)
    underscored = re.sub("['/]", '_', without_specials)
    return underscored.replace(' ', '_')
def split_bulk_file_download(bulk_file_download):
    """
    Split a Lichess bulk download into its individual entries (records are
    separated by blank-line runs; the trailing empty chunk is dropped).

    Args:
        bulk_file_download (str): A bulk file download from Lichess.

    Returns:
        (list)
    """
    chunks = bulk_file_download.split("\n\n\n")
    return chunks[:-1]
import time
def time_string(short=False) -> str:
    """
    Return the current local time as a string (usable as a quasi-random
    suffix).

    :param short: when True return "HHMMSS", otherwise "YYYYMMDD_HHMMSS".
    """
    # Take a single localtime() snapshot: the original called localtime()
    # once per field, so the fields could straddle a second/minute/day
    # boundary and produce an inconsistent timestamp.
    now = time.localtime()
    if short:
        return time.strftime("%H%M%S", now)
    return time.strftime("%Y%m%d_%H%M%S", now)
def everyone(seq,method=bool):
    """Returns last that is true or first that is false.

    Returns False for an empty seq.  When an element fails method(), that
    element is returned only if it is itself falsy; a truthy failing
    element yields None.  If every element passes, the last element is
    returned.  NOTE(review): the falsy-vs-None split looks deliberate but
    surprising -- confirm callers rely on it before simplifying.
    """
    if not seq: return False
    for s in seq:
        # surface a falsy failing element; a truthy one maps to None
        if not method(s): return s if not s else None
    return seq[-1]
def getCounts(IDandRatingsTuple):
    """ Count the number of ratings for a movie.

    (The original docstring said "Calculate average rating", but the code
    returns the rating count, not an average.)

    Args:
        IDandRatingsTuple: a single tuple of (MovieID, (Rating1, Rating2, Rating3, ...))
    Returns:
        tuple: a tuple of (MovieID, number of ratings)
    """
    return (IDandRatingsTuple[0], len(IDandRatingsTuple[1]))
from typing import List
from typing import Any
def firstOrNone(list: List[Any]) -> Any:
    """Return the first element of *list*, or None when the sequence is
    empty or not indexable.

    Note: a falsy first element (0, "", None) is returned as-is.
    """
    try:
        return list[0]
    except (IndexError, KeyError, TypeError):
        # narrowed from a bare ``except`` that also swallowed
        # KeyboardInterrupt / SystemExit
        return None
import ctypes
def parser(word, _, skip=False, **kwargs):
    """ Parsers the pipe content

    Args:
        word (obj): The entry content to hash
        _ (None): Ignored.
        skip (bool): Don't parse the content
        kwargs (dict): Keyword arguments

    Kwargs:
        assign (str): Attribute to assign parsed content (default: exchangerate)
        stream (dict): The original item

    Returns:
        The unchanged stream when *skip* is set, otherwise the unsigned
        32-bit hash of *word*.

    Examples:
        >>> from meza.fntools import Objectify
        >>>
        >>> _hash = ctypes.c_uint(hash('hello world')).value
        >>> item = {'content': 'hello world'}
        >>> kwargs = {'stream': item}
        >>> parser(item['content'], None, **kwargs) == _hash
        True
    """
    if skip:
        return kwargs['stream']
    return ctypes.c_uint(hash(word)).value
def numeric_validator(value):
    """Validator for numeric values: True when *value* can be parsed as a
    number, False otherwise.

    BUG FIX: the original expression
    ``isinstance(float(value), float) or isinstance(int(value), int)`` was
    always True when float() succeeded and *raised* ValueError otherwise,
    so the validator could never actually report False.
    """
    try:
        float(value)
    except (TypeError, ValueError):
        return False
    return True
import plistlib
def fact():
    """
    Return the Zentral facts that should have been saved during the
    preflight, or an empty dict when the facts plist does not exist.
    """
    facts_path = "/usr/local/zentral/munki/facts.plist"
    try:
        with open(facts_path, "rb") as fh:
            return plistlib.load(fh)
    except FileNotFoundError:
        return {}
def treat_income(data):
    """
    Convert *data* to float, or 0.0 when it cannot be parsed.
    """
    try:
        return float(data)
    except (TypeError, ValueError):
        # narrowed from a bare ``except`` that also caught
        # KeyboardInterrupt / SystemExit
        return 0.0
def parse_message(message):
    """
    Parse a meme command of the form:

        !meme [meme name] [top text]; [(optional) bottom text]

    Returns {'template': ..., 'top': ..., 'bot': ...} on success, or
    False on any parse failure (no "!meme" prefix, missing meme name or
    top text).  The leftover debug ``print(args)`` was removed.
    """
    try:
        args = message.split('!meme')[1].split(';')
        template, top, bot = '', '', ''
        first_words = args[0].lstrip().split(' ')
        # IndexError here (fewer than two words) falls through to False,
        # matching the original behaviour
        template = first_words[0]
        top = first_words[1]
        if len(args) >= 2:
            bot = args[1]
        return {'template': template, 'top': top, 'bot': bot}
    except Exception:
        return False
def parse_market(raw_market, sym_1, sym_2):
    """
    Determine the symbol ordering of *raw_market*; None when either
    symbol is absent.

    >>> raw_market = 'BTCETH'
    >>> parse_market(raw_market, 'ETH', 'BTC')
    ('BTC', 'ETH')
    >>> parse_market("QTMBTC", 'QTM', 'BTC')
    ('QTM', 'BTC')
    """
    if sym_1 not in raw_market or sym_2 not in raw_market:
        return None
    first_leads = raw_market[0:len(sym_1)] == sym_1
    return (sym_1, sym_2) if first_leads else (sym_2, sym_1)
def round_to_n(x, n):
    """Round *x* to *n* significant digits.

    Args:
        x (float): The number to be rounded
        n (int): The number of significant digits to round to.

    Returns:
        float: The rounded number

    Raises:
        ValueError: when n < 1.

    Author:
        SMM
    """
    if n < 1:
        raise ValueError("number of significant digits must be >= 1")
    # Scientific notation with n-1 decimals keeps exactly n significant
    # digits; round-tripping through the string performs the rounding.
    return float("%.{}e".format(n - 1) % x)
import copy
import torch
def npvec_to_tensorlist(direction, params):
    """ Convert a flat numpy vector to a list of tensors shaped like *params*.

    Args:
        direction: a flat sequence of values, e.g. a direction loaded
            from an h5 file.
        params: either a list of parameter tensors or a (name -> tensor)
            mapping from a net.

    Returns:
        a list of tensors with the same shapes as the entries of *params*;
        when *params* is a list, deep copies of it with the direction
        values copied in.
    """
    offset = 0
    if isinstance(params, list):
        copies = copy.deepcopy(params)
        for tensor in copies:
            count = tensor.numel()
            chunk = torch.tensor(direction[offset:offset + count])
            tensor.copy_(chunk.view(tensor.size()))
            offset += count
        assert offset == len(direction)
        return copies
    tensors = []
    for _, tensor in params.items():
        count = tensor.numel()
        chunk = torch.Tensor(direction[offset:offset + count])
        tensors.append(chunk.view(tensor.size()))
        offset += count
    assert offset == len(direction)
    return tensors
def get_nested_compat_files(compat_api_versions):
    """Return __init__.py file paths for files under nested compat modules.

    A nested compat module contains two __init__.py files:
      1. compat/vN/compat/vK/__init__.py
      2. compat/vN/compat/vK/compat/__init__.py

    Args:
        compat_api_versions: list of compat versions.

    Returns:
        List of __init__.py file paths to include under nested compat modules.
    """
    files = []
    for outer in compat_api_versions:
        files.extend(
            "compat/v%d/compat/v%d/__init__.py" % (outer, inner)
            for inner in compat_api_versions)
        files.extend(
            "compat/v%d/compat/v%d/compat/__init__.py" % (outer, inner)
            for inner in compat_api_versions)
    return files
import torch
def val_test_collate(batch):
    """Collate fixed-size (scene, depth, mesh, orig_scene, orig_depth)
    samples into batched tensors.

    Returns:
        [scenes [B,3,H,W], depths [B,1,H,W], list of meshes,
         orig_scenes [B,3,H',W'], orig_depths [B,1,H',W']]
    """
    scenes = torch.stack([sample[0] for sample in batch])
    depths = torch.stack([sample[1] for sample in batch])
    meshes = [sample[2] for sample in batch]
    orig_scenes = torch.stack([sample[3] for sample in batch])
    orig_depths = torch.stack([sample[4] for sample in batch])
    return [scenes, depths, meshes, orig_scenes, orig_depths]
import functools
import os
def cleanup(filename):
    """
    Decorator factory: append *filename* to the wrapped function's
    positional arguments, and delete the file (if it exists) after the
    call, whether the call succeeded or raised.
    """
    def wrapper(fn):
        @functools.wraps(fn)
        def inner(*args, **kwargs):
            args += (filename,)
            try:
                return fn(*args, **kwargs)
            finally:
                # try/finally replaces the original duplicated
                # remove-on-success / remove-on-error branches
                if os.path.isfile(filename):
                    os.remove(filename)
        return inner
    return wrapper
def get_dispatch(data):
    """
    Return the dispatch type (second field of *data*).

    This will help determine how to parse the record.
    """
    dispatch_type = data[1]
    return dispatch_type
import subprocess
def run(argv, *args, **kwargs):
    """Thin pass-through wrapper around subprocess.run.

    NOTE(review): the original docstring claimed this logs the commands it
    runs, but no logging is performed -- it simply forwards all arguments
    to subprocess.run and returns its CompletedProcess.
    """
    return subprocess.run(argv, *args, **kwargs)
def _gr_ymin_ ( graph ) :
""" Get minimal y for the points
>>> graph = ...
>>> ymin = graph.ymin ()
"""
ymn = None
np = len(graph)
for ip in range( np ) :
x , y = graph[ip]
if None == ymn or y <= ymn : ymn = y
return ymn | 7dab83192dd7747d2ef12076f46783c87da678bf | 692,230 |
def _to_nested_dict(df):
"""Converts a multiindex series to nested dict"""
if hasattr(df.index, 'levels') and len(df.index.levels) > 1:
ret = {}
for k, v in df.groupby(level=0):
ret[k] = _to_nested_dict(v.droplevel(0))
return ret
else:
return df.to_dict() | 2bceed2f18fc51461bec39f3bc9879e89968dc45 | 692,231 |
def human_size(_bytes, traditional=((1024 ** 5, 'P'),
                                    (1024 ** 4, 'T'),
                                    (1024 ** 3, 'G'),
                                    (1024 ** 2, 'M'),
                                    (1024 ** 1, 'K'),
                                    (1024 ** 0, 'B'))):
    """Render a byte count as a human-readable string (e.g. "1.5K")."""
    for factor, suffix in traditional:
        if _bytes < factor:
            continue
        return str(round(_bytes / factor, 2)) + suffix
    # smaller than every factor (e.g. 0 or negative): plain repr
    return str(_bytes)
def concatenated(lst, element):
    """
    Append *element* to *lst* in place and return the (mutated) list.
    """
    lst += [element]
    return lst
def pos_in_bounding_rect(b_rect, x, y):
    """
    Return True when the position (x, y) lies inside the rect.

    The rect spans x1 to x2 - 1 and y1 to y2 - 1, i.e. the max bounds are
    exclusive.
    """
    x_min, y_min, x_max, y_max = b_rect
    inside_x = x_min <= x < x_max
    inside_y = y_min <= y < y_max
    return inside_x and inside_y
import time
def timeit(func, *args, **kwargs):
    """ Time a single call to *func* and return the elapsed seconds.

    The function's return value is discarded -- we assume nothing needs
    to be done with it.

    Args:
        func: function pointer
        args: arguments to the function
        kwargs: named arguments not defined in advance to be passed in to the function
    """
    started = time.time()
    func(*args, **kwargs)
    return time.time() - started
def make_data(img_names: list, labels: list) -> list:
    """Format data appropriately for a Pandas DataFrame.

    Args:
        img_names (list): Names of images (elements expose ``.item()``).
        labels (list): Labels for images from the dataset, one sequence
            of label tuples per image.

    Returns:
        list: One row per label: [image name, *label fields].
    """
    rows = []
    for image_index, name_obj in enumerate(img_names):
        name = name_obj.item()
        for label in labels[image_index]:
            rows.append([name, *label])
    return rows
def ftell(space, w_res):
    """ ftell - Returns the current position of the file read/write pointer

    PHP-style semantics (this appears to belong to a PHP interpreter --
    TODO confirm): emits a warning and returns PHP false when the
    argument is a boolean rather than a resource, or when the resource is
    no longer valid; otherwise returns the stream position as a PHP int.
    """
    # PHP coerces a bool argument into a warning + false, not an exception
    if w_res.tp == space.tp_bool:
        space.ec.warn("ftell() expects parameter 1 "
                      "to be resource, boolean given")
        return space.w_False
    # closed / freed resources also yield a warning + false
    if not w_res.is_valid():
        space.ec.warn("ftell(): %d is not a valid "
                      "stream resource" % w_res.int_w(space))
        return space.w_False
    pos = w_res.tell()
    return space.newint(pos)
def update_parameters_with_sgd(paras, grads, learning_rate):
    """Update parameters in place using plain SGD:

        W = W - learning_rate * dW
        b = b - learning_rate * db

    and, for every layer except the last, the batch-norm parameters:

        gamma = gamma - learning_rate * dgamma
        beta  = beta  - learning_rate * dbeta

    Paras
    ------------------------------------
    paras: dict of parameters: paras['W' + str(l)] = Wl,
        paras['b' + str(l)] = bl, plus 'gamma'/'beta' per hidden layer
    grads: dict of gradients: grads['dW' + str(l)] = dWl, etc.
    learning_rate: the learning rate, scalar.

    Returns
    ------------------------------------
    paras: the updated parameter dictionary (mutated in place)
    """
    num_layers = len(paras) // 4
    for layer in range(1, num_layers + 1):
        w_key, b_key = "W%d" % layer, "b%d" % layer
        paras[w_key] = paras[w_key] - learning_rate * grads["d" + w_key]
        paras[b_key] = paras[b_key] - learning_rate * grads["d" + b_key]
        if layer < num_layers:
            g_key, be_key = "gamma%d" % layer, "beta%d" % layer
            paras[g_key] = paras[g_key] - learning_rate * grads["d" + g_key]
            paras[be_key] = paras[be_key] - learning_rate * grads["d" + be_key]
    return paras
import importlib
def module_available(module_name: str) -> bool:
    """Check whether the python module *module_name* can be imported.

    Args:
        module_name (str): The name of the module

    Returns:
        `True` if the module can be imported and `False` otherwise
    """
    try:
        importlib.import_module(module_name)
        return True
    except ImportError:
        return False
import six
def flatten_dict(original_dict, delimiter="/"):
    """Flattens a dictionary of dictionaries by one level.

    Note that top level keys will be overridden if they collide with flat
    keys.  E.g. using delimiter="/" and
    original_dict={"foo/bar": 1, "foo": {"bar": 2}}, the top level
    "foo/bar" key would be overwritten.

    Args:
        original_dict: The dictionary to flatten.
        delimiter: The value used to delimit the keys in the flat_dict.

    Returns:
        The flattened dictionary.
    """
    flat_dict = {}
    # Plain dict.items() replaces six.iteritems -- the `six` Py2/Py3
    # compatibility shim is an unnecessary third-party dependency on
    # Python 3.
    for outer_key, inner_dict in original_dict.items():
        if isinstance(inner_dict, dict):
            for inner_key, value in inner_dict.items():
                flat_dict["{}{}{}".format(outer_key, delimiter, inner_key)] = value
        else:
            flat_dict[outer_key] = inner_dict
    return flat_dict
def make_anonymous_factorial():
    """Return the value of an expression that computes factorial.

    >>> make_anonymous_factorial()(5)
    120
    >>> from construct_check import check
    >>> # ban any assignments or recursion
    >>> check(HW_SOURCE_FILE, 'make_anonymous_factorial', ['Assign', 'AugAssign', 'FunctionDef', 'Recursion'])
    True
    """
    # Self-application ("poor man's Y combinator"): the helper lambda
    # receives itself as `f`, so it can recurse without any assignment,
    # def statement, or named recursion.  This replaces the unimplemented
    # 'YOUR_EXPRESSION_HERE' placeholder.
    return (lambda f: lambda n: f(f, n))(
        lambda f, n: 1 if n <= 1 else n * f(f, n - 1))
def upper_case(words):
    """
    Return a copy of *words* with every entry set to UPPER case.
    """
    return [word.upper() for word in words]
def split_D_by_A(D, A=[], tag_column=-1):
    """
    split_D_by_A(D, A=[], tag_column=-1)

    Partition dataset D by the attribute columns listed in A.

    output:
        {attr_values_tuple: sub_D, ...} -- each sub_D holds the rows with
        those attribute values, with the A columns removed; columns equal
        to tag_column are excluded from the key.
    """
    attribute_columns = A if isinstance(A, list) else [A]
    partitions = {}
    for row in D:
        reduced_row = [value for col, value in enumerate(row)
                       if col not in attribute_columns]
        key = tuple(row[col] for col in attribute_columns if col != tag_column)
        partitions.setdefault(key, []).append(reduced_row)
    return partitions
def safe_lower(txt):
    """ Return *txt* lower-cased; return '' for None (or any falsy value). """
    return txt.lower() if txt else ""
import os
def current_umask():
    """ Get the current umask value.

    os.umask can only read the mask by setting it, so the mask is briefly
    set to 0 and immediately restored (not thread-/signal-safe).

    NOTE(review): the original docstring claimed results are cached via
    functools.lru_cache, but no such decorator is applied here.
    """
    mask = os.umask(0)
    os.umask(mask)
    return mask
def get_from_module(module_params, module_name, identifier):
    """Gets a class/instance of a module member specified by the identifier.

    Args:
        module_params: dict, contains identifiers (lower-case keys)
        module_name: str, containing the name of the module
        identifier: str, specifying the module member (matched case-insensitively)

    Returns:
        a class or an instance of a module member specified by the identifier

    Raises:
        ValueError: when the identifier is unknown.
    """
    member = module_params.get(identifier.lower())
    if member is None:
        raise ValueError("Invalid {} identifier!".format(module_name), identifier)
    return member
import numpy as np
def rgb_to_hls(rgb):
    """RGB to HLS conversion over an (H, W, 3) float array.

    Vectorized port of colorsys.rgb_to_hls; returns the (h, l, s) arrays.
    """
    maxc = np.max(rgb, axis=2)
    minc = np.min(rgb, axis=2)
    nonzero_index = (minc < maxc)
    c_diff = maxc - minc
    l = (minc + maxc) / 2.0
    s = np.zeros_like(l)
    h = np.zeros_like(l)
    index = nonzero_index
    s[index] = c_diff[index] / (2.0 - maxc[index] - minc[index])
    index = (l <= 0.5) & nonzero_index
    s[index] = c_diff[index] / (maxc[index] + minc[index])
    # BUG FIX: the original computed `maxc - channel / c_diff` (division
    # binds tighter), but the colorsys formula is `(maxc - channel) /
    # c_diff` -- the subtraction must happen before the division.
    rc, gc, bc = (
        (maxc[nonzero_index] - rgb[:, :, i][nonzero_index]) / c_diff[nonzero_index]
        for i in range(3)
    )
    hc = 4.0 + gc - rc  # blue is max: 4 + gc - rc
    index = (rgb[:, :, 1][nonzero_index] == maxc[nonzero_index])
    hc[index] = 2.0 + rc[index] - bc[index]  # green is max: 2 + rc - bc
    index = (rgb[:, :, 0][nonzero_index] == maxc[nonzero_index])
    hc[index] = bc[index] - gc[index]  # red is max: bc - gc
    h[nonzero_index] = (hc / 6.0) % 1.0
    return h, l, s
import os
import subprocess
import re
def nbconvert_post(event, context):
    """Handle a POST request to /notebook.

    Writes each uploaded notebook from event['files'] to /tmp, converts
    it to HTML with `jupyter nbconvert`, and returns the HTML keyed by
    the original notebook filename along with the request metadata.

    NOTE(review): notebook filenames are used in paths unsanitized --
    confirm the event source cannot contain path traversal.
    """
    jupyter_path = os.path.join(
        os.environ["LAMBDA_TASK_ROOT"],
        "jupyter")
    html_output = {}
    for notebook_filename in event['files']:
        # /tmp is the only writable path inside an AWS Lambda sandbox
        local_path = os.path.join("/tmp", notebook_filename)
        with open(local_path, "w") as local_notebook:
            local_notebook.write(event["files"][notebook_filename])
        subprocess.run([jupyter_path, "nbconvert", "--to", "html", local_path], check=True)
        # nbconvert writes its output next to the input with a .html extension
        html_path = re.sub("\.ipynb$", ".html", local_path)
        html = open(html_path).read()
        html_output[notebook_filename] = html
    return {
        # request metadata is echoed back unchanged
        "metadata": event["metadata"],
        "files": html_output
    }
import os
def list_dir(path, ext="", include_dirs=False):
    """Collect absolute paths of entries in a local directory.

    :param path: path to directory
    :param ext: optional filename extension to filter on (only files are
        returned when ``ext`` is given, regardless of ``include_dirs``)
    :param include_dirs: boolean option to include directories
    :return: list of absolute paths
    """
    assert type(path) is str, "Path must be a string"
    assert os.path.isdir(path), "Path does not exist: {}".format(path)
    base = os.path.abspath(path)
    entries = [os.path.join(base, name) for name in os.listdir(path)]
    if ext:
        return [entry for entry in entries
                if os.path.isfile(entry) and entry.endswith(ext)]
    if include_dirs:
        return [entry for entry in entries if os.path.exists(entry)]
    return [entry for entry in entries if os.path.isfile(entry)]
def lookup_counts(
    row,
    lookup_table,
    index="step",
    columns="participant",
    default=False
):
    """
    Function to apply to a DataFrame to cross-reference
    counts in a lookup_table.

    Parameters
    ----------
    row: Series
        row of a DataFrame
    lookup_table: DataFrame
        DataFrame to cross-reference
    index: string or numeric, optional
        name of column in row that contains an index value
        for lookup_table, default = "step"
    columns: string or numeric, optional
        name of column in row that contains a column name
        for lookup_table, default = "participant"
    default: boolean or other, optional
        value to return if lookup not in lookup table
        default = False

    Returns
    -------
    value: boolean or other
        the value at index, columns; otherwise default
    """
    try:
        return lookup_table.loc[row[index], row[columns]].all()
    except (KeyError, IndexError, TypeError, AttributeError):
        # Lookup keys missing from the table (or a malformed row): fall back
        # to the caller's default. Previously a bare ``except:`` swallowed
        # *every* exception, including KeyboardInterrupt/SystemExit.
        return default
def affinepars2header(hdr, affine2d):
    """Write Affine2d parameters into a FITS header.

    Parameters
    ----------
    hdr : astropy.io.fits.Header or mapping
        Header updated in place with (value, comment) tuples.
    affine2d : object
        Provides ``name, mx, my, sx, sy, xo, yo`` attributes.

    Returns
    -------
    hdr
        The same header, for call-chaining convenience.
    """
    hdr['affine'] = (affine2d.name, 'Affine2d in pupil: name')
    hdr['aff_mx'] = (affine2d.mx, 'Affine2d in pupil: xmag')
    hdr['aff_my'] = (affine2d.my, 'Affine2d in pupil: ymag')
    hdr['aff_sx'] = (affine2d.sx, 'Affine2d in pupil: xshear')
    # BUG FIX: this keyword previously recorded affine2d.sx (copy-paste error).
    hdr['aff_sy'] = (affine2d.sy, 'Affine2d in pupil: yshear')
    hdr['aff_xo'] = (affine2d.xo, 'Affine2d in pupil: x offset')
    hdr['aff_yo'] = (affine2d.yo, 'Affine2d in pupil: y offset')
    hdr['aff_dev'] = ('analyticnrm2', 'dev_phasor')
    return hdr
def pretty_i(iterable, indent_char='\t', new_line_char='\n', indent=0):
    """
    Recursively pretty-print a nested iterable as an indented, multi-line string.

    Args:
        iterable (any): Iterable to format (does not work with generators).
        indent_char (str): Character used for each indentation level (Default value = '\t')
        new_line_char (str): Character used to break lines (Default value = '\n')
        indent (int): Current nesting depth, used internally by the recursion (Default value = 0)

    Returns:
        str: The iterable formatted as a string.
    """
    # Separator placed before each child item: a line break plus one extra
    # indentation level relative to the current depth.
    new_line = new_line_char + indent_char * (indent + 1)
    extremities = ''
    if isinstance(iterable, dict):
        items = []
        for key, value in iterable.items():
            items.append(f'{new_line}{repr(key)}: {pretty_i(value, indent_char, new_line_char, indent + 1)}')
        extremities = '{}'
    elif isinstance(iterable, (list, tuple, set, frozenset)):
        items = [new_line + pretty_i(item, indent_char, new_line_char, indent + 1) for item in iterable]
        # Pick the delimiter text for the concrete container type.
        if isinstance(iterable, list):
            extremities = '[]'
        elif isinstance(iterable, tuple):
            extremities = '()'
        elif isinstance(iterable, set):
            extremities = '{}'
        elif isinstance(iterable, frozenset):
            extremities = 'frozenset()'
    else:
        # Scalars (and any unhandled type) are rendered with repr().
        return repr(iterable)
    # extremities[:-1] is the opening delimiter (e.g. '[' or 'frozenset('),
    # extremities[-1:] the closing one; items are comma-joined and the closing
    # delimiter is re-indented back to the current depth.
    return extremities[:-1] + (','.join(items) + new_line_char + indent_char * indent) + extremities[-1:]
from unittest.mock import Mock
def mockBackend(*counts):
    """ Takes a list of counts, returns a mock backend that outputs them. """
    backend = Mock()
    # process_circuits hands back one handle (its index) per counts entry.
    backend.process_circuits.return_value = list(range(len(counts)))

    def fetch_result(handle):
        result = Mock()
        result.get_counts.return_value = counts[handle]
        return result

    backend.get_result = fetch_result
    return backend
def earliest(timestamp, buslines):
    """Return wait-time * bus-id for the first bus departing at or after timestamp.

    Advent of Code 2020 day 13 part 1: each bus ``b`` departs at every
    multiple of ``b``; the answer is ``b * wait`` for the bus with the
    smallest wait.

    Parameters
    ----------
    timestamp : int
        Earliest time we can board.
    buslines : iterable of int
        Bus ids (which double as departure periods).

    Returns
    -------
    int
        Chosen bus id multiplied by the minutes waited.
    """
    # (-timestamp) % bus is the wait until the next departure; it is 0 when a
    # bus leaves exactly at ``timestamp``. The old ceiling formula
    # ``(timestamp // bus + 1) * bus`` wrongly skipped that departure and
    # charged a full period of waiting.
    best = min(buslines, key=lambda bus: (-timestamp) % bus)
    wait = (-timestamp) % best
    print([best, timestamp + wait])  # keep the original [bus, departure] debug trace
    return best * wait
import re
def normalize(obj):
    """Normalize output object.

    Args:
      obj: Google Test's JSON output object to normalize.

    Returns:
      Normalized output without any references to transient information that may
      change from run to run.
    """
    def scrub(key, value):
        # Replace run-dependent fields with '*'; recurse into everything else.
        if key == 'time':
            return re.sub(r'^\d+(\.\d+)?s$', '*', value)
        if key == 'timestamp':
            return re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\dZ$', '*', value)
        if key == 'failure':
            scrubbed = re.sub(r'^.*[/\\](.*:)\d+\n', '\\1*\n', value)
            return re.sub(r'Stack trace:\n(.|\n)*', 'Stack trace:\n*', scrubbed)
        return normalize(value)

    if isinstance(obj, dict):
        return {key: scrub(key, value) for key, value in obj.items()}
    if isinstance(obj, list):
        return [normalize(item) for item in obj]
    return obj
def get_Row_Col(input_Str):
    """Parse the row and column counts of a matrix from a string.

    Parameters
    ----------
    input_Str : str
        Two integers separated by whitespace, e.g. ``"3 4"``.

    Returns
    -------
    tuple of int
        (rows, columns)
    """
    # str.split() with no argument tolerates leading/trailing and repeated
    # whitespace; the old split(' ') broke on anything but a single space.
    rows, columns = input_Str.split()
    return int(rows), int(columns)
def counting_sort(arr):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/countingsort2/problem

    Given an unsorted list of non-negative integers, use the counting sort
    method to sort the list.

    Args:
        arr (list): List of non-negative integers to sort

    Returns:
        list: The list of integers in sorted ascending order
    """
    if not arr:
        return []
    # Size the count table from the data rather than the problem's hard-coded
    # bound of 100, so values >= 100 no longer raise IndexError.
    counts = [0] * (max(arr) + 1)
    for value in arr:
        counts[value] += 1
    sorted_list = []
    for value, count in enumerate(counts):
        sorted_list.extend([value] * count)
    return sorted_list
def polynomial_add_polynomial(a, b):
    """
    Addition function of two polynomials.

    Coefficients are stored lowest-degree first; the result has the length
    of the longer input.

    :param a: First polynomial (list of coefficients).
    :param b: Second polynomial (list of coefficients).
    :return: The result of adding two polynomials.
    """
    # Make ``a`` the longer polynomial so its tail can be kept as-is.
    if len(a) < len(b):
        a, b = b, a
    result = list(a)
    for degree, coefficient in enumerate(b):
        result[degree] = result[degree] + coefficient
    return result
def calculate_annual_charging_kwh(database):
    """
    Calculate the annual charging kWh from the charging profile.

    Sums ``ev_charging_kw`` weighted by days-per-month over both the weekday
    and weekend profile tables.

    :param database: open sqlite3 connection with ``weekday_profiles`` and
        ``weekend_profiles`` tables.
    :return: total annual charging energy.
    """
    cursor = database.cursor()
    profile_tables = (
        ("weekday_profiles", "number_weekday_days_in_month"),
        ("weekend_profiles", "number_weekend_days_in_month"),
    )
    total = 0
    for table, days_column in profile_tables:
        # Table/column names are fixed literals, so string formatting is safe here.
        query = "SELECT sum(ev_charging_kw * {}) FROM {};".format(days_column, table)
        total += cursor.execute(query).fetchone()[0]
    return total
import os
def parse_filename(filename):
    """Parse filename of the pickle file.

    Expects names of the form ``<video_id>_<start>_<end>.pkl`` where
    ``video_id`` itself may contain underscores.
    """
    stem = os.path.basename(filename).replace('.pkl', '')
    # Split only on the last two underscores so the video id stays intact.
    video_id, start_str, end_str = stem.rsplit('_', 2)
    return video_id, int(start_str), int(end_str)
def format_face_coords(ibm_analyze_result):
    """
    Parse the face coords extracted from IBM service_v4.

    :param ibm_analyze_result: the json object directly returned from IBM face
        detection service_v4
    :return: a list of locations, each shaped like
        ``{"left": 64, "top": 72, "width": 124, "height": 151}``
    """
    detected = ibm_analyze_result['images'][0]['objects']
    # An empty 'objects' dict means no face was detected.
    if not detected:
        return []
    faces = detected['collections'][0]['objects']
    return [face['location'] for face in faces]
import collections
def token_counts(tokens, size_mb=1):
    """
    Returns a count for the number of times a token appears in a list.

    Args:
        tokens: iterable of hashable tokens to count.
        size_mb: unused; kept for API compatibility with a bounter-backed
            implementation. bounter is slower here since we aren't counting
            a large enough corpus.

    Returns:
        collections.Counter mapping token -> occurrence count.
    """
    return collections.Counter(tokens)
def apply_threshold(heatmap, threshold):
    """
    Simple utility function which encapsulates the heat-map thresholding algorithm.

    NOTE: ``heatmap`` is modified in place and the same object is returned.
    Values at or below ``threshold`` (inclusive) are zeroed.

    :param heatmap: accumulated detection map — assumed to support boolean-mask
        assignment (numpy-like array); confirm dtype/shape at the call sites.
    :param threshold: cut-off value; anything <= this is suppressed.
    :return: the thresholded heatmap (same object as the input).
    """
    # Zero out pixels below the threshold
    heatmap[heatmap <= threshold] = 0
    # Return thresholded map
    return heatmap
def get_rec_from_generator(recordID, gen, method=None):
    """Return the record from a SeqIO generator whose id matches ``recordID``.

    ``method`` (if given) is called just before returning, e.g. to refresh
    the generator for a later pass. Raises ValueError when no record in the
    generator has the requested id.
    """
    for candidate in gen:
        if candidate.id != recordID:
            continue
        if method is not None:
            method()
        return candidate
    # The generator was exhausted without a match.
    raise ValueError("no record found matching record id %s!" % recordID)
from docutils.core import publish_parts
def do_rst(s):
    """
    Parse the string using the reStructuredText parser from the
    docutils package and return the rendered HTML fragment.

    requires `docutils`_.

    .. _docutils: http://docutils.sourceforge.net/
    """
    return publish_parts(source=s, writer_name='html4css1')['fragment']
def first(*args):
    """
    Returns the first argument that is not ``None`` (or ``None`` when
    every argument is ``None`` / no arguments are given).
    """
    return next((arg for arg in args if arg is not None), None)
def Q_wastecooler(W_mass, Cw, t_coolwater_exit, tw):
    """
    Calculates the heat load of the waste cooler.

    Parameters
    ----------
    W_mass : float
        The mass flow rate of waste, [kg/s]
    Cw : float
        The heat capacity of waste, [J/(kg * degrees C)]
    t_coolwater_exit: float
        The end temperature of cool waste, [degrees C]
    tw : float
        The temperature of boiling waste, [degrees C]

    Returns
    -------
    Q_wastecooler : float
        The heat load of waste cooler, [W], [J/s]

    References
    ----------
    Dytnersky, eq. 2.2, p. 45
    """
    temperature_drop = tw - t_coolwater_exit
    return W_mass * Cw * temperature_drop
def rgb_to_xyz(red, green, blue):
    """
    Convert standard RGB color to XYZ color.

    :arg int red: RGB value of Red.
    :arg int green: RGB value of Green.
    :arg int blue: RGB value of Blue.
    :returns: Tuple (X, Y, Z) representing XYZ color
    :rtype: tuple

    D65/2° standard illuminant
    """
    def linearize(channel):
        # sRGB inverse companding: gamma curve above the 0.04045 knee,
        # linear segment below it, scaled to the 0-100 range.
        channel /= 255.0
        if channel > 0.04045:
            channel = ((channel + 0.055) / 1.055) ** 2.4
        else:
            channel /= 12.92
        return channel * 100

    r, g, b = (linearize(c) for c in (red, green, blue))
    x_val = r * 0.4124 + g * 0.3576 + b * 0.1805
    y_val = r * 0.2126 + g * 0.7152 + b * 0.0722
    z_val = r * 0.0193 + g * 0.1192 + b * 0.9505
    return x_val, y_val, z_val
def replace_dir_vars(path, d):
    """Replace common directory paths with appropriate variable references (e.g. /etc becomes ${sysconfdir})"""
    # Map absolute directory value -> datastore variable name, considering
    # only lowercase variables ending in 'dir' with clean single-line values.
    mapping = {}
    for var in d:
        if var.endswith('dir') and var.lower() == var:
            value = d.getVar(var, True)
            if value.startswith('/') and '\n' not in value:
                mapping[value] = var
    # Substitute longest paths first so e.g. ${bindir} wins over ${prefix}.
    for dir_value in sorted(mapping, reverse=True):
        path = path.replace(dir_value, '${%s}' % mapping[dir_value])
    return path
def map_to_ta_modes(ins, max_group, min_group):
    """Turns the min/max groups into the closest allowable TA group mode.

    Parameters
    ----------
    ins : str
        Instrument ('miri', 'niriss', 'nirspec', or 'nircam').
    max_group : int
        The maximum number of groups without oversaturating.
    min_group : int
        The groups needed to hit the target SNR.

    Returns
    -------
    min_ta_groups : int
        The min possible groups to hit target SNR (0 when saturated from the
        start, -1 when the target SNR cannot be reached).
    max_ta_groups : int
        The max possible groups before saturation.
    """
    # Allowable group modes for each ins
    groups = {'miri': [3, 5, 9, 15, 23, 33, 45, 59, 75, 93, 113, 135, 159, 185, 243, 275, 513],
              # BUG FIX: the odd-number sequence previously contained a bare
              # "1" where 11 belongs.
              'niriss': [3, 5, 7, 9, 11, 13, 15, 17, 19],
              'nirspec': [3],
              'nircam': [3, 5, 9, 17, 33, 65]
              }
    # Match the literal min and max groups to the nearest mode.
    allowable_groups = groups[ins]
    min_ta_groups = min(allowable_groups, key=lambda x: abs(x - min_group))
    max_ta_groups = min(allowable_groups, key=lambda x: abs(x - max_group))
    # Unless it was oversaturated from the get-go OR there aren't enough groups
    # for SNR
    if min_group == 0:
        min_ta_groups = 0
        max_ta_groups = 0
    if min_group > max(allowable_groups):
        min_ta_groups = -1
        max_ta_groups = 0
    # BOTH ARE FLIPPED RN (author's note): the return order is (max, min),
    # swapped relative to the docstring order. Callers rely on it, so keep it.
    return max_ta_groups, min_ta_groups
def t4():
    """testing the short interval parameter.

    NOTE(review): deliberately raises ZeroDivisionError via ``1 / 0`` —
    presumably a hook for exercising error handling in the test harness;
    confirm intent before "fixing".
    """
    return 1 / 0
def clean_html(data):
    """
    Clean scraped data by deleting all new-line characters outright and
    collapsing any remaining runs of whitespace to a single space.

    NOTE: newlines are removed, not replaced with a space, so words split
    across lines are joined together (matches the original behaviour).
    """
    without_newlines = data.replace("\n", "")
    return " ".join(without_newlines.split())
def compute_gc_content(dna):
    """
    Compute the GC-content of a DNA string (the percentage of its bases
    that are either cytosine or guanine).

    Args:
        dna (str): DNA string (uppercase bases). Must be non-empty.

    Returns:
        float: GC-content of dna, in percent.
    """
    gc_bases = sum(1 for base in dna if base in "GC")
    return gc_bases * 100 / len(dna)
import os
import json
def load_labels() -> dict:
    """ Load class labels from the ImageNet json.

    Returns:
        dict: Dictionary which contains class labels
    """
    base_dir = os.path.dirname(os.path.abspath(__file__))
    # See https://storage.googleapis.com/download.tensorflow.org/data/imagenet_class_index.json
    labels_path = os.path.join(base_dir, 'imagenet_class_index.json')
    with open(labels_path, 'r') as handle:
        return json.load(handle)
def prettycase(var):  # Some Variable
    """
    Pretty case convention: join the elements with spaces, uppercasing the
    first letter of each element (the rest of each element is left as-is).

    :param var: Variable to transform
    :type var: :py:class:`list`
    :returns: **transformed**: (:py:class:`str`) - Transformed input in ``Pretty Case`` convention.
    """
    # word[0] (not word[:1]) keeps the original IndexError on empty elements.
    return " ".join(word[0].upper() + word[1:] for word in var)
import getpass
import requests
def clube_id_e_sessao():
    """Initialise the requests session used to pull data from the Sokker API and fetch the club id.

    Prompts interactively for:
        login_name -> the user's Sokker login
        password   -> the user's Sokker password (read without echo)

    Returns:
        session -> authenticated requests.Session, used for subsequent API calls
        id_club -> id of the logged-in user's club, used to fetch the club's
            information when needed
    """
    # Login credentials (interactive prompts).
    login_name= input('Login: ')
    password= getpass.getpass('Password: ')
    # Request Session
    session = requests.Session()
    # Log in to sokker
    url_login= 'https://sokker.org/start.php?session=xml'
    cadastro= {'ilogin': login_name, 'ipassword': password}
    logging = session.post(url_login, data = cadastro)
    status= logging.status_code  # NOTE(review): unused — kept, presumably for debugging
    # Extract the id of the logged-in club from the response body ("...=<id>")
    id_club = logging.text.split('=')[1]
    # A very short id means the login/API call failed (user-facing messages stay in Portuguese).
    if len(id_club) <= 2:
        print("Algo errado ocorreu ao tentar acessar a API do jogo. Vá ao link (https://online.sokker.org/xmlinfo.php) e cheque o problema a partir do resultado da variável id_club e seu problema correspondente neste site.")
    else:
        print(f'Clube ID: {id_club}')
    return session, id_club
def load_ids():
    """Load record ids and their cluster assignment from clustering/ids.txt.

    The file is tab-separated: id, name, other, cluster. Loaded manually
    (rather than via a csv reader) because some special characters may need
    hand-cleaning later in the process.

    Returns:
        ids: dict mapping id -> (third field, second field), both stripped.
        by_cluster: dict mapping cluster number (0-64) -> list of ids.
    """
    ids = {}
    # There are exactly 65 known clusters.
    by_cluster = {cluster: [] for cluster in range(65)}
    with open("clustering/ids.txt", "r") as f:
        for line in f:
            fields = line.split("\t")
            record_id = int(fields[0])
            ids[record_id] = (fields[2].strip(), fields[1].strip())
            by_cluster[int(fields[3])].append(record_id)
    return ids, by_cluster
def read_relase_notes(f):
    """Attempts to extract the total number of clusters from release notes.

    Scans for a line of the form ``Number of clusters: N`` (commas in N are
    tolerated) and returns N, or -1 when no such line exists.
    """
    with open(f, 'r') as fhin:
        for raw_line in fhin:
            line = raw_line.strip()  # same effect as the old rstrip().lstrip()
            if line.split(":")[0] == "Number of clusters":
                return int(line.split(":")[-1].replace(",", ""))
    return -1
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.