content | sha1 | id
|---|---|---|
|
def check_str_length(str_to_check, limit=MAX_LENGTH):
"""Check the length of a string. If exceeds limit, then truncate it.
:type str_to_check: str
:param str_to_check: String to check.
:type limit: int
:param limit: The upper limit of the length.
:rtype: tuple
    :returns: The string itself if its length does not exceed the limit,
        otherwise the truncated string, together with the truncated byte count.
"""
str_bytes = str_to_check.encode(UTF8)
str_len = len(str_bytes)
truncated_byte_count = 0
if str_len > limit:
truncated_byte_count = str_len - limit
str_bytes = str_bytes[:limit]
result = str(str_bytes.decode(UTF8, errors='ignore'))
return (result, truncated_byte_count)
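# A minimal usage sketch; UTF8 is assumed to be the module-level constant
# 'utf-8'. Passing limit explicitly avoids depending on MAX_LENGTH:
text, truncated = check_str_length('hello world!', limit=10)
# text == 'hello worl', truncated == 2 (12 UTF-8 bytes cut down to 10)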
|
73ae59241a18c5398c041c2c332c932831a39c55
| 3,638,689
|
async def create_or_update(
hub,
ctx,
name,
resource_group,
prefix_length,
sku="standard",
public_ip_address_version="IPv4",
zones=None,
**kwargs,
):
"""
.. versionadded:: 4.0.0
Creates or updates a static or dynamic public IP prefix.
:param name: The name of the public IP prefix.
:param resource_group: The resource group of the public IP prefix.
:param prefix_length: An integer representing the length of the Public IP Prefix. This value is immutable
once set. If the value of the ``public_ip_address_version`` parameter is "IPv4", then possible values include
28, 29, 30, 31. If the value of the ``public_ip_address_version`` parameter is "IPv6", then possible values
include 124, 125, 126, 127.
:param sku: The name of a public IP prefix SKU. Possible values include: "standard". Defaults to "standard".
:param public_ip_address_version: The public IP address version. Possible values include: "IPv4" and "IPv6".
Defaults to "IPv4".
:param zones: A list of availability zones that denotes where the IP allocated for the resource needs
to come from.
CLI Example:
.. code-block:: bash
azurerm.network.public_ip_prefix.create_or_update test_name test_group test_length
"""
if "location" not in kwargs:
rg_props = await hub.exec.azurerm.resource.group.get(
ctx, resource_group, **kwargs
)
if "error" in rg_props:
log.error("Unable to determine location from resource group specified.")
return {
"error": "Unable to determine location from resource group specified."
}
kwargs["location"] = rg_props["location"]
if sku:
sku = {"name": sku.lower()}
result = {}
netconn = await hub.exec.azurerm.utils.get_client(ctx, "network", **kwargs)
try:
prefix_model = await hub.exec.azurerm.utils.create_object_model(
"network",
"PublicIPPrefix",
prefix_length=prefix_length,
sku=sku,
public_ip_address_version=public_ip_address_version,
zones=zones,
**kwargs,
)
except TypeError as exc:
result = {
"error": "The object model could not be built. ({0})".format(str(exc))
}
return result
try:
prefix = netconn.public_ip_prefixes.create_or_update(
resource_group_name=resource_group,
public_ip_prefix_name=name,
parameters=prefix_model,
)
prefix.wait()
result = prefix.result().as_dict()
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("network", str(exc), **kwargs)
result = {"error": str(exc)}
except SerializationError as exc:
result = {
"error": "The object model could not be parsed. ({0})".format(str(exc))
}
return result
|
08fb4db3f585ec8844e28e5fa90867f588cdb91a
| 3,638,690
|
def _get_job_name(job_label: str = None) -> str:
"""Returns Beam runner job name.
Args:
job_label: A user defined string that helps define the job.
Returns:
    A job name compatible with apache beam runners, including a timestamp to
    ensure uniqueness.
"""
job_name = 'tfrecorder-' + common.get_timestamp()
if job_label:
job_label = job_label.replace('_', '-')
job_name += '-' + job_label
return job_name
|
3786c532109c880ec9255b82a6a2442cf8414780
| 3,638,691
|
def all_not_none(*args):
"""Shorthand function for ``all(x is not None for x in args)``. Returns
True if all `*args` are not None, otherwise False."""
return all(x is not None for x in args)
|
2d063f39e253a78b28be6857df08d8f386d8eb4a
| 3,638,692
|
def weighted_photon_spec(eng):
""" Returns the weighted photon spectrum from positronium annihilation.
    This assumes 3/4 ortho-positronium and 1/4 para-positronium, normalized to a single
annihilation.
Parameters
----------
eng : ndarray
The energy abscissa.
Returns
-------
Spectrum
The resulting photon :class:`.Spectrum` object.
"""
return 3/4*ortho_photon_spec(eng) + 1/4*para_photon_spec(eng)
|
39b5301d5e49f070d1128d678f4f0672b32c275d
| 3,638,693
|
def get_for_tag(app_name):
"""
    Return the customized ``for`` tag used to list records in the list.html template.
    :param app_name: Name of the app being created
    :type app_name: str
"""
return "{% for " + app_name + " in " + app_name + "s %}"
|
12399a148262893047bf21c20e784bfb33373c29
| 3,638,694
|
def _reloadFn(*args):
"""Placeholder callback function for :func:`_handleSIGHUP`."""
return True
|
261a54f52e4e448671b8625dae4fbc67116bd546
| 3,638,695
|
def interpolation_lagrange_matrix(old_grid, new_grid):
"""
    Evaluate the Lagrange matrices used to interpolate state and control values from the solved grid onto the new grid.
Parameters
----------
old_grid : <GridData>
GridData object representing the grid on which the problem has been solved.
new_grid : <GridData>
GridData object representing the new, higher-order grid.
Returns
-------
    tuple of ndarray
        The Lagrange interpolation matrix and the corresponding differentiation matrix.
"""
L_blocks = []
D_blocks = []
for iseg in range(old_grid.num_segments):
i1, i2 = old_grid.subset_segment_indices['all'][iseg, :]
indices = old_grid.subset_node_indices['all'][i1:i2]
nodes_given = old_grid.node_stau[indices]
i1, i2 = new_grid.subset_segment_indices['all'][iseg, :]
indices = new_grid.subset_node_indices['all'][i1:i2]
nodes_eval = new_grid.node_stau[indices]
L_block, D_block = lagrange_matrices(nodes_given, nodes_eval)
L_blocks.append(L_block)
D_blocks.append(D_block)
L = block_diag(*L_blocks)
D = block_diag(*D_blocks)
return L, D
|
ce219dbfa65842ad275fe22f1122474b865fcfb0
| 3,638,696
|
def optimize_shim(coils, unshimmed, mask, mask_origin=(0, 0, 0), bounds=None):
"""
Optimize unshimmed volume by varying current to each channel
Args:
coils (numpy.ndarray): X, Y, Z, N coil map
unshimmed (numpy.ndarray): 3D B0 map
mask (numpy.ndarray): 3D integer mask used for the optimizer (only consider voxels with non-zero values).
mask_origin (tuple): Mask origin if mask volume does not cover unshimmed volume
bounds (list): List of ``(min, max)`` pairs for each coil channels. None
is used to specify no bound.
Returns:
numpy.ndarray: Coefficients corresponding to the coil profiles that minimize the objective function
(coils.size)
"""
# cmap = plt.get_cmap('bone')
# cmap.set_bad('black')
# mag_fig, mag_ax = plt.subplots(1, 1)
# plotter_mag = Slice_Plotter(mag_ax, np.transpose((unshimmed), axes=(1, 0, 2)), f'Unshimmed Full', cmap=cmap)
# mag_fig.canvas.mpl_connect('scroll_event', plotter_mag.onscroll)
# plt.show(block=True)
# plt.close()
mask_range = tuple([slice(mask_origin[i], mask_origin[i] + mask.shape[i]) for i in range(3)])
mask_vec = mask.reshape((-1,))
# Least squares solver
N = coils.shape[3]
# Reshape coil profile: X, Y, Z, N --> [mask.shape], N
# --> N, [mask.shape] --> N, mask.size --> mask.size, N
coil_mat = np.reshape(np.transpose(coils[mask_range], axes=(3, 0, 1, 2)),
(N, -1)).T
coil_mat = coil_mat[mask_vec != 0, :] # masked points x N
unshimmed = unshimmed[mask_range]
unshimmed_vec = np.reshape(unshimmed, (-1,)) # mV
unshimmed_vec = unshimmed_vec[mask_vec != 0] # mV'
# Set up output currents and optimize
    if bounds is not None:
        # scipy's least_squares expects bounds as (lower_bounds, upper_bounds),
        # so transpose the list of per-channel (min, max) pairs
        bounds = np.asarray(bounds).T
    else:
        bounds = (-np.inf, np.inf)
    currents_0 = np.zeros(N)
    currents_sp = opt.least_squares(shim_residuals, currents_0,
                                    args=(unshimmed_vec, coil_mat), bounds=bounds)
currents = currents_sp.x
residuals = np.asarray(currents_sp.fun)
return (currents, residuals)
|
f9e5d88e4a54222c841b3a2a66254675218207c4
| 3,638,697
|
import numpy as np
def _log(x1):
    """Protected log: near-zero arguments are mapped to 1 (so the log is 0);
    otherwise the log of the absolute value is returned, negated for x1 < -1."""
with np.errstate(divide="ignore", invalid="ignore"):
x1 = np.where(np.abs(x1) > 0.001, x1, 1)
return np.where(x1 < -1, np.log(np.abs(x1)) * np.sign(x1), np.log(np.abs(x1)))
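# A quick sanity check of the protected behaviour:
print(_log(np.array([0.0, 0.5, -2.0, np.e])))
# [0. -0.693... -0.693... 1.]: 0 maps to log(1) = 0, and -2 yields -log(2)
# because of the sign protection.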
|
bcc782434b1e38749096d56e6b78df287a14eeb2
| 3,638,698
|
def whats_the_meaning_of_life(n_cores=23):
"""Answers the question about the meaning of life.
You don't even have to ask the question, it will figure it out for you.
Don't use more cores than available to mankind.
Parameters
----------
n_cores: int [default: 23]
The number of CPU cores to use.
Returns
-------
int
The type of the expected answer is of course an integer.
"""
return 42
|
9b42257161ad3063bd7d8faddb6e385aa5586bf0
| 3,638,699
|
from tqdm import tqdm
def get_good_start(system, numdistricts):
"""
    Instead of starting simulated annealing from a really bad random initial
    solution, sample a number of random solutions and keep the best one.
"""
print('Acquiring a good initial solution')
solution = Solution(system, numdistricts)
solution.generate_random_solution() # start with random solution
for i in tqdm(range(100)):
new_solution = Solution(system, numdistricts)
new_solution.generate_random_solution()
if new_solution.value > solution.value:
solution = new_solution
print('Starting with Solution[{}]'.format(solution.value))
return solution
|
2c557c0505d0c442890c96ebb6186e866b5a7726
| 3,638,700
|
def search_up(word_list, matrix):
"""Search words from word_list in matrix, up direction
    :param word_list: list of strings
    :param matrix: list of lists
    :return: list of lists"""
return straight_search(word_list, matrix, True, False)
|
e806af0505a1cb78bf1bc181a89ccee81407c0b7
| 3,638,701
|
def int_validator(inp, ifallowed):
"""
Test whether only (positive) integers are being keyed into a widget.
Call signature: %S %P
"""
if len(ifallowed) > 10:
return False
try:
return int(inp) >= 0
except ValueError:
return False
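# Hedged examples of the validator's behaviour:
print(int_validator('4', '42'))           # True: non-negative integer
print(int_validator('-', '-1'))           # False: not a valid integer
print(int_validator('1', '12345678901'))  # False: result exceeds 10 characters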
|
ee433a6365a0aad58cab0cd59fa05e132b669053
| 3,638,702
|
import logging
import requests
def scrape_page(url):
"""
scrape page by url and return its html
:param url: page url
:return: html of page
"""
logging.info('scraping %s...', url)
try:
response = requests.get(url)
if response.status_code == 200:
return response.text
        logging.error('got invalid status code %s while scraping %s', response.status_code, url)
except requests.RequestException:
logging.error('error occurred while scraping %s', url, exc_info=True)
|
a09eb79ce6abe25e4eb740dcfeb7a4debfca0b88
| 3,638,703
|
def getProductionUrl(code,d0):
"""Get the url for outage data from d0 to d1."""
url = getUrl('png',code,2018,opts=[[None]])
url = url.replace('__datehere__',eomf.m2s(d0),)
return url
|
d7e13671494c719ecc41058d481bd8d4cef5a3ff
| 3,638,704
|
def has_gaps_in_region(read, region):
"""
    Returns True if the given pysam read has gaps in its alignment (more than
    one aligned block) within the given pybedtools.Interval, ``region``.
"""
# If the given read has gaps in its alignment to the reference inside the
# given interval (more than one block inside the SV event itself), there are
# gaps inside the SV.
tree = intervaltree.IntervalTree()
for block in read.get_blocks():
tree[block[0]:block[1]] = block
return len(tree[region.start:region.end]) > 1
|
df1e272044d47bb610a59e80f21ad0fcca484231
| 3,638,705
|
def zha_device_joined(opp, setup_zha):
"""Return a newly joined ZHA device."""
async def _zha_device(zigpy_dev):
await setup_zha()
zha_gateway = get_zha_gateway(opp)
await zha_gateway.async_device_initialized(zigpy_dev)
await opp.async_block_till_done()
return zha_gateway.get_device(zigpy_dev.ieee)
return _zha_device
|
e8d8ac320414762416e2a105583001ac452df6b6
| 3,638,706
|
def get_all_list_data(request_context, function, *args, **kwargs):
"""
Make a function request with args and kwargs and iterate over the "next" responses until exhausted.
Return initial response json data or all json data as a single list. Responses that have a series of
next responses (as retrieved by get_next generator) are expected to have data returned as a list.
If an exception is raised during the initial function call or in the process of paging over results,
that exception will be bubbled back to the caller and any intermediary results will be lost. Worst case
complexity O(n).
:param RequestContext request_context: The context required to make an API call
:param function function: The API function to call
:return: A list of all json data retrieved while iterating over response links, or the initial json
function response if there are no paged results
:rtype: list of json data or json
"""
response = function(request_context, *args, **kwargs)
data = response.json()
for next_response in get_next(request_context, response):
data.extend(next_response.json())
return data
|
dd9aea10691a553c4b36009d733c11d39ada970e
| 3,638,707
|
def is_outlier(x, check_finite=False, confidence=3):
"""Boolean mask with outliers
:param x: vector
:param check_finite:
:param confidence: confidence level: 1, 2, 3 or 4, which correspond to
90%, 95%, 99% and 99.9% two-tailed confidence respectively (normal
distribution). Default: 3 (99%)
:type x: numpy.ndarray
:type check_finite: bool
:type confidence: int
:return: vector with condition "is `x` outlier?"
"""
return np.logical_not(
is_not_outlier(x, check_finite=check_finite, confidence=confidence))
|
6fb1b9f157c3bf720a615892524d90df2f717096
| 3,638,708
|
import tensorflow as tf
def stateless_shuffle(value, seed):
"""Randomly shuffles a tensor, statelessly."""
flat_value = tf.reshape(value, [-1])
indices = tf.argsort(
tf.random.stateless_uniform(tf.shape(flat_value), seed=seed))
flat_shuffle = tf.gather(flat_value, indices)
return tf.reshape(flat_shuffle, tf.shape(value))
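# A hedged usage sketch: the same seed always yields the same permutation.
x = tf.constant([1, 2, 3, 4, 5])
print(stateless_shuffle(x, seed=[1, 2]))
print(stateless_shuffle(x, seed=[1, 2]))  # identical output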
|
4fa1ab8538ab5ab7f356c68d75c9fa61395a6e75
| 3,638,709
|
def is_one_line_function_declaration_line(line: str) -> bool: # pylint:disable=invalid-name
"""
Check if line contains function declaration.
"""
    return 'def ' in line and '(' in line and ('):' in line or ') ->' in line)
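# Hedged examples; the parentheses in the return above matter, otherwise any
# line containing ') ->' would match even without 'def':
print(is_one_line_function_declaration_line('def f(x): return x'))     # True
print(is_one_line_function_declaration_line('def g(x) -> int:'))       # True
print(is_one_line_function_declaration_line('y = call(f(x) -> int)'))  # False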
|
e402cbbedc587ab0d572dfe6c074aadef6980658
| 3,638,710
|
def check_if_ended(id):
"""
Check if the course has already ended.
:param id: Id of the course that needs to be checked.
:type id: int
:return: If a course has ended
:rtype: bool
"""
course = moodle_api.get_course_by_id_field(id)
end_date = course['courses'][0]['enddate']
    return dt.datetime.fromtimestamp(end_date) < dt.datetime.today()
|
0e67c58d2b107597f068a34c08289ce43f4d1beb
| 3,638,711
|
def get_all_applications(user, timeslot):
"""
    Get a user's applications for this timeslot.
    :param user: user to get applications for
    :param timeslot: timeslot to get the applications.
    :return: QuerySet of the user's applications for the timeslot.
"""
return user.applications.filter(Proposal__TimeSlot=timeslot)
|
40aec747174fa4a3ce81fe2a3a5eee599c81643a
| 3,638,712
|
import requests
import webbrowser
def get_console_url(args):
""" Get a console login URL """
# Get credentials, maybe assume the role
session_creds = get_credentials(args)
# build the token request and fetch the sign-in token
url = request_signin_token(args, session_creds)
r = requests.get(url,timeout=200.0)
if r.status_code != 200:
vprint('Error: Getting SigninToken', r.url)
vprint(r.content)
raise Exception(f'Bad response requesting signin token {r.reason}')
sin_token = r.json()['SigninToken']
# build the console signin url
sin_url = request_console_login(sin_token)
if args.output:
return sin_url
else:
vprint(f'Opening webbrowser for {sin_url}')
webbrowser.open(sin_url)
return None
|
0c12730f4e7f0367832f8cbe1e7b549b2582f2c6
| 3,638,713
|
def input_layer_from_space(space):
"""
create tensorlayer input layers from env.space input
:param space: env.space
:return: tensorlayer input layer
"""
if isinstance(space, Box):
return input_layer(space.shape)
elif isinstance(space, Discrete):
return tl.layers.Input(dtype=tf.int32, shape=(None, ))
raise NotImplementedError
|
491c6d03d717bd33aa26264cd0296799f7fd242b
| 3,638,714
|
def get_application_registry():
"""Return the application registry. If :func:`set_application_registry` was never
invoked, return a registry built using :file:`defaults_en.txt` embedded in the pint
package.
    :return: a UnitRegistry instance.
"""
return _APP_REGISTRY
|
64b0eeb19933cc674d4e61c27432f02d12340d6d
| 3,638,717
|
import importlib
def get_dataset(cfg, designation):
"""
Return a Dataset for the given designation ('train', 'valid', 'test').
"""
dataset = importlib.import_module('.' + cfg['dataset'], __package__)
return dataset.create(cfg, designation)
|
3f872d6407110cf735968ad6d4939b40fec9167d
| 3,638,718
|
import uuid
def invite(email, inviter, user=None, sendfn=send_invite, resend=True,
**kwargs):
"""
Invite a given email address.
Returns a ``(User, sent)`` tuple similar to the Django
:meth:`django.db.models.Manager.get_or_create` method.
If a user is passed in, reinvite the user. For projects that support
multiple users with the same email address, it is necessary to pass in the
user to avoid throwing a MultipleObjectsReturned error.
If a user with ``email`` address does not exist:
* Creates a user object
* Set ``user.email = email``
* Set ``user.is_active = False``
* Set a random password
* Send the invitation email
* Return ``(user, True)``
    If a user with ``email`` address exists, ``user.is_active == False`` and ``resend`` is True:
* Re-send the invitation
* Return ``(user, True)``
If a user with ``email`` address exists:
* Don't send the invitation
* Return ``(user, False)``
If the email address is blocked:
* Don't send the invitation
* Return ``(None, False)``
To customize sending, pass in a new ``sendfn`` function as documented by
:attr:`inviter2.utils.send_invite`:
::
sendfn = lambda invitee, inviter, **kwargs: 1
invite("foo@bar.com", request.user, sendfn = sendfn)
:param email: The email address
:param inviter: The user inviting the email address
    :param user: An existing user to be reinvited.
:param sendfn: An email sending function. Defaults to
:attr:`inviter2.utils.send_invite`
:param resend: Resend email to users that are not registered yet
"""
if OptOut.objects.is_blocked(email):
return None, False
try:
if not user:
user = User.objects.get(email=email)
if user.is_active:
return user, False
if not resend:
return user, False
except User.DoesNotExist:
username_field = getattr(User, 'USERNAME_FIELD', 'username')
if username_field == 'username':
            user = create_inactive_user(email=email, username=str(uuid.uuid4()))
else:
user = create_inactive_user(email=email)
url_parts = int_to_base36(user.id), token_generator.make_token(user)
url = reverse('{}:register'.format(NAMESPACE), args=url_parts)
opt_out_url = reverse('{}:opt-out'.format(NAMESPACE), args=url_parts)
kwargs.update(opt_out_url=opt_out_url)
sendfn(user, inviter, url=url, **kwargs)
return user, True
|
46bc4d45b42d0cc2dbd80b2928f44e4926d24b77
| 3,638,719
|
def RestrictDictValues( aDict, restrictSet ):
"""Return a dict which has the mappings from the original dict only for values in the given set"""
return dict( item for item in aDict.items() if item[1] in restrictSet )
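# A minimal usage sketch:
scores = {'a': 1, 'b': 2, 'c': 3}
print(RestrictDictValues(scores, {1, 3}))  # {'a': 1, 'c': 3}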
|
4333c40a38ad3bce326f94c27b4ffd7dc24ae19c
| 3,638,720
|
from os import listdir
def classify_images(images_dir, petlabel_dic, model):
"""
Creates classifier labels with classifier function, compares labels, and
creates a dictionary containing both labels and comparison of them to be
returned.
PLEASE NOTE: This function uses the classifier() function defined in
classifier.py within this function. The proper use of this function is
in test_classifier.py Please refer to this program prior to using the
classifier() function to classify images in this function.
Parameters:
images_dir - The (full) path to the folder of images that are to be
classified by pretrained CNN models (string)
petlabel_dic - Dictionary that contains the pet image(true) labels
                   that classify what's in the image, where the key is the
                   pet image filename and the value is the pet image label;
                   labels are lowercase with a space between each word
model - pretrained CNN whose architecture is indicated by this parameter,
values must be: resnet alexnet vgg (string)
Returns:
results_dic - Dictionary with key as image filename and value as a List
(index)idx 0 = pet image label (string)
idx 1 = classifier label (string)
idx 2 = 1/0 (int) where 1 = match between pet image and
                     classifier labels and 0 = no match between labels
"""
results = {}
# loop through each image in the IMAGE_DIR
for image in listdir(images_dir):
# get the pet label determined by the image filename (see get_pet_labels function)
pet_label = petlabel_dic[image]
# create the full path to the image file
        image_path = images_dir + image
        # classify the image using the model, leveraging the prebuilt classifier function
classified_label = classifier(image_path, model)
# call split on CLASSIFIED_LABEL as per the documentation in test_classifier.py it's possible to have multiple
# words to describe one label. Thus get them into a list and then check if one of them matches PET_LABEL
classified_labels = classified_label.split(",")
match = 0
        for label in classified_labels:
            if pet_label.lower() == label.strip().lower():
match = 1
break
results[image] = [pet_label, classified_label, match]
return results
|
7d6679de95da2f7a526ae18a27d9602218f05153
| 3,638,722
|
import pytest
import redis
def create_redis_fixture(scope="function"):
"""Produce a Redis fixture.
Any number of fixture functions can be created. Under the hood they will all share the same
database server.
Args:
scope (str): The scope of the fixture can be specified by the user, defaults to "function".
Raises:
KeyError: If any additional arguments are provided to the function than what is necessary.
"""
@pytest.fixture(scope=scope)
def _(_redis_container, pmr_redis_config):
db = redis.Redis(host=pmr_redis_config.host, port=pmr_redis_config.port)
db.flushall()
assign_fixture_credentials(
db,
drivername="redis",
host=pmr_redis_config.host,
port=pmr_redis_config.port,
database=None,
username=None,
password=None,
)
return db
return _
|
0e2e79c34feeb805c8300145f3a04314069b873f
| 3,638,723
|
def get_heroes(**kwargs):
"""
Get a list of hero identifiers
"""
return make_request("GetHeroes",
base="http://api.steampowered.com/IEconDOTA2_570/", **kwargs)
|
b512377952c0c1415eb45cc05918e7d152516f83
| 3,638,724
|
from typing import Union
from typing import Optional
def gt_strategy(
pandera_dtype: Union[numpy_engine.DataType, pandas_engine.DataType],
strategy: Optional[SearchStrategy] = None,
*,
min_value: Union[int, float],
) -> SearchStrategy:
"""Strategy to generate values greater than a minimum value.
:param pandera_dtype: :class:`pandera.dtypes.DataType` instance.
:param strategy: an optional hypothesis strategy. If specified, the
pandas dtype strategy will be chained onto this strategy.
:param min_value: generate values larger than this.
:returns: ``hypothesis`` strategy
"""
if strategy is None:
strategy = pandas_dtype_strategy(
pandera_dtype,
min_value=min_value,
exclude_min=True if is_float(pandera_dtype) else None,
)
return strategy.filter(lambda x: x > min_value)
|
751ee69c3cf396d0d2ca043bad17c6ed80b8d46d
| 3,638,725
|
def get_smoker_status(observation):
"""Does `observation` represent a suvery response indicating that the patient is or was a smoker."""
try:
for coding in observation['valueCodeableConcept']['coding']:
if ('system' in coding and 'code' in coding and
coding['system'] == utils.SNOMED_SYSTEM and
(coding['code'] == '8517006' or coding['code'] == '449868002'
) # Former smoker or Every day smoker
):
return True
return False
except KeyError:
return False
|
e63f4ffc09af3af19fd493c4fbc2824ffa136a64
| 3,638,726
|
def model_input_data_api():
"""Returns records of the data used for the model."""
# Parse inputs
# Hours query parameter must be between 1 and API_MAX_HOURS.
hours = request.args.get('hours', default=24, type=int)
hours = min(hours, current_app.config['API_MAX_HOURS'])
hours = max(hours, 1)
df = execute_sql('''SELECT * FROM processed_data ORDER BY time''')
model_input_data = df.tail(n=hours).to_dict(orient='records')
return jsonify(model_input_data=model_input_data)
|
7f5d014dda1f4e778cf8df5aac453c8a19748465
| 3,638,727
|
def GetIAP(args, messages, existing_iap_settings=None):
"""Returns IAP settings from arguments."""
if 'enabled' in args.iap and 'disabled' in args.iap:
raise exceptions.InvalidArgumentException(
'--iap', 'Must specify only one of [enabled] or [disabled]')
iap_settings = messages.BackendServiceIAP()
if 'enabled' in args.iap:
iap_settings.enabled = True
elif 'disabled' in args.iap:
iap_settings.enabled = False
elif existing_iap_settings is not None:
iap_settings.enabled = existing_iap_settings.enabled
if iap_settings.enabled:
# If either oauth2-client-id or oauth2-client-secret is specified,
# then the other should also be specified.
if 'oauth2-client-id' in args.iap or 'oauth2-client-secret' in args.iap:
iap_settings.oauth2ClientId = args.iap.get('oauth2-client-id')
iap_settings.oauth2ClientSecret = args.iap.get('oauth2-client-secret')
if not iap_settings.oauth2ClientId or not iap_settings.oauth2ClientSecret:
raise exceptions.InvalidArgumentException(
'--iap',
'Both [oauth2-client-id] and [oauth2-client-secret] must be '
'specified together')
return iap_settings
|
bf17a15ebcaab42a930928e3a949c4357cdf718f
| 3,638,728
|
def get_default_instance():
"""Return the default VLC.Instance.
"""
global _default_instance
if _default_instance is None:
_default_instance = Instance()
return _default_instance
|
f45a6eca003bc1b52b1c0bca1d15643898189899
| 3,638,729
|
def dirty_multi_node_expand(node, precision, mem_map=None, fma=True):
""" Dirty expand node into Hi and Lo part, storing
already processed temporary values in mem_map """
mem_map = mem_map or {}
if node in mem_map:
return mem_map[node]
elif isinstance(node, Constant):
value = node.get_value()
value_hi = sollya.round(value, precision.sollya_object, sollya.RN)
value_lo = sollya.round(value - value_hi, precision.sollya_object, sollya.RN)
ch = Constant(value_hi,
tag=node.get_tag() + "hi",
precision=precision)
cl = Constant(value_lo,
tag=node.get_tag() + "lo",
precision=precision
) if value_lo != 0 else None
if cl is None:
Log.report(Log.Info, "simplified constant")
result = ch, cl
mem_map[node] = result
return result
else:
# Case of Addition or Multiplication nodes:
# 1. retrieve inputs
# 2. dirty convert inputs recursively
# 3. forward to the right metamacro
assert isinstance(node, Addition) or isinstance(node, Multiplication)
lhs = node.get_input(0)
rhs = node.get_input(1)
op1h, op1l = dirty_multi_node_expand(lhs, precision, mem_map, fma)
op2h, op2l = dirty_multi_node_expand(rhs, precision, mem_map, fma)
if isinstance(node, Addition):
result = Add222(op1h, op1l, op2h, op2l) \
if op1l is not None and op2l is not None \
else Add212(op1h, op2h, op2l) \
if op1l is None and op2l is not None \
else Add212(op2h, op1h, op1l) \
if op2l is None and op1l is not None \
else Add211(op1h, op2h)
mem_map[node] = result
return result
elif isinstance(node, Multiplication):
result = Mul222(op1h, op1l, op2h, op2l, fma=fma) \
if op1l is not None and op2l is not None \
else Mul212(op1h, op2h, op2l, fma=fma) \
if op1l is None and op2l is not None \
else Mul212(op2h, op1h, op1l, fma=fma) \
if op2l is None and op1l is not None \
else Mul211(op1h, op2h, fma=fma)
mem_map[node] = result
return result
|
f36b783041f7f6d2b7577db9160d702aa81461bd
| 3,638,730
|
import json
def create_princess_df(spark_session) -> DataFrame:
"""Return a valid DF of disney princesses."""
princesses = [
{
"name": "Cinderella",
"age": 16,
"happy": False,
"items": {"weakness": "thorns", "created": "2020-10-14"},
},
{
"name": "Snow white",
"age": 17,
"happy": True,
"items": {"weakness": "apple", "created": "2020-10-14"},
},
{
"name": "Belle",
"age": 18,
"happy": False,
"items": {"weakness": "roses", "created": "2020-10-14"},
},
{
"name": "Jasmine",
"age": 19,
"happy": True,
"items": {"weakness": "jafar", "created": "2020-10-14"},
},
]
return (
spark_session.read.option("multiline", "true")
.json(spark_session.sparkContext.parallelize([json.dumps(princesses)]))
.select("name", "age", "happy", "items")
)
|
cded63b3882adbb48f7e39f37169f46b55a99ae3
| 3,638,731
|
from pathlib import Path
import pandas as pd
def get_sheet_names(file_path):
    """
    Return the sheet names of the given Excel file, or the file name itself for a CSV file.
    :param file_path: path to the Excel or CSV file
    :return: list of sheet names
    """
    file_extension = Path(file_path).suffix
    is_csv = file_extension.lower() == ".csv"
if is_csv:
return [Path(file_path).name]
xl = pd.ExcelFile(file_path)
return xl.sheet_names
|
826ffb19f21ef117124d747c812a773f1422a10b
| 3,638,732
|
def count_votes(votation_id):
"""
    Count the number of distinct vote_key values; its purpose is to compare against the number of voters.
"""
n = db.session.query(Vote.vote_key).filter(Vote.votation_id == votation_id).distinct().count()
return n
|
81bd75c7185f9e46e0f2c2d2ed23d5717de98cbe
| 3,638,733
|
import numpy as np
def get_eng_cv_rate(low_prob):
    """Returns 'low' and 'high' probabilities for student to English I conversion.
Simulated data for class enrollment.
Args:
low_prob(float): low end of probability
Returns: dict
"""
np.random.seed(123)
global eng_cv_rate_dict
eng_cv_rate_dict = {'low':low_prob, 'high':np.random.uniform(low = low_prob, high=1.25*low_prob)}
return eng_cv_rate_dict
|
841653ab1d5938d1fd8e7a170fc8d6b4f0326248
| 3,638,734
|
from typing import Type
def dev_unify_nest(args: Type[MultiDev], kwargs: Type[MultiDev], dev, mode, axis=0, max_depth=1):
"""
Unify the input nested arguments, which consist of sub-arrays spread across arbitrary devices, to unified arrays
on the single target device.
:param args: The nested positional arguments to unify.
:type args: MultiDev
:param kwargs: The nested keyword arguments to unify.
:type kwargs: MultiDev
:param dev: The device to unify the nested arguments to.
:type dev: Device
:param mode: The mode by which to unify, must be one of [ concat | mean | sum ]
:type mode: str
    :param axis: The axis along which to concatenate the sub-arrays. Default is 0.
:type axis: int, optional
:param max_depth: The maximum nested depth to reach. Default is 1. Increase this if the nest is deeper.
:type max_depth: int, optional
:return: nested arguments unified to the target device
"""
args = args._data if isinstance(args, MultiDevIter) else args
kwargs = kwargs._data if isinstance(kwargs, MultiDevIter) else kwargs
args_uni = ivy.nested_map(args, lambda x: dev_unify(x, dev, mode, axis), max_depth=max_depth)
kwargs_uni = ivy.nested_map(kwargs, lambda x: dev_unify(x, dev, mode, axis), max_depth=max_depth)
return args_uni, kwargs_uni
|
2c78c6fb3eb365c7742a134c33fa8f3bf2622bb2
| 3,638,735
|
import jax
import jax.numpy as jnp
def make_preprocessor(transforms=None, device_put=False):
    """Build an observation-preprocessing function from optional transforms.
    """
    # verify input
    if transforms is not None:
        if not isinstance(transforms, (list, tuple)):
            transforms = (transforms,)
        for fn in transforms:
            if not callable(fn):
                raise ValueError("Each element of transforms must be callable")
def preprocess(obs):
# apply custom transforms first
if transforms:
for fn in transforms:
obs = fn(obs)
# convert obs to array
if isinstance(obs, (int, float)):
return jnp.array(obs).reshape((1,))
if not obs.shape:
return obs.reshape((1,))
# put array to device if flag is set
if device_put:
obs = jax.device_put(obs)
return obs
return preprocess
|
09c1b9dc027457a26f7d4b73e3a5572c206fc343
| 3,638,736
|
def timeRangeContainsRange(event1Start, event2Start, event1End, event2End):
"""
Returns true if one set of times starts and ends
within another set of times
@param event1Start: datetime
@param event2Start: datetime
@param event1End: datetime
@param event2End: datetime
@return: boolean
"""
    return ((event2Start <= event1Start and event2End >= event1End) or
            (event1Start <= event2Start and event1End >= event2End))
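# A hedged usage sketch with datetime objects; event 1 (10:00-11:00) lies
# inside event 2 (09:00-12:00):
from datetime import datetime
print(timeRangeContainsRange(datetime(2024, 1, 1, 10), datetime(2024, 1, 1, 9),
                             datetime(2024, 1, 1, 11), datetime(2024, 1, 1, 12)))  # True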
|
05d25969b1f97f2f7015c9ce9bafbffcb931cb9b
| 3,638,737
|
import numpy as np
def compute_confidence_intervals(x: np.ndarray, z: float = 1.96) -> float:
    """
    Compute the half-width of the confidence interval of the mean of a sample;
    the interval itself is the sample mean plus or minus the returned value.
Hazra, Avijit. "Using the confidence interval confidently." Journal of thoracic disease 9.10 (2017): 4125.
Formula:
CI = x̅ ± z × (std/√n)
where
CI: Confidence Interval
x̅: Sample Mean
z: Z Statistic for desired confidence interval
std: Sample Standard Deviation
n: Sample Size
"""
return z * (x.std()/len(x)**.5)
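# A hedged usage sketch:
rng = np.random.default_rng(0)
sample = rng.normal(loc=5.0, scale=2.0, size=100)
half_width = compute_confidence_intervals(sample)  # z = 1.96 -> ~95% confidence
print('95% CI: {:.2f} +/- {:.2f}'.format(sample.mean(), half_width))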
|
cd394ec2f4343ac82b16cc18a4ba280d2f57d1ad
| 3,638,738
|
import socket
def SendCommands(cmds, key):
"""Send commands to the running instance of Editra
@param cmds: List of command strings
@param key: Server session authentication key
@return: bool
"""
    if not len(cmds):
        return False
# Add the authentication key
cmds.insert(0, key)
# Append the message end clause
cmds.append(MSGEND)
try:
# Setup the client socket
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1', EDPORT))
# Server expects commands delimited by ;
client.send(u";".join(cmds))
client.shutdown(socket.SHUT_RDWR)
client.close()
    except socket.error:
return False
else:
return True
|
3db90749c3a88fd9341f6fbb9c4088d000d2d5ef
| 3,638,739
|
import urllib
def esv(value, args=''):
"""
Use ESV API to get a Bible Passage
http://www.esvapi.org/v2/rest/passageQuery?key=IP&passage=Gen+1:5-10&output-format=plain-text
Looking for [[bible PASSAGE]]
Usage::
{{ text|esv}}
{{ text|esv:"option1:value,option2:value"}}
"""
if BIBLE_RE.search(value) is None:
return value
esv_dict = ESV_DICT.copy()
esv_args = args.split(',')
if len(esv_args) > 0:
for arg in esv_args:
try:
key, val = arg.split(':')
                if key in esv_dict:
esv_dict[key] = val
except ValueError:
pass
global ESV_QUERY_URL
ESV_QUERY_URL = ESV_API_URL+'&'.join([k+'='+urllib.quote(str(v)) for (k,v) in esv_dict.items()])
return BIBLE_RE.sub(_get_esv_txt, value)
|
5947071d3363f8308ddeb98a6f862488b942f89b
| 3,638,740
|
import torch
def box_nms(bboxes, scores, labels, threshold=0.5, mode='union'):
"""Non maximum suppression.
source: https://github.com/kuangliu/pytorch-retinanet
Args:
bboxes: (tensor) bounding boxes, sized [N,4].
        scores: (tensor) bbox scores, sized [N,].
        labels: (tensor) class labels, sized [N,]; a box is only suppressed by a higher-scoring box of the same label.
threshold: (float) overlap threshold.
mode: (str) 'union' or 'min'.
Returns:
keep: (tensor) selected indices.
Reference:
https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/nms/py_cpu_nms.py
"""
x1 = bboxes[:, 0]
y1 = bboxes[:, 1]
x2 = bboxes[:, 2]
y2 = bboxes[:, 3]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
_, order = scores.sort(0, descending=True)
keep = []
while order.numel() > 0:
i = order[0]
keep.append(i)
if order.numel() == 1:
break
label = labels[i]
xx1 = x1[order[1:]].clamp(min=x1[i])
yy1 = y1[order[1:]].clamp(min=y1[i])
xx2 = x2[order[1:]].clamp(max=x2[i])
yy2 = y2[order[1:]].clamp(max=y2[i])
w = (xx2 - xx1 + 1).clamp(min=0)
h = (yy2 - yy1 + 1).clamp(min=0)
inter = w * h
if mode == 'union':
ovr = inter / (areas[i] + areas[order[1:]] - inter)
elif mode == 'min':
ovr = inter / areas[order[1:]].clamp(max=areas[i])
else:
raise TypeError('Unknown nms mode: %s.' % mode)
ids = ((ovr <= threshold) | (labels[order[1:]] != label)).nonzero().squeeze()
if ids.numel() == 0:
break
order = order[ids + 1]
return torch.LongTensor(keep)
|
355262b9af52d0455089a05d90bfa7e21d5d52de
| 3,638,741
|
def abs_length_diff(trg, pred):
"""Computes absolute length difference
between a target sequence and a predicted sequence
Args:
- trg (str): reference
- pred (str): generated output
Returns:
- absolute length difference (int)
"""
trg_length = len(trg.split(' '))
pred_length = len(pred.split(' '))
return abs(trg_length - pred_length)
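# For example:
print(abs_length_diff('the cat sat', 'the cat sat down'))  # 1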
|
b5baf53609b65aa1ef3b1f142e965fa0606b3136
| 3,638,742
|
def CDLEVENINGSTAR(data: xr.DataArray, penetration: float = 0.3) -> xr.DataArray:
"""
Evening Star (Pattern Recognition)
Inputs:
data:['open', 'high', 'low', 'close']
Outputs:
double series (values are -1, 0 or 1)
"""
return multiple_series_call(talib.CDLEVENINGSTAR, data, ds.TIME, ds.FIELD, [f.OPEN, f.HIGH, f.LOW, f.CLOSE],
[penetration], result_divider=100)
|
6da8ed1782cea60c626829fc7a33210ddd58754e
| 3,638,743
|
def cern_authorized_signup_handler(resp, remote, *args, **kwargs):
"""Handle sign-in/up functionality.
:param remote: The remote application.
:param resp: The response.
:returns: Redirect response.
"""
# Remove any previously stored auto register session key
session.pop(token_session_key(remote.name) + "_autoregister", None)
# Store token in session
# ----------------------
# Set token in session - token object only returned if
# current_user.is_authenticated().
token = response_token_setter(remote, resp)
handlers = current_oauthclient.signup_handlers[remote.name]
# Sign-in/up user
# ---------------
if not current_user.is_authenticated:
account_info = handlers["info"](resp)
account_info_received.send(remote, token=token, response=resp, account_info=account_info)
user = oauth_get_user(
remote.consumer_key,
account_info=account_info,
access_token=token_getter(remote)[0],
)
if user is None:
# Auto sign-up if user not found
form = create_csrf_disabled_registrationform()
form = fill_form(form, account_info["user"])
user = oauth_register(form)
# if registration fails ...
if user is None:
# requires extra information
session[token_session_key(remote.name) + "_autoregister"] = True
session[token_session_key(remote.name) + "_account_info"] = account_info
session[token_session_key(remote.name) + "_response"] = resp
db.session.commit()
return redirect(
url_for(
".signup",
remote_app=remote.name,
)
)
# Authenticate user
if not oauth_authenticate(remote.consumer_key, user, require_existing_link=False):
return current_app.login_manager.unauthorized()
# Link account
# ------------
# Need to store token in database instead of only the session when
# called first time.
token = response_token_setter(remote, resp)
# Setup account
# -------------
if not token.remote_account.extra_data:
account_setup = handlers["setup"](token, resp)
account_setup_received.send(remote, token=token, response=resp, account_setup=account_setup)
db.session.commit()
account_setup_committed.send(remote, token=token)
else:
db.session.commit()
# Redirect to next
if current_user.is_authenticated and not is_egroup_admin():
logout_user()
return redirect(get_post_logout_redirect())
next_url = get_session_next_url(remote.name)
if next_url:
return redirect(next_url)
return redirect(url_for("invenio_oauthclient_settings.index"))
|
df2d8998acb5be4175069507832cd3bf90558824
| 3,638,744
|
def asset_from_iconomi(symbol: str) -> Asset:
"""May raise:
- DeserializationError
- UnsupportedAsset
- UnknownAsset
"""
if not isinstance(symbol, str):
raise DeserializationError(f'Got non-string type {type(symbol)} for iconomi asset')
symbol = symbol.upper()
if symbol in UNSUPPORTED_ICONOMI_ASSETS:
raise UnsupportedAsset(symbol)
name = ICONOMI_TO_WORLD.get(symbol, symbol)
return symbol_to_asset_or_token(name)
|
ae9463d1234cf409eaa52d5fbbdd158444b50b16
| 3,638,746
|
def ConvertHashType(value):
"""
Attempt to convert a space separated series of key=value pairs into a dictionary
of pairs. If any value fails to split successfully an error will be raised.
:param value: Space delimited string of key-value pairs
:return: Dictionary of key-value pairs.
"""
collection = dict()
for option in value.split():
try:
k, v = option.split('=')
except ValueError:
raise ConversionFailure("Invalid option '{}' for key-value pair: {}"
.format(option, value))
collection[k] = v.strip()
return collection
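# A minimal usage sketch (ConversionFailure is assumed to be defined in the
# surrounding module):
print(ConvertHashType('mode=strict level=3'))  # {'mode': 'strict', 'level': '3'}
# ConvertHashType('broken-option') would raise ConversionFailure.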
|
91cc47a855958a3ae352cd35a1eea84c5d8e45b4
| 3,638,747
|
def ts_dct_from_estsks(pes_idx, es_tsk_lst, rxn_lst, thy_dct,
spc_dct, run_prefix, save_prefix):
""" build a ts queue
"""
print('\nTasks for transition states requested...')
print('Identifying reaction classes for transition states...')
# Build the ts_dct
ts_dct = {}
for tsk_lst in es_tsk_lst:
obj, es_keyword_dct = tsk_lst[0], tsk_lst[-1]
if obj in ('ts', 'all'):
# want print for task list
method_dct = thy_dct.get(es_keyword_dct['runlvl'])
ini_method_dct = thy_dct.get(es_keyword_dct['inplvl'])
thy_info = tinfo.from_dct(method_dct)
ini_thy_info = tinfo.from_dct(ini_method_dct)
break
# Discern if TS should be reidentified
re_id = False
for tsk_lst in es_tsk_lst:
obj, es_keyword_dct = tsk_lst[:-1], tsk_lst[-1]
if 'find_ts' in obj:
re_id = es_keyword_dct.get('re_id', False)
ts_dct = {}
for rxn in rxn_lst:
ts_dct.update(
ts_dct_sing_chnl(
pes_idx, rxn,
spc_dct, run_prefix, save_prefix,
thy_info=thy_info, ini_thy_info=ini_thy_info, re_id=re_id)
)
# Build the queue
# ts_queue = tuple(sadpt for sadpt in ts_dct) if ts_dct else ()
return ts_dct
# return ts_dct, ts_queue
|
30d07aac9fb2c03b87017878cdb96ed750538c9d
| 3,638,748
|
import re
def parse_msig_storage(storage: str):
"""Parse the storage of a multisig contract to get its counter (as a
number), threshold (as a number), and the keys of the signers (as
Micheline sequence in a string)."""
# put everything on a single line
storage = ' '.join(storage.split('\n'))
storage_regexp = r'Pair\s+?([0-9]+)\s+?([0-9]+)\s+?(.*)\s*'
match = re.search(storage_regexp, storage)
assert match is not None
return {
'counter': int(match[1]),
'threshold': int(match[2]),
'keys': match[3],
}
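# A hedged usage sketch with made-up key literals:
example_storage = 'Pair 0\n     2\n     { "edpkAAA" ; "edpkBBB" ; "edpkCCC" }'
print(parse_msig_storage(example_storage))
# {'counter': 0, 'threshold': 2, 'keys': '{ "edpkAAA" ; "edpkBBB" ; "edpkCCC" }'}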
|
6e04091721177cdd3d40b86717eb86ebbb92a8ff
| 3,638,749
|
def do_index(request):
"""Render the index page."""
projects = [
(name, path)
for name, path in config.all_projects()
if request.user.can_open(path)
]
login_block = render_login_block(request)
html = util.render_template(
config.get_path('www.index_template', 'gbd/core/index.html.tpl'),
locals())
return 200, [('Content-Type', 'text/html; charset=utf-8')], html
|
f5ba6274ebe2e3fce875a44f727d258b536e2e46
| 3,638,751
|
from random import sample
import pandas as pd
import requests
def sample_coll(word, urns=[], after=5, before=5, sample_size = 300, limit=1000):
"""Find collocations for word in a sample of set of book URNs"""
# check if urns is a list of lists, [[s1, ...],[s2, ...]...] then urn serial first element
# else the list is assumed to be on the form [s1, s2, ....]
if isinstance(urns[0], list):
urns = [u[0] for u in urns]
newurns = [x[0] for x in nb.refine_book_urn(words=[word], urns = urns)]
# Take a sample
sampleurns = sample(newurns, min(len(newurns), sample_size))
# run collocation as normal
r = requests.post("https://api.nb.no/ngram/urncoll",
json = {
'word':word,
'urns':sampleurns,
'after':after,
'before':before,
'limit':limit
}
)
res = pd.DataFrame.from_dict(r.json(), orient='index')
# sort values of resultant set
if not res.empty:
res = res.sort_values(by=res.columns[0], ascending = False)
return res
|
37b343a82a9424fc408b9545a5875ee8bb0f4d9a
| 3,638,752
|
import numpy as np
import numpy.typing as npt
def roll_array(arr: npt.ArrayLike, shift: int, axis: int = 0) -> np.ndarray:
"""Roll the elements in the array by `shift` positions along the given axis.
Parameters
----------
arr : :py:obj:`~numpy.typing.ArrayLike`
input array to roll
shift : int
number of bins to shift by
axis : int
axis to roll along, by default 0
Returns
-------
:py:obj:`~numpy.ndarray`
shifted numpy array
"""
arr = np.asanyarray(arr)
arr_size = arr.shape[axis]
shift %= arr_size
return arr.take(np.concatenate((np.arange(shift, arr_size), np.arange(shift))), axis)
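# A hedged usage sketch; note the direction is opposite to np.roll, i.e.
# roll_array(a, k) matches np.roll(a, -k):
print(roll_array(np.arange(5), 2))  # [2 3 4 0 1]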
|
79ad44163eb33408021879a0d64f3d0541e97410
| 3,638,753
|
def remove(favourites_list, ctype, pk, **options):
"""Remove a line from the favourites_list. """
instance = unpack_instance_key(favourites_list, ctype, pk)
return favourites_list.remove(instance, options=options)
|
701c153d2f846c8431fae41b06ab5c28617845a3
| 3,638,754
|
def eHealthClass_airFlowWave(*args):
"""eHealthClass_airFlowWave(int air)"""
return _ehealth.eHealthClass_airFlowWave(*args)
|
8af646f62c13f783c4b38524bf727fca038b0b7b
| 3,638,755
|
def bp_symm_func(tensors, sf_spec, rc, cutoff_type):
""" Wrapper for building Behler-style symmetry functions"""
sf_func = {'G2': G2_SF, 'G3': G3_SF, 'G4': G4_SF}
fps = {}
for i, sf in enumerate(sf_spec):
options = {k: v for k, v in sf.items() if k != "type"}
if sf['type'] == 'G3': # Workaround for G3 only
options.update({'rc': rc, 'cutoff_type': cutoff_type})
fp, jacob, jacob_ind = sf_func[sf['type']](
tensors, **options)
fps['fp_{}'.format(i)] = fp
fps['jacob_{}'.format(i)] = jacob
fps['jacob_ind_{}'.format(i)] = jacob_ind
return fps
|
be882f791f4d4b2c9c575d836e9122604b89effa
| 3,638,756
|
def create_dummy_window(show_all=True, should_quit=False, fullscreen=False):
"""
Function to create dummy window which does nothing.
:param show_all: True if window should be shown immediately
:param should_quit: True if window should quit after user closed it
:param fullscreen: True if window should be in full screen mode by default
    :return: the created Gtk.Window
"""
window = Gtk.Window()
if show_all:
window.show_all()
if should_quit:
window.connect("delete-event", Gtk.main_quit)
if fullscreen:
window.fullscreen()
return window
|
b8f7a2bb4bc2531bc9c49ccf8f89f0a23cd93667
| 3,638,757
|
def color_RGB_to_hs(iR: float, iG: float, iB: float) -> tuple[float, float]:
"""Convert an rgb color to its hs representation."""
return color_RGB_to_hsv(iR, iG, iB)[:2]
|
14ae1cd29aca8de29bd3b776fe9a5e752015203d
| 3,638,758
|
def simulation_aggreation_merge(rankings, baseline, method='od'):
"""Merge rankings by running simulation of existing rankings. This would first extract relative position of different ranking results,
and relative position are considered as simulated games. The game results are sent to another ranker that gives merged ranking result.
Parameters
----------
rankings: list of rankings returned by rank of rankers.
baseline: (0, +Inf)
Since we are using relative position of each game player, one should provide a baseline as the least score a team should obtain in the simulated match.
method: {'massey', 'colley', 'keener', 'markov', 'od', 'difference'}
The final ranker applied on simulated games.
Returns
-------
pandas.DataFrame: ['name', 'rating', 'rank']
"""
if not isinstance(rankings, list):
raise ValueError('rankings should be a list of ranker result.')
if not all([isinstance(x, pd.DataFrame) for x in rankings]):
raise ValueError('all items in rankings list should be pandas dataframe.')
vhost = []
vvisit = []
vhscore = []
vvscore = []
for it in rankings:
for i in range(it.shape[0]):
for j in range(i+1, it.shape[0]):
host = it.loc[i, 'name']
visit = it.loc[j, 'name']
delta = it.loc[j, 'rank'] - it.loc[i, 'rank'] # host wins delta score over visit
hscore = baseline if delta<0 else baseline+delta
vscore = baseline-delta if delta<0 else baseline
vhost.append(host)
vvisit.append(visit)
vhscore.append(hscore)
vvscore.append(vscore)
sim = pd.DataFrame(data={
'host': vhost,
'visit': vvisit,
'hscore': vhscore,
'vscore': vvscore
}, columns=['host', 'visit', 'hscore', 'vscore'])
data = Table(data=sim, col = [0, 1, 2, 3])
if method=='massey':
ranker = MasseyRanker(table=data)
return ranker.rank(ascending=False)
elif method=='colley':
ranker = ColleyRanker(table=data)
return ranker.rank(ascending=False)
elif method=='keener':
ranker = KeenerRanker(table=data)
return ranker.rank(ascending=False)
elif method=='markov':
ranker = MarkovRanker(table=data)
return ranker.rank(ascending=False)
elif method=='od':
ranker = ODRanker(table=data)
return ranker.rank(output='summary', ascending=False)
elif method=='difference':
ranker = DifferenceRanker(table=data)
return ranker.rank(ascending=False)
else:
raise ValueError('method not available. Available methods are: massey, colley, keener, markov, od and difference.')
|
56de703d3dc0bee0b8e77c0ac7e21e9f97a8d485
| 3,638,760
|
def maxOverTime(field,makeTimes=0):
"""Take the max of the values in each time step
If makeTimes is true (1) then we return a field mapping all of the times
    to the max. Else we just return the max """
return GridMath.maxOverTime(field,makeTimes);
|
53281490d0f42538564aeb701fb312bf2d22d509
| 3,638,761
|
from functools import reduce
from operator import add
def assemble_docstring(parsed, sig=None):
"""
Assemble a docstring from an OrderedDict as returned by
:meth:`nd.utils.parse_docstring()`
Parameters
----------
parsed : OrderedDict
A parsed docstring as obtained by ``nd.utils.parse_docstring()``.
sig : function signature, optional
If provided, the parameters in the docstring will be ordered according
to the parameter order in the function signature.
Returns
-------
str
The assembled docstring.
"""
parsed = parsed.copy()
indent = parsed.pop('indent')
pad = ' '*indent
# Sort 'Parameters' section according to signature
if sig is not None and 'Parameters' in parsed:
order = tuple(sig.parameters.keys())
def sort_index(p):
key = p[0].split(':')[0].strip(' *')
if key == '':
return 9999
return order.index(key)
parsed['Parameters'] = sorted(parsed['Parameters'], key=sort_index)
d = []
for k, v in parsed.items():
if isinstance(v[0], list):
flat_v = reduce(add, v)
else:
flat_v = v
if k is not None:
d.extend(['', pad + k, pad + '-'*len(k)])
d.extend([(pad + l).rstrip() for l in flat_v])
return '\n'.join(d)
|
90553c468a2b113d3f26720128e384b0444d5c93
| 3,638,762
|
import requests
def retrieve_unscoped_token(os_auth_url, access_token, protocol="openid"):
"""Request an unscopped token"""
url = get_keystone_url(
os_auth_url,
"/v3/OS-FEDERATION/identity_providers/egi.eu/protocols/%s/auth" % protocol,
)
r = requests.post(url, headers={"Authorization": "Bearer %s" % access_token})
if r.status_code != requests.codes.created:
raise RuntimeError("Unable to get an unscoped token")
else:
return r.headers["X-Subject-Token"]
|
46d7eb0e057e2e8726effeeac45898c284bb2a4d
| 3,638,763
|
import csv
import numpy as np
from tqdm import tqdm
def load_dataset(csv_path, relative_path):
"""
Inputs
---
csv_path: path to training data csv
relative_path: relative path to training data
Outputs
---
X: Training data numpy array
y: Training labels numpy array
"""
# Read CSV lines
lines = []
with open(csv_path) as csvfile:
reader = csv.reader(csvfile)
print("Loading CSV File ...")
for line in tqdm(reader):
lines.append(line)
images = []; measurements = []
print("Loading Data ...")
    # Read from CSV lines: center (0), left (1) and right (2) camera images,
    # each augmented with a horizontally flipped copy
    for line in tqdm(lines):
        for camera_idx in range(3):
            image, measurement = _load_image(line, camera_idx, relative_path)
            images.append(image)
            measurements.append(measurement)
            images.append(np.fliplr(image))
            measurements.append(-1 * measurement)
X = np.array(images)
y = np.array(measurements)
return X, y
|
01f6c639a41628ceca0a854c3096ca795a2da972
| 3,638,764
|
from typing import Dict
from typing import List
def randomly_replace_a_zone() -> rd.RouteDict:
"""
    Load the historical data and, for each high-quality route, replace one randomly chosen zone with a new zone.
    :return: rd.RouteDict: the modified routes
"""
routeDict = rd.loadOrCreate()
rng.seed(3)
new_zone_id = 'zub_fy'
i: int
rid: str
route: rd.Route
for (i, (rid, route)) in enumerate(routeDict.items()):
if route.route_score != "High":
continue
# if i < 55:
# continue
zone_id2stops: Dict[str, List[rd.Stop]] = {}
unique_zone_id_list: List[str] = []
for s in route.stops:
if s.isDropoff() and s.zone_id is not None:
# if s.zone_id == 'C-13.1J':
# print(f'**** {s.idx=}, {s.zone_id=}')
if s.zone_id in zone_id2stops:
zone_id2stops[s.zone_id].append(s)
else:
zone_id2stops[s.zone_id] = [s]
unique_zone_id_list.append(s.zone_id)
zone_idx = rng.randrange(0, len(unique_zone_id_list))
selected_zone_id = unique_zone_id_list[zone_idx]
print(f'route_idx {i}: change zone {selected_zone_id} to {new_zone_id}')
for s in zone_id2stops[selected_zone_id]:
# print(f'---- {s.idx}, zone {s.zone_id} ==> {new_zone_id}')
s.zone_id = new_zone_id
if hasattr(route, 'zones'):
raise RuntimeError
if hasattr(route, 'zones_filled'):
raise RuntimeError
        zones, zone_id2zones = route.computeZones()  # using fill_missing_zone would also pull in nearby stops without a zone_id
if selected_zone_id in zone_id2zones:
raise RuntimeError
if not (new_zone_id in zone_id2zones):
raise RuntimeError
if len(zone_id2zones[new_zone_id].stops) != len(zone_id2stops[selected_zone_id]):
# print(f' new_zone stops: {[s.idx for s in zone_id2zones[new_zone_id].stops]}')
raise RuntimeError(f'{len(zone_id2zones[new_zone_id].stops)=} != {len(zone_id2stops[selected_zone_id])=}')
return routeDict
|
9bd3de2f35b6a356a610f091391726749a87cc56
| 3,638,765
|
import types
import itertools
def import_loop(schema, mutable, raw_data=None, field_converter=None, trusted_data=None,
mapping=None, partial=False, strict=False, init_values=False,
apply_defaults=False, convert=True, validate=False, new=False,
oo=False, recursive=False, app_data=None, context=None):
"""
The import loop is designed to take untrusted data and convert it into the
native types, as described in ``schema``. It does this by calling
``field_converter`` on every field.
Errors are aggregated and returned by throwing a ``ModelConversionError``.
:param schema:
The Schema to use as source for validation.
:param mutable:
A mapping or instance that can be changed during validation by Schema
functions.
:param raw_data:
A mapping to be converted into types according to ``schema``.
:param field_converter:
This function is applied to every field found in ``instance_or_dict``.
:param trusted_data:
A ``dict``-like structure that may contain already validated data.
:param partial:
Allow partial data to validate; useful for PATCH requests.
Essentially drops the ``required=True`` arguments from field
definitions. Default: False
:param strict:
Complain about unrecognized keys. Default: False
:param apply_defaults:
Whether to set fields to their default values when not present in input data.
:param app_data:
An arbitrary container for application-specific data that needs to
be available during the conversion.
:param context:
A ``Context`` object that encapsulates configuration options and ``app_data``.
The context object is created upon the initial invocation of ``import_loop``
and is then propagated through the entire process.
"""
if raw_data is None:
raw_data = mutable
got_data = raw_data is not None
context = Context._make(context)
try:
context.initialized
    except AttributeError:
if type(field_converter) is types.FunctionType:
field_converter = BasicConverter(field_converter)
context._setdefaults({
'initialized': True,
'field_converter': field_converter,
'trusted_data': trusted_data or {},
'mapping': mapping or {},
'partial': partial,
'strict': strict,
'init_values': init_values,
'apply_defaults': apply_defaults,
'convert': convert,
'validate': validate,
'new': new,
'oo': oo,
'recursive': recursive,
'app_data': app_data if app_data is not None else {}
})
raw_data = context.field_converter.pre(schema, raw_data, context)
_field_converter = context.field_converter
_model_mapping = context.mapping.get('model_mapping')
data = dict(context.trusted_data) if context.trusted_data else {}
errors = {}
if got_data and context.validate:
errors = _mutate(schema, mutable, raw_data, context)
if got_data:
# Determine all acceptable field input names
all_fields = schema._valid_input_keys
if context.mapping:
mapped_keys = (set(itertools.chain(*(
listify(input_keys) for target_key, input_keys in context.mapping.items()
if target_key != 'model_mapping'))))
all_fields = all_fields | mapped_keys
if context.strict:
# Check for rogues if strict is set
rogue_fields = set(raw_data) - all_fields
if rogue_fields:
for field in rogue_fields:
errors[field] = 'Rogue field'
atoms_filter = None
if not context.validate:
# optimization: convert without validate doesn't require to touch setters
atoms_filter = atom_filter.not_setter
for field_name, field, value in atoms(schema, raw_data, filter=atoms_filter):
serialized_field_name = field.serialized_name or field_name
if got_data and value is Undefined:
for key in field.get_input_keys(context.mapping):
if key and key != field_name and key in raw_data:
value = raw_data[key]
break
if value is Undefined:
if field_name in data:
continue
if context.apply_defaults:
value = field.default
if value is Undefined and context.init_values:
value = None
if got_data:
if field.is_compound:
if context.trusted_data and context.recursive:
td = context.trusted_data.get(field_name)
if not all(hasattr(td, attr) for attr in ('keys', '__getitem__')):
td = {field_name: td}
else:
td = {}
if _model_mapping:
submap = _model_mapping.get(field_name)
else:
submap = {}
field_context = context._branch(trusted_data=td, mapping=submap)
else:
field_context = context
try:
value = _field_converter(field, value, field_context)
except (FieldError, CompoundError) as exc:
errors[serialized_field_name] = exc
if context.apply_defaults:
value = field.default
if value is not Undefined:
data[field_name] = value
if isinstance(exc, DataError):
data[field_name] = exc.partial_data
continue
if value is Undefined:
continue
data[field_name] = value
if not context.validate:
for field_name, field, value in atoms(schema, raw_data, filter=atom_filter.has_setter):
data[field_name] = value
if errors:
raise DataError(errors, data)
data = context.field_converter.post(schema, data, context)
return data
|
b3849cd3e4b5cf338a3999954820c5f7d474e406
| 3,638,766
|
def resnet_50(inputs, block_fn=bottleneck_block, is_training_bn=False):
"""ResNetv50 model with classification layers removed."""
layers = [3, 4, 6, 3]
data_format = 'channels_last'
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=64,
kernel_size=7,
strides=2,
data_format=data_format)
inputs = tf.identity(inputs, 'initial_conv')
inputs = batch_norm_relu(inputs, is_training_bn, data_format=data_format)
inputs = tf.layers.max_pooling2d(
inputs=inputs,
pool_size=3,
strides=2,
padding='SAME',
data_format=data_format)
inputs = tf.identity(inputs, 'initial_max_pool')
inputs = block_group(
inputs=inputs,
filters=64,
blocks=layers[0],
strides=1,
block_fn=block_fn,
is_training_bn=is_training_bn,
name='block_group1',
data_format=data_format)
c3 = block_group(
inputs=inputs,
filters=128,
blocks=layers[1],
strides=2,
block_fn=block_fn,
is_training_bn=is_training_bn,
name='block_group2',
data_format=data_format)
c4 = block_group(
inputs=c3,
filters=256,
blocks=layers[2],
strides=2,
block_fn=block_fn,
is_training_bn=is_training_bn,
name='block_group3',
data_format=data_format)
c5 = block_group(
inputs=c4,
filters=512,
blocks=layers[3],
strides=2,
block_fn=block_fn,
is_training_bn=is_training_bn,
name='block_group4',
data_format=data_format)
return c3, c4, c5
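# Hedged usage sketch (assumes TF1-style graph mode and the conv2d_fixed_padding /
# block_group helpers defined alongside this function):
#
#     images = tf.placeholder(tf.float32, [None, 512, 512, 3])
#     c3, c4, c5 = resnet_50(images, is_training_bn=True)  # multi-scale features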
|
901954713b6c77334fc9050ab0c2758d9cad90dd
| 3,638,767
|
from numbers import Number

def _sanitize_index_element(ind):
"""Sanitize a one-element index."""
if isinstance(ind, Number):
ind2 = int(ind)
if ind2 != ind:
raise IndexError(f"Bad index. Must be integer-like: {ind}")
else:
return ind2
elif ind is None:
return None
else:
raise TypeError("Invalid index type", type(ind), ind)
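# e.g. _sanitize_index_element(3.0) -> 3, _sanitize_index_element(None) -> None,
# and _sanitize_index_element(3.5) raises IndexError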
|
7d006d6ab0081fef01e162df63f76bcff691bbe3
| 3,638,768
|
def overlap_click(original, click_position, sr=44100, click_freq=4000, click_duration=0.05):
    """
    Overlay a click track on top of the original signal.
    :param original: the original waveform
    :param click_position: click positions, given in seconds
    :param sr: sample rate of the signal
    :param click_freq: frequency of the click tone (Hz)
    :param click_duration: duration of each click (seconds)
    :return: wave
    """
    # Use the parameters rather than the previously hardcoded values
    # (defaults preserve the old behaviour: sr=44100, 4000 Hz, 0.05 s).
    cwave = librosa.clicks(np.array(click_position), sr=sr, click_freq=click_freq,
                           click_duration=click_duration) / 2
    original, wave = mono_pad_or_truncate(original, cwave)
    return standardize(original + wave)
|
23a1668648c4b79b82088d149c6bc0b92ecd0326
| 3,638,769
|
import itertools
def vigenere(plaintext: str, *, key: str) -> str:
"""Vigenère cipher (page 48)
- `plaintext` is the message to be encrypted
- `key` defines the series of interwoven Caesar ciphers to be used
"""
plaintext = validate_plaintext(plaintext)
key = validate_key(key)
cycled_cipher_alphabet = itertools.cycle(
_shifted_alphabet(ord(c) - 65) for c in key
)
seq = [next(cycled_cipher_alphabet)[ord(c) - 97] for c in plaintext]
return "".join(seq)
|
c2b0428a86673770f299842b459a44d64423ca52
| 3,638,770
|
def generateVtTick(row, symbol):
"""生成K线"""
tick = VtTickData()
tick.symbol = symbol
tick.vtSymbol = symbol
tick.lastPrice = row['last']
tick.volume = row['volume']
tick.openInterest = row['open_interest']
tick.datetime = row.name
tick.openPrice = row['open']
tick.highPrice = row['high']
tick.lowPrice = row['low']
tick.preClosePrice = row['prev_close']
tick.upperLimit = row['limit_up']
tick.lowerLimit = row['limit_down']
tick.bidPrice1 = row['b1']
tick.bidPrice2 = row['b2']
tick.bidPrice3 = row['b3']
tick.bidPrice4 = row['b4']
tick.bidPrice5 = row['b5']
tick.bidVolume1 = row['b1_v']
tick.bidVolume2 = row['b2_v']
tick.bidVolume3 = row['b3_v']
tick.bidVolume4 = row['b4_v']
tick.bidVolume5 = row['b5_v']
tick.askPrice1 = row['a1']
tick.askPrice2 = row['a2']
tick.askPrice3 = row['a3']
tick.askPrice4 = row['a4']
tick.askPrice5 = row['a5']
tick.askVolume1 = row['a1_v']
tick.askVolume2 = row['a2_v']
tick.askVolume3 = row['a3_v']
tick.askVolume4 = row['a4_v']
tick.askVolume5 = row['a5_v']
return tick
|
108fbbf82eac228772cea90669fe14d90cbe8ccc
| 3,638,771
|
import dataclasses

import numpy as np
import tskit
def hartigan_map_mutations(tree, genotypes, alleles, ancestral_state=None):
"""
Returns a Hartigan parsimony reconstruction for the specified set of genotypes.
The reconstruction is specified by returning the ancestral state and a
list of mutations on the tree. Each mutation is a (node, parent, state)
triple, where node is the node over which the transition occurs, the
parent is the index of the parent transition above it on the tree (or -1
if there is none) and state is the new state.
"""
# The python version of map_mutations allows the ancestral_state to be a string
# from the alleles list, so we implement this at the top of this function although
# it doesn't need to be in the C equivalent of this function
if isinstance(ancestral_state, str):
ancestral_state = alleles.index(ancestral_state)
# equivalent C implementation can start here
genotypes = np.array(genotypes)
not_missing = genotypes != -1
if np.sum(not_missing) == 0:
raise ValueError("Must have at least one non-missing genotype")
num_alleles = np.max(genotypes[not_missing]) + 1
if ancestral_state is not None:
if ancestral_state < 0 or ancestral_state >= len(alleles):
raise ValueError("ancestral_state must be a number from 0..(num_alleles-1)")
if ancestral_state >= num_alleles:
num_alleles = ancestral_state + 1
num_nodes = tree.tree_sequence.num_nodes
# use a numpy array of 0/1 values to represent the set of states
# to make the code as similar as possible to the C implementation.
optimal_set = np.zeros((num_nodes + 1, num_alleles), dtype=np.int8)
for allele, u in zip(genotypes, tree.tree_sequence.samples()):
if allele != -1:
optimal_set[u, allele] = 1
else:
optimal_set[u] = 1
allele_count = np.zeros(num_alleles, dtype=int)
for u in tree.nodes(tree.virtual_root, order="postorder"):
allele_count[:] = 0
for v in tree.children(u):
for j in range(num_alleles):
allele_count[j] += optimal_set[v, j]
if not tree.is_sample(u):
max_allele_count = np.max(allele_count)
optimal_set[u, allele_count == max_allele_count] = 1
if ancestral_state is None:
ancestral_state = np.argmax(optimal_set[tree.virtual_root])
else:
optimal_set[tree.virtual_root] = 1
@dataclasses.dataclass
class StackElement:
node: int
state: int
mutation_parent: int
mutations = []
stack = [StackElement(tree.virtual_root, ancestral_state, -1)]
while len(stack) > 0:
s = stack.pop()
if optimal_set[s.node, s.state] == 0:
s.state = np.argmax(optimal_set[s.node])
mutation = tskit.Mutation(
node=s.node,
derived_state=alleles[s.state],
parent=s.mutation_parent,
)
s.mutation_parent = len(mutations)
mutations.append(mutation)
for v in tree.children(s.node):
stack.append(StackElement(v, s.state, s.mutation_parent))
return alleles[ancestral_state], mutations
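# Hedged usage sketch (assumes a recent tskit with tree.virtual_root; msprime is
# shown only to build an example tree):
#
#     import msprime
#     ts = msprime.sim_ancestry(2, random_seed=42)  # 4 sample nodes
#     tree = ts.first()
#     anc, muts = hartigan_map_mutations(tree, [0, 1, 1, 0], ["A", "T"])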
|
cbdeb0bc2e23f5a0e2ca1d420a3e78c7476cabc9
| 3,638,772
|
import numpy
def padArray(ori_array, pad_size):
"""
Pads out an array to a large size.
ori_array - A 2D numpy array.
pad_size - The number of elements to add to each of the "sides" of the array.
The padded 2D numpy array.
"""
if (pad_size > 0):
[x_size, y_size] = ori_array.shape
lg_array = numpy.ones((x_size+2*pad_size,y_size+2*pad_size))
lg_array[pad_size:(x_size+pad_size),pad_size:(y_size+pad_size)] = ori_array.astype(numpy.float64)
lg_array[0:pad_size,:] = numpy.flipud(lg_array[pad_size:2*pad_size,:])
lg_array[(x_size+pad_size):(x_size+2*pad_size),:] = numpy.flipud(lg_array[x_size:(x_size+pad_size),:])
lg_array[:,0:pad_size] = numpy.fliplr(lg_array[:,pad_size:2*pad_size])
lg_array[:,(y_size+pad_size):(y_size+2*pad_size)] = numpy.fliplr(lg_array[:,y_size:(y_size+pad_size)])
return lg_array
else:
return ori_array
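# A hedged note: for pad_size <= min(ori_array.shape), this matches
# numpy.pad(ori_array, pad_size, mode='symmetric') (edge values mirrored,
# edge included), modulo the float64 cast. For example:
#
#     padded = padArray(numpy.ones((8, 8)), 2)  # -> shape (12, 12)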
|
28fac7ccb8fc08c3ac7cf3104fed558128003750
| 3,638,773
|
def finish_subprocess(proc, cmdline, cmd_input=None, ok_exit_codes=None):
"""Ensure that the process returned a zero exit code indicating success"""
if ok_exit_codes is None:
ok_exit_codes = [0]
out, err = proc.communicate(cmd_input)
ret = proc.returncode
if ret not in ok_exit_codes:
LOG.error("Command '%(cmdline)s' with process id '%(pid)s' expected "
"return code in '%(ok)s' but got '%(rc)s': %(err)s",
{'cmdline': cmdline, 'pid': proc.pid, 'ok': ok_exit_codes,
'rc': ret, 'err': err})
raise SubprocessException(' '.join(cmdline), ret, out, err)
return out
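# Hedged usage sketch:
#
#     import subprocess
#     cmd = ['ls', '/']
#     proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#     out = finish_subprocess(proc, cmd)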
|
c8aa0f63f019b92b799cafd931f782a6855709ba
| 3,638,774
|
def browse():
"""
A simple browser that doesn't deal with queries at all
"""
page = int(request.args.get("page", 1))
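    # NB: query args arrive as strings, so any non-empty includeHistory value
    # (even "false") enables history below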
includeHistory = request.args.get("includeHistory", False)
results_per_page, search_offset = results_offset(page)
searchIndex = "history" if includeHistory else "latest"
count, hostdata = current_app.elastic.search(
results_per_page, search_offset, searchIndex=searchIndex
)
totalHosts = current_app.elastic.total_hosts()
if includeHistory:
next_url, prev_url = build_pagination_urls(
"main.browse", page, count, includeHistory=includeHistory
)
else:
# By using the if/else we can avoid putting includeHistory=False into the url that gets constructed
next_url, prev_url = build_pagination_urls("main.browse", page, count)
return render_template(
"main/browse.html",
numresults=count,
totalHosts=totalHosts,
page=page,
hosts=hostdata,
next_url=next_url,
prev_url=prev_url,
)
|
b579bf402d0034d950dcf2f87c08072d5a2959f9
| 3,638,775
|
from typing import cast
def argmax(pda: pdarray) -> np.int64:
"""
Return the index of the first occurrence of the array max value.
Parameters
----------
pda : pdarray
Values for which to calculate the argmax
Returns
-------
np.int64
The index of the argmax calculated from the pda
Raises
------
TypeError
Raised if pda is not a pdarray instance
RuntimeError
Raised if there's a server-side error thrown
"""
repMsg = generic_msg(cmd="reduction", args="{} {}".format("argmax", pda.name))
return parse_single_value(cast(str, repMsg))
|
9fbe515db4e40bf56373a1046a034f15f28d1849
| 3,638,776
|
import torch
def log_cumsum(probs, dim=1, eps=1e-8):
"""Calculate log of inclusive cumsum."""
return torch.log(torch.cumsum(probs, dim=dim) + eps)
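# e.g. log_cumsum(torch.tensor([[0.2, 0.3, 0.5]])) returns the elementwise log
# of [[0.2, 0.5, 1.0]] (plus eps), cumulated along dim=1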
|
7f1ab77fd9909037c7b89600c531173dab80c11e
| 3,638,777
|
def iou_coe_Slice_by_Slice(output, target, threshold=0.5, axis=(2, 3, 4), smooth=1e-5):
    """Non-differentiable Intersection over Union (IoU) for comparing the similarity
    of two batches of volumes, reduced over the given axes and averaged over the batch.
    """
    pre = tf.cast(output > threshold, dtype=tf.float32)
    truth = tf.cast(target > threshold, dtype=tf.float32)
    inse = tf.reduce_sum(tf.multiply(pre, truth), axis=axis)  # AND
    union = tf.reduce_sum(tf.cast(tf.add(pre, truth) >= 1, dtype=tf.float32), axis=axis)  # OR
    # Smoothing avoids division by zero when a region is empty in both tensors.
    batch_iou = (inse + smooth) / (union + smooth)
    iou = tf.reduce_mean(batch_iou, axis=0, name='iou_coe')
    return iou
|
6292c25b8ca9c2fd78dd810703795bfa97877261
| 3,638,778
|
def splitTrainTestDataList(list_data, test_fraction=0.2, sample_size=None, replace=False, seed=None):
"""
Split a list of data into train and test data based on given test fraction.
Each data in the list should have row for sample index, don't care about other axes.
:param list_data: List of data to sample and train-test split.
If only one data provided, then it doesn't have to be in a list.
:type list_data: list/tuple(np.ndarray[n_samples, _])
or np.ndarray[n_samples, _]
:param test_fraction: Fraction of data to use for test.
:type test_fraction: float [0-1], optional (default=0.2)
:param sample_size: Number of samples if doing sampling.
If None, then no sampling is done and all samples are used.
:type sample_size: int or None, optional (default=None)
:param replace: Whether to draw samples with replacement.
:type replace: bool, optional (default=False)
:param seed: Whether to use seed for reproducibility.
If None, then seed is not provided.
:type seed: int or None, optional (default=None)
:return: (Sampled) list of train data and list of test data
:rtype: list(np.ndarray[n_samples_train, _]), list(np.ndarray[n_samples_test, _])
"""
# Ensure list
if isinstance(list_data, np.ndarray):
list_data = [list_data]
elif isinstance(list_data, tuple):
list_data = list(list_data)
# If sample_size is None, use all samples
# Otherwise use provided sample_size but limit it
sample_size = len(list_data[0]) if sample_size is None else int(min(sample_size, len(list_data[0])))
# Indices of the samples randomized
np.random.seed(seed)
rand_indices = np.random.choice(np.arange(len(list_data[0])), len(list_data[0]), replace=replace)
# Train and test data sample size taking into account of sampling
train_size = int(sample_size*(1. - test_fraction))
test_size = sample_size - train_size
# Indices of train and test data after randomization and sampling
indices_train, indices_test = rand_indices[:train_size], rand_indices[train_size:sample_size]
# Go through all provided data
list_data_train, list_data_test = list_data.copy(), list_data.copy()
for i in range(len(list_data)):
# Pick up samples for train and test respectively from index lists prepared earlier
list_data_train[i] = list_data[i][indices_train]
list_data_test[i] = list_data[i][indices_test]
return list_data_train, list_data_test
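# Hedged usage sketch:
#
#     X = np.arange(100).reshape(100, 1)
#     y = np.arange(100)
#     train, test = splitTrainTestDataList([X, y], test_fraction=0.25, seed=0)
#     X_train, y_train = train
#     X_test, y_test = test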
|
ae14275da02025f6c3c894c33da64d336cd0f5d1
| 3,638,779
|
def create_hyperbounds(hyperparameters):
"""
Gets the bounds of each hyperspace for sampling.
Parameters
----------
* `hyperparameters` [list, shape=(n_hyperparameters,)]
Returns
-------
    * `hyperspace_bounds` [list of lists, shape=(n_spaces, n_hyperparameters)]
        - All combinations of hyperspace bounds.
        - Matches the bounds in hyperspaces from create_hyperspace.
"""
hparams_low = []
hparams_high = []
for hparam in hyperparameters:
low, high = check_hyperbounds(hparam)
hparams_low.append(low)
hparams_high.append(high)
all_spaces = fold_spaces(hparams_low, hparams_high)
    hyperspace_bounds = list(all_spaces)
    return hyperspace_bounds
|
4160e9084cd3f835f327661f4dacad80a348bd11
| 3,638,780
|
def race_from_string(race_str):
    """Convert a race string to one of ['white', 'black', 'other', None]."""
    race_dict = {
        "White/Caucasian": 'white',
        "Black/African American": 'black',
        "Unknown": None,
        "": None
    }
    return race_dict.get(race_str, 'other')
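# e.g. race_from_string("White/Caucasian") -> 'white',
#      race_from_string("Asian") -> 'other', race_from_string("Unknown") -> None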
|
1d38469537c3f5f6a4a42712f5ec1dbd26a471bd
| 3,638,781
|
def test_wrap_coordinates(coords, origin, wgs84):
""" Test whether coordinates wrap around the antimeridian in wgs84 """
lon_under_minus_170 = False
lon_over_plus_170 = False
if isinstance(coords[0], list):
for c in coords[0]:
c = list(transform(origin, wgs84, *c))
if c[0] < -170:
lon_under_minus_170 = True
elif c[0] > 170:
lon_over_plus_170 = True
else:
return False
return lon_under_minus_170 and lon_over_plus_170
|
bd95c3bc1fd4f500e3af7a68af9d5e327656165e
| 3,638,782
|
from copy import deepcopy

def filter_dfg_contain_activity(dfg0, start_activities0, end_activities0, activities_count0, activity, parameters=None):
"""
Filters the DFG keeping only nodes that can reach / are reachable from activity
Parameters
---------------
dfg0
Directly-follows graph
start_activities0
Start activities
end_activities0
End activities
activities_count0
Activities count
activity
Activity that should be reachable / should reach all the nodes of the filtered graph
parameters
Parameters
Returns
---------------
dfg
Filtered DFG
start_activities
Filtered start activities
end_activities
Filtered end activities
activities_count
Filtered activities count
"""
if parameters is None:
parameters = {}
# since the dictionaries/sets are modified, a deepcopy is the best option to ensure data integrity
dfg = deepcopy(dfg0)
start_activities = deepcopy(start_activities0)
end_activities = deepcopy(end_activities0)
activities_count = deepcopy(activities_count0)
changed = True
while changed:
changed = False
predecessors = dfg_utils.get_predecessors(dfg, activities_count)
successors = dfg_utils.get_successors(dfg, activities_count)
predecessors_act = predecessors[activity].union({activity})
successors_act = successors[activity].union({activity})
start_activities1 = {x: y for x, y in start_activities.items() if x in predecessors_act}
end_activities1 = {x: y for x, y in end_activities.items() if x in successors_act}
if start_activities != start_activities1 or end_activities != end_activities1:
changed = True
start_activities = start_activities1
end_activities = end_activities1
reachable_nodes = predecessors_act.union(successors_act)
if reachable_nodes != set(activities_count.keys()):
changed = True
activities_count = {x: y for x, y in activities_count.items() if x in reachable_nodes}
dfg = {x: y for x, y in dfg.items() if x[0] in reachable_nodes and x[1] in reachable_nodes}
return dfg, start_activities, end_activities, activities_count
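# Hedged usage sketch (a DFG as a dict of (source, target) -> frequency; assumes
# pm4py's dfg_utils helpers are importable):
#
#     dfg = {('a', 'b'): 5, ('b', 'c'): 5, ('x', 'y'): 2}
#     sa, ea = {'a': 5, 'x': 2}, {'c': 5, 'y': 2}
#     counts = {'a': 5, 'b': 5, 'c': 5, 'x': 2, 'y': 2}
#     # keeps only the component that can reach / be reached from 'b'
#     dfg2, sa2, ea2, cnt2 = filter_dfg_contain_activity(dfg, sa, ea, counts, 'b')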
|
3969de1413e70234f76ab8f9e7a52126615d12c3
| 3,638,783
|
# Assumption: get_random_bytes comes from PyCryptodome's Crypto.Random.
from Crypto.Random import get_random_bytes

def generate_secret_key(length=16):
    """
    Generates a random key of the given length.
    :param length: Length of the key to generate, in bytes.
    :type length: :class:`int`
    :returns: :class:`bytes` -- The generated key.
    """
    return get_random_bytes(length)
|
76fd617e3b316321d2efc09e9f9253970e5b2d2d
| 3,638,784
|
import io
def nouveau_flux(title: str, link: str, description: str) -> parse:
    """
    Create a new RSS feed.
    Parameters
    ----------
    title : str
        Title of the RSS feed.
    link : str
        Link to the RSS feed.
    description : str
        General description of the content.
    Returns
    -------
    parse
        XML tree (ElementTree).
    """
    flux = rss.RSS2(title, link, description, pubDate=Dt.now())
    # rss.RSS2.write_xml does not return a value; it only writes to a
    # file-like object, so capture its output in a StringIO buffer.
    f = io.StringIO()
    flux.write_xml(f)
    f.seek(0)
    return parse(f)
|
693f4df7c645fceddc288bd3511ec412cc7e9bd7
| 3,638,785
|
def extract_app_name_key():
"""
Extracts the application name redis key and hash from the request
The key should be of format:
<metrics_prefix>:<metrics_application>:<ip>:<rounded_date_time_format>
ie: "API_METRICS:applications:192.168.0.1:2020/08/04:14"
The hash should be of format:
<app_name>
ie: "audius_dapp"
"""
application_name = request.args.get(app_name_param, type=str, default=None)
ip = request.headers.get('X-Forwarded-For', request.remote_addr)
date_time = get_rounded_date_time().strftime(datetime_format)
application_key = f"{metrics_prefix}:{metrics_application}:{ip}:{date_time}"
return (application_key, application_name)
|
5ec563855fc417774368bf9027d8b853739fdc27
| 3,638,788
|
def load(filename, fs, duration, flipud = True, display=False, **kwargs):
"""
Load an image from a file or an URL
Parameters
----------
filename : string
Image file name, e.g. ``test.jpg`` or URL.
fs : scalar
Sampling frequency of the audiogram (in Hz)
duration : scalar
Duration of the audiogram (in s)
flipud : boolean, optional, default is True
Vertical flip of the matrix (image)
display : boolean, optional, default is False
if True, display the image
**kwargs, optional. This parameter is used by plt.plot
figsize : tuple of integers, optional, default: (4,10)
width, height in inches.
    title : string, optional, default : 'loaded spectrogram'
        title of the figure
    xlabel : string, optional, default : 'Time [sec]'
        label of the horizontal axis
    ylabel : string, optional, default : 'Frequency [Hz]'
        label of the vertical axis
cmap : string or Colormap object, optional, default is 'gray'
See https://matplotlib.org/examples/color/colormaps_reference.html
in order to get all the existing colormaps
examples: 'hsv', 'hot', 'bone', 'tab20c', 'jet', 'seismic',
'viridis'...
vmin, vmax : scalar, optional, default: None
`vmin` and `vmax` are used in conjunction with norm to normalize
luminance data. Note if you pass a `norm` instance, your
settings for `vmin` and `vmax` will be ignored.
ext : scalars (left, right, bottom, top), optional, default: None
The location, in data-coordinates, of the lower-left and
upper-right corners. If `None`, the image is positioned such that
the pixel centers fall on zero-based (row, column) indices.
dpi : integer, optional, default is 96
Dot per inch.
For printed version, choose high dpi (i.e. dpi=300) => slow
For screen version, choose low dpi (i.e. dpi=96) => fast
format : string, optional, default is 'png'
Format to save the figure
... and more, see matplotlib
Returns
-------
im : ndarray
The different color bands/channels are stored in the
third dimension, such that a gray-image is MxN, an
RGB-image MxNx3 and an RGBA-image MxNx4.
    ext : list of scalars [left, right, bottom, top]
        The location, in data-coordinates, of the lower-left and
        upper-right corners of the spectrogram: [0, duration, 0, fs/2].
dt : scalar
Time resolution of the spectrogram (horizontal x-axis)
df : scalar
Frequency resolution of the spectrogram (vertical y-axis)
"""
    print(72 * '_')
    print("loading %s..." % filename)
# Load image
im = imread(filename, as_gray=True)
# if 3D, convert into 2D
if len(im.shape) == 3:
im = im[:,:,0]
# Rescale the image between 0 to 1
im = linear_scale(im, minval= 0.0, maxval=1.0)
# Get the resolution
df = fs/(im.shape[0]-1)
dt = duration/(im.shape[1]-1)
# Extent
ext = [0, duration, 0, fs/2]
# flip the image vertically
if flipud: im = np.flip(im, 0)
# Display
if display :
ylabel =kwargs.pop('ylabel','Frequency [Hz]')
xlabel =kwargs.pop('xlabel','Time [sec]')
title =kwargs.pop('title','loaded spectrogram')
cmap =kwargs.pop('cmap','gray')
figsize=kwargs.pop('figsize',(4, 13))
vmin=kwargs.pop('vmin',0)
vmax=kwargs.pop('vmax',1)
_, fig = plot2D (im, extent=ext, figsize=figsize,title=title,
ylabel = ylabel, xlabel = xlabel,vmin=vmin, vmax=vmax,
cmap=cmap, **kwargs)
return im, ext, dt, df
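# Hedged usage sketch (assumes a spectrogram-like image on disk):
#
#     im, ext, dt, df = load('spectrogram.png', fs=44100, duration=10)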
|
820be7b1cac4dd6e1b2e06cd28b01f47536930ad
| 3,638,789
|
def get_status(lib, device_id):
"""
A function of reading status information from the device
You can use this function to get basic information about the device status.
:param lib: structure for accessing the functionality of the libximc library.
:param device_id: device id.
"""
x_status = status_t()
result = lib.get_status(device_id, byref(x_status))
if result == Result.Ok:
return x_status
else:
return None
|
2321c1d61ec2566c4480c55c477c0fb598855df6
| 3,638,790
|
import heapq
import json
from pathlib import Path

import cv2
import numpy as np
def query_by_image_objects(image_path, weights_path, cfg_path, names_path,
confidence_threshold=0.5, save=False):
"""Processes user-uploaded image to retrieve similar images from database.
First, all the objects in the image are detected using the :method:
``rubrix.images.detect.detect_objects``. Next, the image descriptor array
for the user-uploaded image is compared with that of all pruned images so
as to retrieve the top-5 results.
Arguments:
----------
    image_path (str or pathlib.Path):
        Path to the user-uploaded image, for reverse-image search.
    weights_path (pathlib.Path):
        Path to YOLOv4 pretrained weights file.
    cfg_path (pathlib.Path):
        Path to darknet configuration file.
    names_path (pathlib.Path):
        Path to darknet names file.
    confidence_threshold (float):
        Minimum detection confidence for an object to be kept.
    save (bool):
        If True, save predictions to /assets/predictions.
Returns:
--------
results (list of pathlib.Path objects):
List of paths to images retrieved for user query.
"""
# Retrieve image descriptor vector for user-uploaded image.
array = extract_image_descriptors(image_path, 'inception', TARGET_SIZE)
array = array.reshape(-1)
# Retrieve YOLOv4 model related variables to detect objects in an image.
net = get_yolo_net(cfg_path, weights_path)
labels = get_labels(names_path)
image = cv2.imread(str(image_path))
objects = detect_objects(net, labels, image, confidence_threshold)
index_path = pathfinder.get('assets', 'index.json')
with open(index_path, 'r') as json_file:
index = json.load(json_file)
paths_to_images = set([])
    for obj in objects:
        paths_to_images |= set(index[obj])
descriptors_path = pathfinder.get('assets', 'data', 'descriptors')
results = []
for path in paths_to_images:
path = Path(path)
other_array = np.load(descriptors_path / f'{path.stem}.npy')
score = dot_product(array, other_array)
results.append(ReverseSearchResultObject(
name=path.name,
path_to_image=path,
score=score,
)
)
# Using heaps to extract N largest results from a list of n elements
# is recommended, as the time complexity to do so is O(n * logN), which
# is approximately O(n) if N is relatively small.
results = heapq.nlargest(5, results)
results = [result.path_to_image for result in results]
if save:
# Save predictions to /assets/predictions.
save_predictions(results)
return results
|
dc718b3b5fe5f513b9281c5db30516ed32c8246f
| 3,638,791
|
def suspend_supplier_services(client, logger, framework_slug, supplier_id, framework_info, dry_run):
"""
    The supplier ID list should have been flagged by CCS as requiring action, but double check that the supplier:
    - has some services on the framework
    - has `agreementReturned: false`
    - does not have `agreementStatus: on-hold`
    :param client: API client instance
    :param logger: logger instance
    :param framework_slug: slug of the framework being checked
    :param supplier_id: ID of the supplier to suspend
    :param framework_info: JSON
    :param dry_run: don't suspend if True
    :return: suspended_service_count
    :rtype: int
"""
suspended_service_count = 0
# Ignore any 'private' services that the suppliers have removed themselves
new_service_status, old_service_status = 'disabled', 'published'
if not framework_info['frameworkInterest']['onFramework']:
logger.error(f'Supplier {supplier_id} is not on the framework.')
return suspended_service_count
if framework_info['frameworkInterest']['agreementReturned']:
logger.error(f'Supplier {supplier_id} has returned their framework agreement.')
return suspended_service_count
if framework_info['frameworkInterest']['agreementStatus'] == 'on-hold':
logger.error(f"Supplier {supplier_id}'s framework agreement is on hold.")
return suspended_service_count
# Find the supplier's non-private services on this framework
services = client.find_services(
supplier_id=supplier_id, framework=framework_slug, status=old_service_status
)
if not services['services']:
logger.error(f'Supplier {supplier_id} has no {old_service_status} services on the framework.')
return suspended_service_count
# Suspend all services for each supplier (the API will de-index the services from search results)
logger.info(
f"Setting {services['meta']['total']} services to '{new_service_status}' for supplier {supplier_id}."
)
for service in services['services']:
if dry_run:
logger.info(f"[DRY RUN] Would suspend service {service['id']} for supplier {supplier_id}")
else:
client.update_service_status(service['id'], new_service_status, "Suspend services script")
suspended_service_count += 1
# Return suspended service count (i.e. if > 0, some emails need to be sent)
return suspended_service_count
|
6442bf7f287126c6e1fe445fa9bca1ccde4d142f
| 3,638,792
|
def get_campaigns_with_goal_id(campaigns, goal_identifier):
    """Returns campaigns having the same goal_identifier passed in the args
    from the campaigns list
    Args:
        campaigns (list): List of campaign objects
        goal_identifier (str): Global goal identifier
    Returns:
        tuple (campaign_goal_list, campaigns_without_goal): campaign_goal_list is a list
        of (campaign, campaign_goal) tuples
    """
    campaign_goal_list = []
    campaigns_without_goal = []
    for campaign in campaigns:
        campaign_goal = get_campaign_goal(campaign, goal_identifier)
if campaign_goal:
campaign_goal_list.append((campaign, campaign_goal))
else:
campaigns_without_goal.append(campaign)
return campaign_goal_list, campaigns_without_goal
|
83d77ee2e6c1b9b5025a24e996e573fe816dd4b7
| 3,638,794
|
from collections import defaultdict

def tautologically_define_state_machine_transitions(state_machine):
"""Create a mapping of all transitions in ``state_machine``
Parameters
----------
state_machine : super_state_machine.machines.StateMachine
The state machine you want a complete map of
Returns
-------
dict
Dictionary of all transitions in ``state_machine``
        Structured as
        {to_state1: [(from_state, allowed), ...],
         to_state2: [(from_state, allowed), ...],
        }
        where
        - ``allowed`` is a boolean
        - ``to_stateN`` is a string
        - ``from_state`` is a string
"""
transitions_as_enum = state_machine.__class__._meta['transitions']
transitions_as_names = {
to_state.value: [from_state.value for from_state in from_states]
for to_state, from_states in transitions_as_enum.items()}
transition_map = defaultdict(list)
all_states = set(state_machine.States.states())
for to_state, from_states in transitions_as_names.items():
for from_state in all_states:
allowed = True
if from_state not in from_states:
allowed = False
transition_map[to_state].append((from_state, allowed))
return transition_map
|
0e7300a616811e26481b9a66ee8f22336fbd5943
| 3,638,795
|
from typing import List

from torch import Tensor
def topk_errors(preds: Tensor, labels: Tensor, ks: List[int]):
"""
Computes the top-k error for each k.
Args:
        preds (Tensor): prediction scores, shape [N, num_classes].
        labels (Tensor): ground-truth labels, shape [N].
ks (list): list of ks to calculate the top accuracies.
"""
num_topks_correct = topks_correct(preds, labels, ks)
return [(1.0 - x / preds.size(0)) for x in num_topks_correct]
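# Hedged usage sketch (assumes the topks_correct helper from the same module):
#
#     errs = topk_errors(preds, labels, ks=[1, 5])  # [top-1 error, top-5 error]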
|
39a69f745eb789df4a47a776f7c84fa0b7a8b25a
| 3,638,796
|
def extract_data_from_inspect(network_name, network_data):
"""
:param network_name: str
:param network_data: dict
:return: dict:
{
"ip_address4": "12.34.56.78"
"ip_address6": "ff:fa:..."
}
"""
a4 = None
if network_name == "host":
a4 = "127.0.0.1"
n = {}
a4 = graceful_chain_get(network_data, "IPAddress") or a4
if a4:
n["ip_address4"] = a4
a6 = graceful_chain_get(network_data, "GlobalIPv6Address")
if a6:
n["ip_address4"] = a6
return n
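# e.g. (assuming graceful_chain_get returns None for missing keys):
#     extract_data_from_inspect("bridge", {"IPAddress": "172.17.0.2"})
#     # -> {"ip_address4": "172.17.0.2"}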
|
431eb3f7bda8c5e4d5580bc4be19185223d39c4d
| 3,638,797
|