| content (string, 35-762k chars) | sha1 (40-char string) | id (int64, 0-3.66M) |
|---|---|---|
def if_analyser(string):
    """Use Python's eval() to evaluate the expression to True/False."""
trans = sign_transform(string.strip().lower())
# print('if_analyser>>', trans)
boool = eval(trans)
boool = 1 if boool else 0
return boool
|
a27469a6c23a53f0131e8135600c6dc7d596cdbb
| 3,644,245
|
def zzX_trunc(f, p):
"""Reduce Z[X] polynomial modulo polynomial p. """
return zzX_strip([ zzX_rem(g, p) for g in f ])
|
9e80862a229b1a0689dea01fef865997ee87d1f9
| 3,644,246
|
import numpy as np
from typing import List
def _format_bin_intervals(bins_arr: np.ndarray) -> List[str]:
"""
    Auxiliary function to format bin intervals in a histogram
Parameters
----------
bins_arr: np.ndarray
Bin endpoints to format into intervals
Returns
-------
List of formatted bin intervals
"""
bins_arr = np.round(bins_arr, 3)
intervals = [f"[{bins_arr[i]},{bins_arr[i+1]})" for i in range(len(bins_arr) - 2)]
intervals.append(f"[{bins_arr[-2]},{bins_arr[-1]}]")
return intervals
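# --- Usage sketch (added; not part of the original snippet) ---
# Four endpoints yield three intervals, the last one closed:
# _format_bin_intervals(np.array([0.0, 0.5, 1.0, 1.5]))
# -> ['[0.0,0.5)', '[0.5,1.0)', '[1.0,1.5]']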
|
96d3a89fc3427bf33abe5c44a04061694ae7b2b3
| 3,644,247
|
from typing import List
from typing import Dict
from typing import Callable
def get_embedder_functions(corpus: List[str]) -> Dict[str, Callable[[List[str]], List[float]]]:
"""
    Returns a dictionary of the available embedders.
#! If updated, update next function too
"""
embedders = {
# 'Bag of Words': bow_embedder(corpus),
'FastText (CBOW)': fasttext_embedder(corpus, model_type="cbow"),
'FastText (Skipgram)': fasttext_embedder(corpus, model_type="skipgram"),
'Doc2Vec': doc2vec_embedder(corpus),
'GPT2 Small Spanish': bert_embedder(model_name="datificate/gpt2-small-spanish"),
'BERT: TinyBERT-spanish-uncased-finetuned-ner':
bert_embedder(model_name='mrm8488/TinyBERT-spanish-uncased-finetuned-ner'),
'BERT: paraphrase-xlm-r-multilingual-v1': bert_embedder(model_name='paraphrase-xlm-r-multilingual-v1'),
'BERT: distiluse-base-multilingual-cased-v2': bert_embedder(model_name='distiluse-base-multilingual-cased-v2'),
}
reduced_embedders = {}
for name, embedder in embedders.items():
reduced_embedders[f"{name} (50-d)"] = reduce_dimensionality(embedder)
return {**embedders, **reduced_embedders}
|
6e1d4ddd41725a26b940c7d108ea552366ab6c9b
| 3,644,248
|
import logging
def test_stimeit():
""" Test the stimeit function """
dummy_function = lambda x: x + 2
@vtime.stimeit(logging.info)
def stimeit_function(x):
return dummy_function(x)
assert dummy_function(42) == stimeit_function(42)
|
6d5cf6d261871cb466b71e93dbecfafff1731727
| 3,644,249
|
import numpy as np
import yaml
try:
    from yaml import CLoader as Loader
except ImportError:
    from yaml import Loader
def parse_thermal_properties(f):
"""thermal_properties.yaml parser."""
thermal_properties = {
"temperatures": [],
"free_energy": [],
"entropy": [],
"heat_capacity": [],
}
data = yaml.load(f, Loader=Loader)
for tp in data["thermal_properties"]:
thermal_properties["temperatures"].append(tp["temperature"])
thermal_properties["entropy"].append(tp["entropy"])
thermal_properties["free_energy"].append(tp["free_energy"])
thermal_properties["heat_capacity"].append(tp["heat_capacity"])
for key in thermal_properties:
thermal_properties[key] = np.array(thermal_properties[key])
tprops = get_thermal_properties(thermal_properties)
return tprops
|
4cc0020849e6ec1202fd2138f0bc86e5abfadf3b
| 3,644,250
|
def convert(ts, new_freq, include_partial=True, **kwargs):
"""
This function converts a timeseries to another frequency. Conversion only
works from a higher frequency to a lower frequency, for example daily to
monthly.
NOTE: add a gatekeeper for invalid kwargs.
"""
new_ts = ts.clone()
series_dir = ts.series_direction()
new_ts.sort_by_date(reverse=True)
freq_idx = HIERARCHY.index(ts.frequency)
    new_idx = HIERARCHY.index(new_freq)
daily_idx = HIERARCHY.index(FREQ_D)
if freq_idx > new_idx:
raise ValueError(
"Cannot convert from %s to %s." % (ts.frequency, new_freq)
)
dates = new_ts.datetime_series()
date_series_type = ts.get_date_series_type()
if date_series_type == TS_ORDINAL:
selected = _filter_dates(dates, new_freq, kwargs)
elif date_series_type == TS_TIMESTAMP:
selected = _filter_idates(
dates, new_freq, end_of_period=ts.end_of_period
)
else:
raise ValueError("Invalid date series type: %s" % (date_series_type))
if selected.shape[0] > 0:
if new_ts.end_of_period:
selected += 1 # shift to start of next period
if include_partial or freq_idx > daily_idx:
if selected[0] != 0:
                # insert most recent date
                selected = np.insert(selected, 0, 0)
if freq_idx > daily_idx:
# already processed (probably)
if selected[-1] != len(dates) - 1:
selected = np.append(selected, len(dates) - 1)
new_ts.tseries = new_ts.tseries[selected.flatten()]
new_ts.frequency = new_freq
if new_freq == FREQ_D:
# convert dates from timestamp to ordinal
new_ts.dseries = np.fromiter(
[date.toordinal() for date in np.array(dates)[selected]],
dtype=np.int32,
)
else:
new_ts.dseries = new_ts.dseries[selected]
new_ts.dseries = new_ts.dseries.flatten()
if series_dir != new_ts.series_direction():
new_ts.reverse()
return new_ts
|
a6b8daf6092052c0d7872d4b9a75edbe10bc15e5
| 3,644,251
|
def _get_image_info(name: str) -> versions.Image:
"""Retrieve an `Image` information by name from the versions listing."""
try:
return versions.CONTAINER_IMAGES_MAP[name]
except KeyError:
raise ValueError(
'Missing version for container image "{}"'.format(name)
)
|
4a328d6924adc3c826a6a01c46a27e6380d5d89a
| 3,644,252
|
import numpy as np
import quantities as pq
def _mother_proc_cpp_stat(
        amplitude_distribution, t_stop, rate, t_start=0 * pq.ms):
"""
Generate the hidden ("mother") Poisson process for a Compound Poisson
Process (CPP).
Parameters
----------
amplitude_distribution : np.ndarray
CPP's amplitude distribution :math:`A`. `A[j]` represents the
probability of a synchronous event of size `j` among the generated
spike trains. The sum over all entries of :math:`A` must be equal to
one.
t_stop : pq.Quantity
The stopping time of the mother process
rate : pq.Quantity
Homogeneous rate of the n spike trains that will be generated by the
CPP function
t_start : pq.Quantity, optional
The starting time of the mother process
Default: 0 pq.ms
Returns
-------
Poisson spike train representing the mother process generating the CPP
"""
n_spiketrains = len(amplitude_distribution) - 1
# expected amplitude
exp_amplitude = np.dot(
amplitude_distribution, np.arange(n_spiketrains + 1))
# expected rate of the mother process
exp_mother_rate = (n_spiketrains * rate) / exp_amplitude
return StationaryPoissonProcess(
rate=exp_mother_rate, t_stop=t_stop, t_start=t_start
).generate_spiketrain()
|
90ea9272c1a5541ea5c278960369ea301b31d01a
| 3,644,253
|
import telegram
import urllib.error
def get_service(hass, config):
"""Get the Telegram notification service."""
if not validate_config({DOMAIN: config},
{DOMAIN: [CONF_API_KEY, 'chat_id']},
_LOGGER):
return None
try:
bot = telegram.Bot(token=config[CONF_API_KEY])
username = bot.getMe()['username']
_LOGGER.info("Telegram bot is '%s'.", username)
except urllib.error.HTTPError:
_LOGGER.error("Please check your access token.")
return None
return TelegramNotificationService(config[CONF_API_KEY], config['chat_id'])
|
474efeccaef641ba50042a036d5edf6d6e86f90c
| 3,644,254
|
def has_numbers(input_str: str):
""" Check if a string has a number character """
return any(char.isdigit() for char in input_str)
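# --- Usage sketch (added; not part of the original snippet) ---
assert has_numbers("abc123") is True
assert has_numbers("abc") is False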
|
5038cb737cdcfbad3a7bd6ac89f435559b67cebc
| 3,644,255
|
def get_report_permission(report: Report, user: User) -> Permission:
"""Get permission of given user for the report.
:param report: The report
:type report: Report
:param user: The user whose permissions are to be checked
:type user: User
:return: The user's permissions for the report
:rtype: Permission
"""
if 'reports' in session and str(report.id) in session['reports']:
return session['reports'][str(report.id)]
rp = ReportPermission.query.filter_by(ReportId=report.id, UserId=user.id).first()
if rp is None and user.Role == 's':
return ADMIN_DEFAULT_PERMISSION
if rp is None:
return 'n'
return rp.Type
|
bb09d744c133a4c9212ab6c6e2ba345bb9c8f78f
| 3,644,257
|
def peak_time_from_sxs(
sxs_format_waveform,
metadata,
extrapolation_order='Extrapolated_N2'):
"""Returns the time when the sum of the squared amplitudes of an
SXS-format waveform is largest. Note: this is not necessarily the time of
the peak of the l=m=2 mode."""
extrap = extrapolation_order + ".dir"
# All modes have the same time, so just look at the l=m=2 mode to get the
# times
    times = sxs_format_waveform[extrap]['Y_l2_m2.dat'][:, 0]
start = first_index_before_reference_time(times, metadata)
sum_amp_squared = waveform_norm_squared(
sxs_format_waveform, extrapolation_order)
index_peak = start + sum_amp_squared[start:].argmax()
return sxs_format_waveform[extrap]['Y_l2_m2.dat'][index_peak][0]
|
1ad4b593db3aa3d74170056f2c32d4108ec05a48
| 3,644,258
|
def create_trackhub_resource(project_dir, api_client, create_user_resource, create_genome_assembly_dump_resource):
"""
This fixture is used to create a temporary trackhub using POST API
The created trackhub will be used to test GET API
"""
_, token = create_user_resource
api_client.credentials(HTTP_AUTHORIZATION='Token ' + str(token))
submitted_hub = {
'url': 'file:///' + str(project_dir) + '/' + 'samples/JASPAR_TFBS/hub.txt'
}
response = api_client.post('/api/trackhub/', submitted_hub, format='json')
return response
|
a81db1e7c9c95355457d9f6c4ec4c6428e1a77a7
| 3,644,259
|
from shapely.geometry import Point
def create_node(x, y):
"""Create a node along the network.
Parameters
----------
    x : iterable of {float, int}
        The x coordinate(s); only the first element is used.
    y : iterable of {float, int}
        The y coordinate(s); only the first element is used.
    Returns
    -------
    _node : shapely.geometry.Point
Instantiated node.
"""
_node = Point(list(zip(x, y))[0])
return _node
|
d8645b77a3d843bf3855522c63156916432ae899
| 3,644,260
|
# ``Match`` here is the application's own match class (providing
# ``matchroom_name``), not ``typing.Match``; the original
# ``from typing import Match`` was spurious.
def get_matchroom_name(match: Match) -> str:
"""Get a new unique channel name corresponding to the match.
Parameters
----------
match: Match
The match whose info determines the name.
Returns
-------
str
The name of the channel.
"""
name_prefix = match.matchroom_name
cut_length = len(name_prefix) + 1
largest_postfix = 1
found = False
for channel in server.server.channels:
if channel.name.startswith(name_prefix):
found = True
try:
val = int(channel.name[cut_length:])
largest_postfix = max(largest_postfix, val)
except ValueError:
pass
return name_prefix if not found else '{0}-{1}'.format(name_prefix, largest_postfix + 1)
|
404d21b6f88918204aa287c9227640c03f47b916
| 3,644,261
|
import re
import unidecode
def unidecode_name(uname):
"""
unidecode() of cjk ideograms can produce strings which contain spaces.
Strip leading and trailing spaces, and reduce double-spaces to single.
For some other ranges, unidecode returns all-lowercase names; fix these
up with capitalization.
"""
# Fix double spacing
name = unidecode.unidecode(uname)
if name == uname:
return name
name = re.sub(' +', ' ', name.strip().replace('@', '').replace('"', ''))
name = re.sub(r'(\w)\.(\w)', r'\1\2', name)
# Fix all-upper and all-lower names:
# Check for name particles -- don't capitalize those
m = name_particle_match(name)
particle = m.group(1) if m else None
# Get the name parts
prefix, first, middle, last, suffix = name_parts(name)
# Capitalize names
first = first.title()
middle = ' '.join([ capfirst(p) for p in middle.split() ])
last = ' '.join([ capfirst(p) for p in last.split() ])
if len(last) == 1:
last = (last+last).capitalize()
# Restore the particle, if any
if particle and last.startswith(capfirst(particle)+' '):
last = ' '.join([ particle, last[len(particle)+1:] ])
# Recombine the parts
parts = prefix, first, middle, last, suffix
name = ' '.join([ p for p in parts if p and p.strip() != '' ])
name = re.sub(' +', ' ', name)
return name
|
16676453059b53b3e397f33630e00de66f3585b9
| 3,644,262
|
def scnet50(**kwargs):
"""
SCNet-50 model from 'Improving Convolutional Networks with Self-Calibrated Convolutions,'
http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_scnet(blocks=50, model_name="scnet50", **kwargs)
|
c900c5a0da1f4f0960ced2ba36fb9785a7340f4a
| 3,644,263
|
def xception_block(inputs, depth_list, prefix, skip_connect_type, stride, rate=1,
                   depth_activation=False, return_skip=False):
    """Builds an Xception block: the usual residual structure, but with the
    convolutions replaced by depthwise separable convolutions
    (depthwise + pointwise + conv 1x1)."""
    residual = inputs
    for i in range(3):
        # depthwise + pointwise + conv2d
        residual = sep_layer(residual, depth_list[i], prefix + '_separable_conv{}'.format(i + 1),
                             stride=stride if i == 2 else 1, rate=rate, depth_activation=depth_activation)
        if i == 1:
            skip = residual  # main-path output after two separable convs
    if skip_connect_type == 'conv':
        # skip connection: the input passes through a side conv, then is added to the main path
        shortcut = conv_same_layer(inputs, depth_list[-1], prefix + '_shortcut', k_size=1, stride=stride)
        shortcut = layers.BatchNormalization(name=prefix + '_shortcut_BN')(shortcut)
        output = layers.Add()([residual, shortcut])
    elif skip_connect_type == 'sum':
        # skip connection: add the input directly to the main path
        output = layers.Add()([residual, inputs])
    elif skip_connect_type == 'none':
        # no skip connection
        output = residual
    if return_skip:
        # output is the whole block's output; skip is the main path's output after two sep convs
return output, skip
else:
return output
|
3a8eaf7cb73216039411ec8fddd7bfb8c81604a6
| 3,644,264
|
def auth_test():
"""
    Tests that the endpoint authentication works.
:return:
"""
return "hello"
|
7c65897d83b0af41307aec28d7f2ce3d6852f8b7
| 3,644,265
|
from keras import layers
from keras.models import Model
def cnn_2x_lstm_siamese(voc_size, max_len, dropout=0.5):
"""Two siamese branches, each embedding a statement.
Binary classifier on top.
Args:
voc_size: size of the vocabulary for the input statements.
max_len: maximum length for the input statements.
dropout: Fraction of units to drop.
Returns:
A Keras model instance.
"""
pivot_input = layers.Input(shape=(max_len,), dtype='int32')
statement_input = layers.Input(shape=(max_len,), dtype='int32')
x = layers.Embedding(
output_dim=256,
input_dim=voc_size,
input_length=max_len)(pivot_input)
x = layers.Convolution1D(256, 7, activation='relu')(x)
x = layers.MaxPooling1D(3)(x)
x = layers.Convolution1D(256, 7, activation='relu')(x)
x = layers.MaxPooling1D(5)(x)
embedded_pivot = layers.LSTM(256)(x)
encoder_model = Model(pivot_input, embedded_pivot)
embedded_statement = encoder_model(statement_input)
    concat = layers.concatenate([embedded_pivot, embedded_statement])
x = layers.Dense(256, activation='relu')(concat)
x = layers.Dropout(dropout)(x)
prediction = layers.Dense(1, activation='sigmoid')(x)
model = Model([pivot_input, statement_input], prediction)
return model
|
d0eec28b8e91bed77fbc84bd085cae337da04c61
| 3,644,266
|
import numpy as np
def roll_function(positions, I, angular_velocity):
"""
Due to how the simulations are generated where the first point of the simulation
is at the smallest x value and the subsequent positions are in a clockwise
(counterclockwise) direction when the vorticity is positive (negative), the first
point of the simulated intensity might lie in the middle of an intensity trace.
This needs to be compensated for by rolling array elements. Simulations come onto
the screen from one of 4 sides. Which side the sim comes onto the screen and
which side the sim leaves the screen defines how to roll the intensity as a function
of time such that the first returned position is at the entrance and the final returned
position is at the exit.
Args:
positions (array): position of Particle
I (array): intensities calculated as a function of position
angular velocity (float): Particle angular velocity
Returns:
p (array): position of Particle, adjusted to preserve order of peaks
I (array): intensities calculated as a function of p, adjusted to preserve order of peaks
"""
p = positions.T
x_0 = p[0][0]
y_0 = p[1][0]
clockwise = True
if angular_velocity < 0:
clockwise = False
roll = 0
if clockwise:
if (x_0>0) and (y_0>0) and (y_0<616):
# need to roll
if 616/2 > y_0: # orbit starts in upper half of screen
try:
rollval = -np.argwhere(p[1][:(len(p[1])//4+1)]==0)[0]
except IndexError: #if none of the points is actually equal to 0
rollval = -np.abs(p[1][:(len(p[1])//4+1)]).argmin()
p = np.roll(p,rollval,axis=1)
I = np.roll(I,rollval)
else: #orbit starts in middle or lower half of screen
try:
rollval = np.argwhere(p[1]==616)[0]+len(p[1])//2
                except IndexError:  # if none of the points is actually equal to 616
rollval = np.abs(p[1][3*(len(p[1])//4):]).argmin()
p = np.roll(p,rollval,axis=1)
I = np.roll(I,rollval)
    else:
        raise NotImplementedError(
            'Rolling for counterclockwise vorticity is not implemented yet.')
return p.T, I
|
79e9e3fbcdd2bfc1f2f9108f17aeeeb13fc6339d
| 3,644,267
|
def top10():
"""Renders the top 10 page."""
top10_urls = ShortURL.query.order_by(ShortURL.hits.desc()).limit(10)
return render_template("top10.html", urls=top10_urls)
|
781c7e65b94894e1292e626c163dfecb1d966678
| 3,644,269
|
def rename_group(str_group2=None):
"""
Rename OFF food group (pnns_group_2) to a standard name
Args:
str_group2 (str): OFF food group name
Returns:
conv_group (str): standard food group name
"""
#convert_group1 = {'Beverage':['Beverages'],
# 'Cereals':['Cereals and potatoes'],
# 'Meal':['Composite foods'],
# 'Fat':['Fat and sauces'],
# 'Meat':['Fish Meat Eggs'],
# 'Fruits and vegetables':['Fruits and vegetables','fruits-and-vegetables'],
# 'Dairy':['Milk and dairy products'],
# 'Snack':['Salty snacks','Sugary snacks','sugary-snacks'],
# None:[None,'unknown','']}
convert_group2 = {'Beverage':['Alcoholic beverages','Artificially sweetened beverages',
'Fruit juices','Fruit nectars','Non-sugared beverages',
'Sweetened beverages'],
'Cereals':['Bread','Breakfast cereals','Cereals','Legumes','Patatoes'],
'Meal':['One-dish meals','Pizza pies and quiche','Sandwich'],
'Fat':['Dressings and sauces','Fats'],
'Meat':['Tripe dishes','Eggs','Fish and seafood','Meat','Processed meat','Nuts'],
'Fruit':['Fruits','fruits','Dried fruits'],
'Vegetable':['Soups','Vegetables','vegetables'],
'Dairy':['Cheese','Dairy desserts','Ice cream','Milk and yogurt'],
'Snack':['Appetizers','Salty and fatty products','Biscuits and cakes',
'Chocolate products','Sweets','pastries'],
None:[None,'unknown','']}
conv_group = [key for (key, value) in convert_group2.items() if (str_group2 in value)]
conv_group = [None] if not conv_group else conv_group
return conv_group[0]
|
31b52f600fe3a087f8b230c880ae55f0dd63264e
| 3,644,270
|
def run_basic():
"""Check that the windows all open ok (i.e. is GUI functioning?)."""
_initialize()
s = 'Simulation'
p = 'Plots'
menu_paths = [ (s,'Test Pattern'),
(s,'Model Editor'),
(p,'Activity'),
(p,'Connection Fields'),
(p,'Projection'),
(p,'Projection Activity'),
(p,'Preference Maps','Orientation Preference'),
(p,'Tuning Curves','Orientation Tuning') ]
return ft.run([_menu_item_fn(*x) for x in menu_paths],"Running basic GUI tests...")
|
e90546b7312b9c7de5fd812784d91c5ef1c9f22f
| 3,644,273
|
def node_to_evenly_discretized(node):
"""
Parses the evenly discretized mfd node to an instance of the
:class: openquake.hazardlib.mfd.evenly_discretized.EvenlyDiscretizedMFD,
or to None if not all parameters are available
"""
if not all([node.attrib["minMag"], node.attrib["binWidth"],
node.nodes[0].text]):
return None
# Text to float
rates = [float(x) for x in node.nodes[0].text.split()]
return mfd.evenly_discretized.EvenlyDiscretizedMFD(
float(node.attrib["minMag"]),
float(node.attrib["binWidth"]),
rates)
|
168bf8efcacac4eaf5832bbab4b3708e8187d5dd
| 3,644,274
|
from django.contrib import messages
from django.shortcuts import get_object_or_404, redirect
# ``Collection`` and ``Comment`` are this app's Django models (imported
# elsewhere); the original ``from typing import Collection`` was spurious.
def delete_comment(request, collection_id, comment_id):
"""Delete comment if the staff or comment owner want to delete."""
collection = get_object_or_404(Collection, id=collection_id)
comment = get_object_or_404(Comment, id=comment_id, collection=collection)
if not request.user.is_authenticated:
messages.error(request, "Stop there! How dare you delete a comment without logging in?")
return redirect('collection', collection_id=collection.id)
if not request.user.is_staff and not request.user.is_superuser and request.user != comment.user:
messages.error(request, "Wait! This is not yours! You can't delete this comment!")
return redirect('collection', collection_id=collection.id)
# After this point, everything is valid now.
# It is safe to delete the comment
comment.delete()
    messages.success(request, "Comment deleted successfully!")
return redirect('collection', collection_id=collection.id)
|
e8de0e5b8fb1ca8d6b6f27009f34ef0b8678c7cd
| 3,644,275
|
import numpy
def layers_weights_as_vector(model, initial=True):
"""
Creates a list holding the weights of each layer (Conv and Dense) in the CNN as a vector.
    model: A reference to the instance from the cnn.Model class.
    initial: When True, the function returns the initial weights of the CNN;
        when False, the trained weights of the CNN layers are returned. The
        initial weights are only needed before network training starts; the
        trained weights are needed to predict the network outputs.
Returns a list (network_weights) holding the weights of the CNN layers as a vector.
"""
network_weights = []
layer = model.last_layer
while "previous_layer" in layer.__init__.__code__.co_varnames:
if type(layer) in [Conv2D, Dense]:
# If the 'initial' parameter is True, append the initial weights. Otherwise, append the trained weights.
if initial == True:
vector = numpy.reshape(layer.initial_weights, newshape=(layer.initial_weights.size))
# vector = pygad.nn.DenseLayer.to_vector(matrix=layer.initial_weights)
network_weights.extend(vector)
elif initial == False:
vector = numpy.reshape(layer.trained_weights, newshape=(layer.trained_weights.size))
# vector = pygad.nn.DenseLayer.to_vector(array=layer.trained_weights)
network_weights.extend(vector)
else:
raise ValueError("Unexpected value to the 'initial' parameter: {initial}.".format(initial=initial))
# Go to the previous layer.
layer = layer.previous_layer
# If the first layer in the network is not an input layer (i.e. an instance of the Input2D class), raise an error.
if not (type(layer) is Input2D):
raise TypeError("The first layer in the network architecture must be an input layer.")
# Currently, the weights of the layers are in the reverse order. In other words, the weights of the first layer are at the last index of the 'network_weights' list while the weights of the last layer are at the first index.
# Reversing the 'network_weights' list to order the layers' weights according to their location in the network architecture (i.e. the weights of the first layer appears at index 0 of the list).
network_weights.reverse()
return numpy.array(network_weights)
|
3a13d44868cb67c8ba757db3ceed8b6cf01bfbfb
| 3,644,276
|
def calculate_moist_adiabatic_lapse_rate(t, p):
"""calculate moist adiabatic lapse rate from pressure, temperature
p: pressure in hPa
t: temperature in Kelvin
returns: moist adiabatic lapse rate in Kelvin/m
"""
es = 611.2*np.exp(17.67*(t-273.15)/(t-29.65)) # Bolton formula, es in Pa
qs = 0.622*es/(p*100-0.378*es)
num = 1 + lv*qs/(Rdry*t)
denom = 1 + lv**2*qs/(cp*Rvap*t**2)
gamma = g/cp*(1-num/denom)
return gamma
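# --- Usage sketch (added): the function reads several physical constants from
# module scope; standard textbook values are assumed here for illustration.
import numpy as np

lv = 2.501e6   # latent heat of vaporization, J/kg
Rdry = 287.0   # specific gas constant of dry air, J/(kg K)
Rvap = 461.5   # specific gas constant of water vapor, J/(kg K)
cp = 1004.0    # specific heat of dry air at constant pressure, J/(kg K)
g = 9.81       # gravitational acceleration, m/s^2

print(calculate_moist_adiabatic_lapse_rate(300.0, 1000.0))  # lapse-rate value in K/m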
|
4a20b15bdee1ce72f10d85b74a68358fa7934093
| 3,644,277
|
def read_cat_file(genomeCatFile):
    """Read in genome categories and create a dictionary mapping each
    category name to the set of genomes in that category."""
    catDict = {}
    with open(genomeCatFile, 'r') as inFile:
        for line in inFile:
            entries = line.strip().split()
            genome = entries[0]
            cat = entries[1]
            if cat in catDict:
                catDict[cat].add(genome)
            else:
                catDict[cat] = {genome}
    return catDict
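# --- Input/output sketch (added) ---
# Given a whitespace-separated file such as:
#   genomeA  cat1
#   genomeB  cat1
#   genomeC  cat2
# read_cat_file returns {'cat1': {'genomeA', 'genomeB'}, 'cat2': {'genomeC'}}.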
|
23a30f29cb62d56a3e0763be34cad45717421815
| 3,644,278
|
def removeCable(n, edges):
    """Return the total cable weight that can be removed while keeping the
    network connected (total weight minus the minimum-spanning-tree weight).
    :param n: number of nodes (roads)
    :param edges: connectivity, given as (x, y, w) triples
    """
fa = initFa(n)
totalW, nodes = 0, []
for x, y, w in edges:
node = Node(x, y, w)
nodes.append(node)
totalW += w
    nodes.sort(key=lambda node: node.w)
tmpW = 0
for node in nodes:
if find(fa, node.x) == find(fa, node.y):
continue
fa[find(fa, node.x)] = find(fa, node.y)
tmpW += node.w
return totalW - tmpW
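# --- Usage sketch (added): ``Node``, ``initFa`` and ``find`` are not part of
# the snippet; these are minimal assumed implementations (plain union-find).
class Node:
    def __init__(self, x, y, w):
        self.x, self.y, self.w = x, y, w

def initFa(n):
    # Each node starts as its own parent.
    return list(range(n))

def find(fa, x):
    # Find the root, compressing the path along the way.
    while fa[x] != x:
        fa[x] = fa[fa[x]]
        x = fa[x]
    return x

# Triangle 0-1-2: keep the two lightest edges (1 + 2), remove the heaviest (3).
assert removeCable(3, [(0, 1, 1), (1, 2, 2), (0, 2, 3)]) == 3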
|
4b43cc0ddd1ea89113a95a6771dea97d2b21a0fb
| 3,644,281
|
from read_file import read_selected, narrow
def upload():
"""POST route through which downloading sequence is triggered
:param checked: which pins were selected by user
:returns: log of arrays with pins, files downloaded counts, and notes
"""
DASHRlut = findSNs(compCrawl())
checked = request.get_json()
chosen = narrow(checked, DASHRlut)
log = read_selected(chosen)
return jsonify(log), 200
|
99a00d173574b789e8e6681a0373c2b805a42e39
| 3,644,282
|
import flask
from oauth2client import client
def verify_credentials():
"""Verify credentials to gdrive for the current user"""
if 'credentials' not in flask.session:
return flask.redirect(flask.url_for('authorize_app', _external=True))
credentials = client.OAuth2Credentials.from_json(
flask.session['credentials'])
if credentials.access_token_expired:
return flask.redirect(flask.url_for('authorize_app', _external=True))
return None
|
96ebe13a7e04f245fa432f0dcbfecdb490367ad9
| 3,644,283
|
import cv2
import numpy as np
def _gaussian_blur(heatmaps, kernel=11):
    """Modulate heatmap distribution with Gaussian.
    sigma = 0.3*((kernel_size-1)*0.5-1)+0.8;
    sigma ~= 3 if k=17; sigma = 2 if k=11; sigma ~= 1.5 if k=7; sigma ~= 1 if k=3.
Note:
batch_size: N
num_keypoints: K
heatmap height: H
heatmap width: W
Args:
heatmaps (np.ndarray[N, K, H, W]): model predicted heatmaps.
kernel (int): Gaussian kernel size (K) for modulation, which should
match the heatmap gaussian sigma when training.
K=17 for sigma=3 and k=11 for sigma=2.
Returns:
np.ndarray[N, K, H, W]: Modulated heatmap distribution.
"""
assert kernel % 2 == 1
border = (kernel - 1) // 2
batch_size = heatmaps.shape[0]
num_joints = heatmaps.shape[1]
height = heatmaps.shape[2]
width = heatmaps.shape[3]
for i in range(batch_size):
for j in range(num_joints):
origin_max = np.max(heatmaps[i, j])
dr = np.zeros((height + 2 * border, width + 2 * border),
dtype=np.float32)
dr[border:-border, border:-border] = heatmaps[i, j].copy()
dr = cv2.GaussianBlur(dr, (kernel, kernel), 0)
heatmaps[i, j] = dr[border:-border, border:-border].copy()
heatmaps[i, j] *= origin_max / np.max(heatmaps[i, j])
return heatmaps
|
e49edc97eefc2f0de5200e4c8aee794642cb6a1f
| 3,644,284
|
import tensorflow as tf
def pose_vec2mat(vec):
"""Converts 6DoF parameters to transformation matrix
Args:
vec: 6DoF parameters in the order of tx, ty, tz, rx, ry, rz -- [B, 6]
Returns:
A transformation matrix -- [B, 4, 4]
"""
# batch_size, _ = vec.get_shape().as_list()
batch_size = tf.shape(vec)[0]
translation = tf.slice(vec, [0, 0], [-1, 3])
translation = tf.expand_dims(translation, -1)
rx = tf.slice(vec, [0, 3], [-1, 1])
ry = tf.slice(vec, [0, 4], [-1, 1])
rz = tf.slice(vec, [0, 5], [-1, 1])
rot_mat = euler2mat(rz, ry, rx)
rot_mat = tf.squeeze(rot_mat, axis=[1])
filler = tf.constant([0.0, 0.0, 0.0, 1.0], shape=[1, 1, 4])
filler = tf.tile(filler, [batch_size, 1, 1])
transform_mat = tf.concat([rot_mat, translation], axis=2)
transform_mat = tf.concat([transform_mat, filler], axis=1)
return transform_mat
|
1ecfb0461bc7ec19c1e730e4499510a890474b33
| 3,644,285
|
import collections
def get_gradients_through_compute_gradients(optimizer, loss, activations):
"""Compute gradients to send to TPU embedding.
Args:
optimizer: a subclass of optimizer.Optimizer, usually CrossShardOptimizer.
Used to call compute_gradients().
loss: a Tensor to call optimizer.compute_gradients() on.
activations: an OrderedDict mapping feature_name to Tensors of activations.
Returns:
An OrderedDict mapping from feature name Strings to Tensors of gradients of
the loss wrt the activations of the features.
"""
activation_list = activations.values()
grads_and_vars = optimizer.compute_gradients(loss, activation_list)
grads = [grad for grad, _ in grads_and_vars]
feature_to_gradient_dict = collections.OrderedDict(
zip(activations.keys(), grads))
return feature_to_gradient_dict
|
2a2ebca1e6024e11f541e3ccaf1fee4acd7ab745
| 3,644,286
|
def distance_to_mesh(mesh, pts, engine="auto", bvh=None):
""" Compute the distance from a set of points to a mesh.
Args:
        mesh (:class:`Mesh`): An input mesh.
pts (:class:`numpy.ndarray`): A :math:`N \\times dim` array of query
points.
engine (``string``): BVH engine name. Valid choices are "cgal",
"geogram", "igl" if all dependencies are used. The default is
"auto" where an available engine is automatically picked.
bvh (:class:`BVH`): BVH engine instance (optional)
Returns:
Three values are returned.
* ``squared_distances``: squared distances from each point to mesh.
* ``face_indices`` : the closest face to each point.
* ``closest_points``: the point on mesh that is closest to each
query point.
"""
if not bvh:
bvh = BVH(engine, mesh.dim)
bvh.load_mesh(mesh)
squared_distances, face_indices, closest_points = bvh.lookup(pts)
return squared_distances, face_indices, closest_points
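# --- Usage sketch (added): assumes pymesh is available (this snippet mirrors
# pymesh's own distance_to_mesh API) and that ``pymesh.form_mesh`` builds a
# Mesh from raw arrays.
import numpy as np
import pymesh

vertices = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
faces = np.array([[0, 1, 2]])
mesh = pymesh.form_mesh(vertices, faces)

pts = np.array([[0.25, 0.25, 1.0]])
sq_d, face_idx, closest = distance_to_mesh(mesh, pts)
# The query point sits one unit above the triangle, so sq_d[0] == 1.0.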
|
c44230d7e9cd18c2d992a85e2fba04a890b55ed8
| 3,644,287
|
from typing import Any
from typing import Tuple
from typing import Dict
def parse_config(settings: Any) -> Tuple[Dict[str, Queue], Dict[str, dict]]:
"""
SAQ configuration parsing.
Args:
settings: The settings (can be pydantic.BaseSettings).
Returns:
Tuple[Dict[str, Queue], Dict[str, dict]]: The SAQ queues and the queue settings.
"""
saq_queues: Dict[str, dict] = getattr(settings, "SAQ_QUEUES", {})
if not isinstance(saq_queues, dict):
raise RuntimeError("SAQ_QUEUES must be a dict, got {}".format(type(saq_queues)))
queue_maps = {}
queue_settings = {}
for q_name, q_param in saq_queues.items():
url = q_param.get("url", None)
if not url:
raise RuntimeError("No url specified for queue {}".format(q_name))
queue = Queue.from_url(url, q_name)
queue_maps[q_name] = queue
queue_settings[q_name] = q_param
return queue_maps, queue_settings
|
d2711efedff319fb181d062593338f32271f39d1
| 3,644,288
|
from tensorflow.python.ops import array_ops
def create_zeros_slot(primary, name, dtype=None, colocate_with_primary=True):
"""Create a slot initialized to 0 with same shape as the primary object.
Args:
primary: The primary `Variable` or `Output`.
name: Name to use for the slot variable.
dtype: Type of the slot variable. Defaults to the type of `primary`.
colocate_with_primary: Boolean. If True the slot is located
on the same device as `primary`.
Returns:
A `Variable` object.
"""
if dtype is None:
dtype = primary.dtype
val = array_ops.zeros(primary.get_shape().as_list(), dtype=dtype)
return create_slot(primary, val, name,
colocate_with_primary=colocate_with_primary)
|
ac940b8d92e4de025a2fc83695adb66a611935ea
| 3,644,289
|
def AdditionalMedicareTax(e00200, MARS,
AMEDT_ec, sey, AMEDT_rt,
FICA_mc_trt, FICA_ss_trt,
ptax_amc, payrolltax):
"""
Computes Additional Medicare Tax (Form 8959) included in payroll taxes.
Notes
-----
Tax Law Parameters:
AMEDT_ec : Additional Medicare Tax earnings exclusion
AMEDT_rt : Additional Medicare Tax rate
FICA_ss_trt : FICA Social Security tax rate
FICA_mc_trt : FICA Medicare tax rate
    Taxpayer Characteristics:
e00200 : Wages and salaries
sey : Self-employment income
Returns
-------
ptax_amc : Additional Medicare Tax
payrolltax : payroll tax augmented by Additional Medicare Tax
"""
line8 = max(0., sey) * (1. - 0.5 * (FICA_mc_trt + FICA_ss_trt))
line11 = max(0., AMEDT_ec[MARS - 1] - e00200)
ptax_amc = AMEDT_rt * (max(0., e00200 - AMEDT_ec[MARS - 1]) +
max(0., line8 - line11))
payrolltax += ptax_amc
return (ptax_amc, payrolltax)
|
de0e35fbe5c7c09de384e1302cba082149ea5930
| 3,644,290
|
import copy
def append_step_list(step_list, step, value, go_next, mode, tag):
    """From step_list, append a step the number of times it needs to be
    repeated when runmode or retry is present.
:Arguments:
step_list = Ordered list of steps to be executed
step = Current step
value = attempts in runmode/retry
go_next = value of the real next step
mode = runmode or retry
tag = In runmode it is value, in retry it is count
:Return:
step_list = New step list formed by appending the replicated steps
"""
for i in range(0, value):
copy_step = copy.deepcopy(step)
copy_step.find(mode).set(tag, go_next)
copy_step.find(mode).set("attempt", i + 1)
copy_step.find(mode).set(mode+"_val", value)
step_list.append(copy_step)
return step_list
|
b8b5b3614fea0709b484df087ffa3ee2861532c4
| 3,644,291
|
def load_license(request, project_slug):
"""
Reload the license input queryset with the right options for the
access form's current access policy choice. Called via ajax.
"""
user = request.user
project = ActiveProject.objects.filter(slug=project_slug)
if project:
project = project.get()
else:
raise Http404()
form = forms.AccessMetadataForm(instance=project)
form.set_license_queryset(access_policy=int(request.GET['access_policy']))
return render(request, 'project/license_input.html', {'form':form})
|
59fb710cfccfaaf642283e6fb26631f56a39cc1e
| 3,644,292
|
def wt():
"""Return default word tokenizer."""
return WordTokenizer()
|
d9e9a9c3cb99f1c3846ee54b38184d39d67051a7
| 3,644,293
|
import warnings
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import patches
def epochplot(epochs, *, ax=None, height=None, fc='0.5', ec='0.5',
              alpha=0.5, hatch='////', label=None, hc=None, **kwargs):
    """Plot each epoch as a hatched rectangle spanning its (start, stop)
    interval on the given axes.
    """
if ax is None:
ax = plt.gca()
ymin, ymax = ax.get_ylim()
if height is None:
height = ymax - ymin
if hc is not None:
try:
hc_before = mpl.rcParams['hatch.color']
mpl.rcParams['hatch.color']=hc
except KeyError:
warnings.warn("Hatch color not supported for matplotlib <2.0")
for ii, (start, stop) in enumerate(zip(epochs.starts, epochs.stops)):
ax.add_patch(
patches.Rectangle(
(start, ymin), # (x,y)
width=stop - start , # width
height=height, # height
hatch=hatch,
facecolor=fc,
edgecolor=ec,
alpha=alpha,
label=label if ii == 0 else "_nolegend_",
**kwargs
)
)
ax.set_xlim([epochs.start, epochs.stop])
if hc is not None:
try:
mpl.rcParams['hatch.color'] = hc_before
except UnboundLocalError:
pass
return ax
|
2e4f993ac48e6f054cd8781f4356b7afed41b369
| 3,644,294
|
def run_dag(
dag_id,
run_id=None,
conf=None,
replace_microseconds=True,
execution_date=None,
):
"""Runs DAG specified by dag_id
:param dag_id: DAG ID
:param run_id: ID of the dag_run
:param conf: configuration
:param replace_microseconds: whether microseconds should be zeroed
    :param execution_date: execution date for the dag run
    :return: first dag run - even if more than one Dag Runs were present or None
    """
    # dag_model = DagModel.get_current(dag_id)
    # if dag_model is None:
    #     raise DagNotFound("Dag id {} not found in DagModel".format(dag_id))
    # dagbag = DagBag(dag_folder=dag_model.fileloc)
    dagbag = DagBag()
dag_run = DagRun()
runs = _run_dag(
dag_id=dag_id,
dag_run=dag_run,
dag_bag=dagbag,
run_id=run_id,
conf=conf,
replace_microseconds=replace_microseconds,
execution_date=execution_date,
)
return runs[0] if runs else None
|
124954d350a09d576b32e80fae4c56d1c0b2c141
| 3,644,295
|
def get_function_handle(method, var):
"""
Return a function handle to a given calculation method.
Parameters
----------
method : str
Identifier of the calculation method to return a handle to.
var : dict
Local variables needed in the mu update method.
Returns
-------
f_handle : function
Handle to the calculation method defined in this globals scope.
"""
return globals()['wrap_calculate_using_' + method](var)
|
e9f363908be5e628e2e17a781a3626737a3d3879
| 3,644,297
|
def build_receiver_model(params, ds_meta, utt_len: int, vocab_size: int, pre_conv=None) -> ReceiverModel:
"""
given the size of images from a dataset, and a desired vocab size and utterance length,
creates a ReceiverModel, which will take in images, and utterances, and classify
the images as being consistent with the utterances or not.
"""
p = params
if pre_conv is None:
pre_conv = pre_conv_lib.build_preconv(params=p, ds_meta=ds_meta)
multimodal_classifier = multimodal_classifiers.build_multimodal_classifier(
params=p, pre_conv=pre_conv, ds_meta=ds_meta)
linguistic_encoder = linguistic_encoders.build_linguistic_encoder(
params=p, utt_len=utt_len, vocab_size=vocab_size)
receiver_model = ReceiverModel(
pre_conv=pre_conv,
multimodal_classifier=multimodal_classifier,
linguistic_encoder=linguistic_encoder)
return receiver_model
|
59904d83fa48d390f472b69b5324005d1a28e9c6
| 3,644,298
|
import math
def fermi_fitness(strategy_pair, N, i, utilities, selection_intensity=1):
"""
Return the fermi fitness of a strategy pair in a population with
N total individuals and i individuals of the first type.
"""
    F, G = [math.exp(selection_intensity * k) for k in fitness(strategy_pair, N, i, utilities)]
return F / (F + G), G / (F + G)
|
fc2631d85ad0fa8ce879ff1fffd6976b1a1e1abf
| 3,644,299
|
import numpy as np
def diff_smf(mstar_arr, volume, h1_bool, colour_flag=False):
"""
Calculates differential stellar mass function in units of h=1.0
Parameters
----------
mstar_arr: numpy array
Array of stellar masses
volume: float
Volume of survey or simulation
h1_bool: boolean
True if units of masses are h=1, False if units of masses are not h=1
    colour_flag (optional): str or False
        'R' if galaxy masses correspond to red galaxies, 'B' if they
        correspond to blue galaxies. Defaults to False.
Returns
---------
maxis: array
Array of x-axis mass values
phi: array
Array of y-axis values
err_tot: array
Array of error values per bin
bins: array
Array of bin edge values
counts: array
Array of number of things in each bin
"""
if not h1_bool:
# changing from h=0.7 to h=1 assuming h^-2 dependence
logmstar_arr = np.log10((10**mstar_arr) / 2.041)
else:
logmstar_arr = np.log10(mstar_arr)
if survey == 'eco' or survey == 'resolvea':
bin_min = np.round(np.log10((10**8.9) / 2.041), 1)
if survey == 'eco' and colour_flag == 'R':
bin_max = np.round(np.log10((10**11.5) / 2.041), 1)
bin_num = 6
elif survey == 'eco' and colour_flag == 'B':
bin_max = np.round(np.log10((10**11) / 2.041), 1)
bin_num = 6
elif survey == 'resolvea':
# different to avoid nan in inverse corr mat
bin_max = np.round(np.log10((10**11.5) / 2.041), 1)
bin_num = 7
else:
# For eco total
bin_max = np.round(np.log10((10**11.5) / 2.041), 1)
bin_num = 7
bins = np.linspace(bin_min, bin_max, bin_num)
elif survey == 'resolveb':
bin_min = np.round(np.log10((10**8.7) / 2.041), 1)
bin_max = np.round(np.log10((10**11.8) / 2.041), 1)
bins = np.linspace(bin_min, bin_max, 7)
# Unnormalized histogram and bin edges
counts, edg = np.histogram(logmstar_arr, bins=bins) # paper used 17 bins
dm = edg[1] - edg[0] # Bin width
maxis = 0.5 * (edg[1:] + edg[:-1]) # Mass axis i.e. bin centers
# Normalized to volume and bin width
err_poiss = np.sqrt(counts) / (volume * dm)
err_tot = err_poiss
phi = counts / (volume * dm) # not a log quantity
phi = np.log10(phi)
return maxis, phi, err_tot, bins, counts
|
9152a86023c78e47ae0813c489c897800140f174
| 3,644,300
|
def get_parameter(dbutils, parameter_name: str, default_value='') -> str:
"""Creates a text widget and gets parameter value. If ran from ADF, the value is taken from there."""
dbutils.widgets.text(parameter_name, default_value)
return dbutils.widgets.get(parameter_name)
|
cf8359e6acea68ea26e24cc656847e5560019bd1
| 3,644,301
|
from random import gauss
from numpy import clip
def single_init(cfg: GenomeConfig):
    """Random initial floating GRU value, drawn from a normal distribution and clipped to the allowed range."""
return clip(gauss(cfg.gru_init_mean, cfg.gru_init_stdev), a_min=cfg.gru_min_value, a_max=cfg.gru_max_value)
|
a72e534259a0d3e0fa3f3081b049bc8c5c316686
| 3,644,302
|
def get_recent_articles(request):
"""
    Fetch the most recently updated articles.
"""
user = get_login_user(request)
recommend = request.POST.get('recommend', 'recommend')
if recommend == 'unrecommend':
articles = Article.objects.raw(get_other_articles_sql)
elif recommend == 'recommend':
articles = Article.objects.raw(get_recommend_articles_sql)
    else:
        logger.warning(f'Unknown type: {recommend}')
        articles = []
user_sub_feeds = []
if user:
user_sub_feeds = get_user_sub_feeds(user.oauth_id)
context = dict()
context['articles'] = articles
context['user'] = user
context['user_sub_feeds'] = user_sub_feeds
return render(request, 'explore/recent_articles.html', context=context)
|
9ea03f931d67c669f99a87a27ec88bb78a7cd7e2
| 3,644,303
|
import copy
def add_close_export_to_cell(cell):
"""
Adds an HTML comment to close question export for PDF filtering to the top of ``cell``. ``cell``
should be a Markdown cell. This adds ``<!-- END QUESTION-->`` as the first line of the cell.
Args:
cell (``nbformat.NotebookNode``): the cell to add the close export to
Returns:
``nbformat.NotebookNode``: the cell with the close export comment at the top
"""
cell = copy.deepcopy(cell)
source = get_source(cell)
source = ["<!-- END QUESTION -->\n", "\n"] + source
cell['source'] = "\n".join(source)
return cell
|
4fa7d83a8c262979b2d3ef95ddc7c0c50c7e68f7
| 3,644,304
|
def get_ram_list_linux():
"""Get RAM list using dmidecode."""
cmd = ['sudo', 'dmidecode', '--type', 'memory']
dimm_list = []
manufacturer = 'Unknown'
size = 0
# Get DMI data
proc = run_program(cmd)
dmi_data = proc.stdout.splitlines()
# Parse data
for line in dmi_data:
line = line.strip()
if line == 'Memory Device':
# Reset vars
manufacturer = 'Unknown'
size = 0
elif line.startswith('Size:'):
size = line.replace('Size: ', '')
try:
size = string_to_bytes(size, assume_binary=True)
except ValueError:
# Assuming empty module
size = 0
elif line.startswith('Manufacturer:'):
manufacturer = line.replace('Manufacturer: ', '')
dimm_list.append([size, manufacturer])
# Save details
return dimm_list
|
3511ea9f5e09ae467c7d9cb7c42f83741a431eda
| 3,644,305
|
def get_capability_list(capability=esdl.Producer):
"""Returns a list of all subtypes of the specified capability.
Used to get a list of e.g. all producers in ESDL
The list is automatically generated based on the ESDL meta model"""
subtype_list = list()
for eclassifier in esdl.eClass.eClassifiers:
if isinstance(eclassifier, EClass):
if capability.eClass in eclassifier.eAllSuperTypes() and not eclassifier.abstract:
subtype_list.append(eclassifier.name)
subtype_list.sort()
return subtype_list
|
700dec944e8a1185c5763a6b32c08fe553f35459
| 3,644,306
|
import errno
def _get_exec_binary(binary, kw):
"""
On win32, the subprocess module can only reliably resolve the
target binary if it's actually a binary; as for a Node.js script
    it seems to only work iff shell=True was specified, presenting
    a security risk. Resolve the target manually through ``which()``,
    which will account for that.
The kw argument is the keyword arguments that will be passed into
whatever respective subprocess.Popen family of methods. The PATH
environment variable will be used if available.
"""
binary = which(binary, path=kw.get('env', {}).get('PATH'))
if binary is None:
raise_os_error(errno.ENOENT)
return binary
|
654d8f01419712ac774e0f7c5d4b02b9219d3153
| 3,644,307
|
import numpy as np
# ``site(x, y, Lx, Ly)`` maps 2D lattice coordinates to a flat site index
# (defined elsewhere); the original ``import site`` (the stdlib module) was spurious.
def init_SSE_square(Lx, Ly):
"""Initialize a starting configuration on a 2D square lattice."""
n_sites = Lx*Ly
# initialize spins randomly with numbers +1 or -1, but the average magnetization is 0
spins = 2*np.mod(np.random.permutation(n_sites), 2) - 1
op_string = -1 * np.ones(10, np.intp) # initialize with identities
bonds = []
for x0 in range(Lx):
for y0 in range(Ly):
s0 = site(x0, y0, Lx, Ly)
s1 = site(np.mod(x0+1, Lx), y0, Lx, Ly) # bond to the right
bonds.append([s0, s1])
s2 = site(x0, np.mod(y0+1, Ly), Lx, Ly) # bond to the top
bonds.append([s0, s2])
bonds = np.array(bonds, dtype=np.intp)
return spins, op_string, bonds
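# --- Usage sketch (added): ``site`` is defined elsewhere in the original
# module; a row-major indexing is assumed here.
import numpy as np

def site(x, y, Lx, Ly):
    # Hypothetical row-major mapping from lattice coordinates to a flat index.
    return y * Lx + x

spins, op_string, bonds = init_SSE_square(4, 4)
# 16 spins with zero net magnetization, and two bonds per site.
assert spins.sum() == 0 and bonds.shape == (32, 2)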
|
0c0681b3a28680ed4acf6e2d7ed5719031df948b
| 3,644,308
|
import numpy as np
import pandas as pd
from scipy import signal
def filter_signal(eeg_df, iqrs, dic_filt_opts):
"""
Filter signal
"""
all_labels = list(eeg_df.columns)
# check the order of labels
label_grouped = False
if all_labels[0].split('.')[-1] == all_labels[1].split('.')[-1]:
label_grouped = True
data_labels = all_pow_nodes
meta_labels = [lab for lab in all_labels if lab not in data_labels]
eeg_pow_filt = []
for phase in eeg_df.phase.unique():
print('\t',phase)
sub = eeg_df.loc[ (eeg_df.phase == phase), :].copy()
sub = sub.reset_index(drop=True)
meta = sub[meta_labels].values # [N, ]
data = sub[data_labels].values # always [N,70]
if dic_filt_opts['per_phases']:
th_up_all = iqrs[(dic_filt_opts['datafiltset'], phase)] # OLDER ORDER
else:
th_up_all = iqrs[(dic_filt_opts['datafiltset'], dic_filt_opts['setphase'])] # OLDER ORDER
if label_grouped:
th_up_all = iqr_by_group(th_up_all) # group iqrs
print('\tFiltering --> nodes are grouped')
m_thresh = np.repeat([np.array(th_up_all)], data.shape[0], axis=0)
mask = data > m_thresh
data[mask] = m_thresh[mask] / 2.
# median filter applying
for rr in range(data.shape[1]): # by colums (70 cols = 14 channesl * 5 waves)
data[:, rr] = signal.medfilt(data[:, rr], kernel_size=3)
df = pd.DataFrame(np.concatenate((data, meta), axis=1), columns=data_labels + meta_labels)
eeg_pow_filt.append(df)
del df
eeg_pow_filt = pd.concat(eeg_pow_filt, axis=0, ignore_index=True)
return eeg_pow_filt
|
f9dd9108d3c17a59eaae9fe509a88a6c3be55db2
| 3,644,309
|
from copy import deepcopy
from typing import List
def get_sym_inequiv_components(
components: List[Component], spg_analyzer: SpacegroupAnalyzer
) -> List[Component]:
"""Gets and counts the symmetrically inequivalent components.
Component data has to have been generated with ``inc_site_ids=True``.
Args:
components: A list of structure components, generated using
:obj:`pymatgen.analysis.dimensionality.get_structure_components`,
with ``inc_site_ids=True``.
spg_analyzer: A `pymatgen.symmetry.analyzer.SpacegroupAnalyzer` analyzer
object for the structure containing the components.
Returns:
A list of the symmetrically inequivalent components. Any duplicate
components will only be returned once. The component objects are in the
same format is given by
:obj:`pymatgen.analysis.dimensionality.get_structure_components` but
the additional property:
- ``"count"`` (:obj:`int`): The number of times this component appears
in the structure.
"""
components = deepcopy(components)
sym_inequiv_components = {}
equivalent_atoms = spg_analyzer.get_symmetry_dataset()["equivalent_atoms"]
for component in components:
sym_indices = frozenset(equivalent_atoms[x] for x in component["site_ids"])
# if two components are composed of atoms that are symmetrically
# equivalent they are the same.
if sym_indices in sym_inequiv_components:
sym_inequiv_components[sym_indices]["count"] += 1
continue
component["count"] = 1
sym_inequiv_components[sym_indices] = component
return list(sym_inequiv_components.values())
|
5ce83345712ac336a6af2b02421bfef9a62bbf0f
| 3,644,310
|
def aic(llf, nobs, df_modelwc):
"""
Akaike information criterion
Parameters
----------
llf : {float, array_like}
value of the loglikelihood
nobs : int
number of observations
df_modelwc : int
number of parameters including constant
Returns
-------
aic : float
information criterion
References
----------
https://en.wikipedia.org/wiki/Akaike_information_criterion
"""
return -2.0 * llf + 2.0 * df_modelwc
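# --- Worked check (added). Note that ``nobs`` does not enter the plain AIC
# formula; it is presumably kept for signature consistency with ``bic``.
assert aic(llf=-100.0, nobs=50, df_modelwc=3) == 206.0  # -2*(-100) + 2*3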
|
3940c1c86325630248fdf4a50c2aa19b4f4df623
| 3,644,311
|
import pandas as pd
def summarize_logs(df, wells, cat, props, sr=0.5):
    """
    Calculate petrophysical summaries based on well and categorical data. All logs are
    averaged with simple arithmetic means (consider supplying log-permeability for a
    better averaged estimate).
    Parameters:
    df (pd.DataFrame): dataframe containing well-log data; apply appropriate filters in advance to provide net logs
    wells (string): column with well names in the logs dataframe
    cat (string): column with filtering discrete property in the logs dataframe
    props (list:string): list of properties (logs) to be summarized
    sr (float): log sampling rate in project units for net thickness calculations
Returns:
summ (pd.DataFrame): dataframe with summarized data
"""
    col_list = [wells, cat, *props]
df1 = df[col_list].dropna(axis=0, how='any')
col_list.append('NetH')
summ = pd.DataFrame(columns=col_list)
idx = 0
for well in df1[wells].unique():
for cat_ in df1[cat].unique():
summ.loc[idx, [wells, cat]] = [well, cat_]
summ.loc[idx, props] = df1[(df1[wells]==well)&(df1[cat]==cat_)][props].mean()
summ.loc[idx, 'NetH'] = df1[(df1[wells]==well)&(df1[cat]==cat_)][props[0]].count() * sr
idx += 1
for col in summ.columns:
if col not in [wells, cat]:
summ[col] = pd.to_numeric(summ[col], errors='ignore')
return summ
|
3a48fa7d1efc83a8216f01505a645d37b173ecbb
| 3,644,312
|
import numpy as np
from sklearn.metrics import confusion_matrix
def get_tfpn_mean(targets, predictions):
    """
    Given targets and predictions, return the TP, FN, FP, TN values
    aggregated over all classes.
    :param targets:
    :param predictions:
    :return:
    """
cm = confusion_matrix(targets, predictions)
total = np.array(cm).sum()
TP = cm.diagonal().sum()
FN = total - TP
FP = FN
TN = total * len(cm) - TP - FN - FP
return TP, FN, FP, TN
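# --- Usage sketch (added): a 3-class toy example.
targets = [0, 1, 2, 2, 1]
predictions = [0, 1, 2, 2, 2]
TP, FN, FP, TN = get_tfpn_mean(targets, predictions)
# total = 5, TP = 4 (diagonal sum), FN = FP = 1, TN = 5*3 - 4 - 1 - 1 = 9
assert (TP, FN, FP, TN) == (4, 1, 1, 9)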
|
06c3b184c1b35a22eb3594e934c3aef8e278ebaa
| 3,644,314
|
def cal_deltaE00_from_LCh(LCh_1, Lab_2):
    r"""
Calculate the color difference :math:`\Delta E_{00}` between two given colorspace arrays.
:param LCh_1: array-like
:param Lab_2: array-like
:return: numeric or ndarray
"""
Lab_1 = LCh2Lab(LCh_1)
return deltaE00(Lab_1, Lab_2)
|
b774be88ad2ca7032f0471963b2720b0e6ecf5f7
| 3,644,315
|
def get_var_type_glue(vtype):
"""Get glue module from variable's type.
Parameters
----------
vtype: data type
Returns
-------
Glue Module if glue exists, otherwise None.
"""
global DTYPE_TO_GLUE, PKG_NAME_TO_GLUE_ARGS
glue_mod = DTYPE_TO_GLUE.get(vtype, None)
if glue_mod is not None:
return glue_mod
pkg_name = vtype.__module__.split('.')[0]
if pkg_name not in PKG_NAME_TO_GLUE_ARGS:
return None
# try to register glue_mod
_register_glue_real(*PKG_NAME_TO_GLUE_ARGS[pkg_name])
return DTYPE_TO_GLUE.get(vtype, None)
|
d7ba7798286142b70e0dbd8e938f2e3a4ae0423e
| 3,644,316
|
import numpy as np
def contract_TRG(state, svd_option_1st=None, svd_option_rem=None):
"""
Contract the PEPS using Tensor Renormalization Group.
Parameters
----------
svd_option_1st: tensorbackends.interface.Option, optional
Parameters for the first SVD in TRG. Will default to tensorbackends.interface.ReducedSVD() if not given.
svd_option_rem: tensorbackends.interface.Option, optional
Parameters for the remaining SVD truncations. Will perform SVD if given.
Returns
-------
output: state.backend.tensor or scalar
The contraction result.
References
----------
https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.99.120601
https://journals.aps.org/prb/abstract/10.1103/PhysRevB.78.205116
"""
# base case
if state.shape <= (2, 2):
return contract_BMPS(state, svd_option_rem)
# SVD each tensor into two
tn = np.empty(state.shape + (2,), dtype=object)
for (i, j), tsr in np.ndenumerate(state.grid):
str_uv = 'abi,icdpq' if (i+j) % 2 == 0 else 'aidpq,bci'
tn[i,j,0], _, tn[i,j,1] = state.backend.einsumsvd(
'abcdpq->' + str_uv, tsr,
option=svd_option_1st or ReducedSVD(),
absorb_s='even'
)
tn[i,j,(i+j)%2] = tn[i,j,(i+j)%2].reshape(*(tn[i,j,(i+j)%2].shape + (1, 1)))
return _contract_TRG(state, tn, svd_option_rem)
|
0e6876c778e6a2df552a6de8b3253d7a860e1987
| 3,644,317
|
import numpy as np
from scipy import special
def riccati_3(nmax, x):
"""Riccati bessel function of the 3rd kind
returns (r3, r3'), n=0,1,...,nmax"""
x = np.asarray(x)
result = np.zeros((2,nmax) + x.shape, dtype=complex)
for n in range(nmax):
yn = special.spherical_yn(n+1,x)
ynp = special.spherical_yn(n+1,x, derivative=True)
result[0,n] = x*yn
result[1,n] = yn + x*ynp
return result
|
32d344e8ac4e1f01bbe5605dd4c9a6563497ac71
| 3,644,318
|
def conv_batch_relu_forward(x, w, b, gamma, beta, conv_param, bn_param):
"""
    Convenience layer that performs a convolution, a batch normalization, and a ReLU.
    Inputs:
    - x: Input to the convolutional layer
    - w, b, conv_param: Weights and parameters for the convolutional layer
    - gamma, beta, bn_param : batch norm parameters
    Returns a tuple of:
    - out: Output from the ReLU layer
- cache: Object to give to the backward pass
"""
convOut, conv_cache = layers.conv_forward(x, w, b, conv_param)
normOut, norm_cache = layers.spatial_batchnorm_forward(convOut, gamma, beta, bn_param)
out, relu_cache = layers.relu_forward(normOut)
cache = (conv_cache, norm_cache, relu_cache)
return out, cache
|
8c306a2337307ec68aa5a536b4aef0dc4f34cf39
| 3,644,319
|
import numpy as np
def hex_layout(npos, width, rotate=None):
"""Compute positions in a hexagon layout.
Place the given number of positions in a hexagonal layout projected on
the sphere and centered at z axis. The width specifies the angular
extent from vertex to vertex along the "X" axis. For example::
Y ^ O O O
| O O O O
| O O + O O
+--> X O O O O
O O O
Each position is numbered 0..npos-1. The first position is at the center,
and then the positions are numbered moving outward in rings.
Args:
        npos (int): The number of positions packed onto the wafer.
width (float): The angle (in degrees) subtended by the width along
the X axis.
rotate (array, optional): Optional array of rotation angles in degrees
to apply to each position.
Returns:
(array): Array of quaternions for the positions.
"""
zaxis = np.array([0, 0, 1], dtype=np.float64)
nullquat = np.array([0, 0, 0, 1], dtype=np.float64)
sixty = np.pi/3.0
thirty = np.pi/6.0
rtthree = np.sqrt(3.0)
rtthreebytwo = 0.5 * rtthree
angdiameter = width * np.pi / 180.0
# find the angular packing size of one detector
nrings = hex_nring(npos)
posdiam = angdiameter / (2 * nrings - 2)
result = np.zeros((npos, 4), dtype=np.float64)
for pos in range(npos):
if pos == 0:
# center position has no offset
posrot = nullquat
else:
# Not at the center, find ring for this position
test = pos - 1
ring = 1
while (test - 6 * ring) >= 0:
test -= 6 * ring
ring += 1
sectors = int(test / ring)
sectorsteps = np.mod(test, ring)
# Convert angular steps around the ring into the angle and distance
# in polar coordinates. Each "sector" of 60 degrees is essentially
# an equilateral triangle, and each step is equally spaced along
# the edge opposite the vertex:
#
# O
# O O (step 2)
# O O (step 1)
# X O O O (step 0)
#
# For a given ring, "R" (center is R=0), there are R steps along
# the sector edge. The line from the origin to the opposite edge
# that bisects this triangle has length R*sqrt(3)/2. For each
# equally-spaced step, we use the right triangle formed with this
# bisection line to compute the angle and radius within this
# sector.
# The distance from the origin to the midpoint of the opposite
# side.
midline = rtthreebytwo * float(ring)
# the distance along the opposite edge from the midpoint (positive
# or negative)
edgedist = float(sectorsteps) - 0.5 * float(ring)
# the angle relative to the midpoint line (positive or negative)
relang = np.arctan2(edgedist, midline)
# total angle is based on number of sectors we have and the angle
# within the final sector.
posang = sectors * sixty + thirty + relang
posdist = rtthreebytwo * posdiam * float(ring) / np.cos(relang)
posx = np.sin(posdist) * np.cos(posang)
posy = np.sin(posdist) * np.sin(posang)
posz = np.cos(posdist)
posdir = np.array([posx, posy, posz], dtype=np.float64)
norm = np.sqrt(np.dot(posdir, posdir))
posdir /= norm
posrot = qa.from_vectors(zaxis, posdir)
if rotate is None:
result[pos] = posrot
else:
prerot = qa.rotation(zaxis, rotate[pos] * np.pi / 180.0)
result[pos] = qa.mult(posrot, prerot)
return result
|
85141e08c8a75a54953ba78c520b87d377aad3fb
| 3,644,320
|
def dup_zz_hensel_step(m, f, g, h, s, t, K):
"""
One step in Hensel lifting in `Z[x]`.
Given positive integer `m` and `Z[x]` polynomials `f`, `g`, `h`, `s`
and `t` such that::
f == g*h (mod m)
s*g + t*h == 1 (mod m)
lc(f) is not a zero divisor (mod m)
lc(h) == 1
deg(f) == deg(g) + deg(h)
deg(s) < deg(h)
deg(t) < deg(g)
returns polynomials `G`, `H`, `S` and `T`, such that::
f == G*H (mod m**2)
        S*G + T*H == 1 (mod m**2)
References
==========
1. [Gathen99]_
"""
M = m**2
e = dup_sub_mul(f, g, h, K)
e = dup_trunc(e, M, K)
q, r = dup_div(dup_mul(s, e, K), h, K)
q = dup_trunc(q, M, K)
r = dup_trunc(r, M, K)
u = dup_add(dup_mul(t, e, K), dup_mul(q, g, K), K)
G = dup_trunc(dup_add(g, u, K), M, K)
H = dup_trunc(dup_add(h, r, K), M, K)
u = dup_add(dup_mul(s, G, K), dup_mul(t, H, K), K)
b = dup_trunc(dup_sub(u, [K.one], K), M, K)
c, d = dup_div(dup_mul(s, b, K), H, K)
c = dup_trunc(c, M, K)
d = dup_trunc(d, M, K)
u = dup_add(dup_mul(t, b, K), dup_mul(c, G, K), K)
S = dup_trunc(dup_sub(s, d, K), M, K)
T = dup_trunc(dup_sub(t, u, K), M, K)
return G, H, S, T
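# --- Usage sketch (added): assumes the ``dup_*`` helpers and ``ZZ`` from
# SymPy's dense polynomial modules (this snippet mirrors
# sympy.polys.factortools).
from sympy.polys.domains import ZZ
from sympy.polys.densearith import dup_add, dup_sub, dup_mul, dup_div, dup_sub_mul
from sympy.polys.densetools import dup_trunc

# Lift x**2 - 1 == (x - 1)*(x + 1) from mod 5 to mod 25.
# With s = 2, t = 3: s*g + t*h = 2*(x-1) + 3*(x+1) = 5*x + 1 == 1 (mod 5).
f, g, h = [ZZ(1), ZZ(0), ZZ(-1)], [ZZ(1), ZZ(-1)], [ZZ(1), ZZ(1)]
s, t = [ZZ(2)], [ZZ(3)]
G, H, S, T = dup_zz_hensel_step(5, f, g, h, s, t, ZZ)
assert dup_trunc(dup_mul(G, H, ZZ), ZZ(25), ZZ) == dup_trunc(f, ZZ(25), ZZ)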
|
4ce2c7e9aaf52a9ef3e7ce68d164c82959b22ebb
| 3,644,321
|
import numpy as np
def generate_sobol_index_sample_sets(samplesA, samplesB, index):
    """
    Given two sample sets A and B, generate the sets :math:`A_B^{I}`.
    The rows of :math:`A_B^I` are all taken from A except for the rows with
    non-zero entries in the index I. When A and B are QMC samples it is best
    to change as few rows as possible.
    See: "Variance based sensitivity analysis of model output. Design and
    estimator for the total sensitivity index" (Saltelli et al.).
    """
nvars = samplesA.shape[0]
I = np.arange(nvars)
mask = np.asarray(index, dtype=bool)
samples = np.vstack([samplesA[~mask], samplesB[mask]])
J = np.hstack([I[~mask], I[mask]])
samples = samples[np.argsort(J), :]
return samples
|
15b02e1995bc922b33d6e0e32bdde1559f0a762e
| 3,644,322
|
import logging
from logging.handlers import WatchedFileHandler
def setup_logfile_logger(log_path, log_level=None, log_format=None, date_format=None):
"""
Set up logging to a file.
"""
# Create the handler
handler = WatchedFileHandler(log_path, mode='a', encoding='utf-8', delay=0)
if log_level:
# Grab and set the level
level = LOG_LEVELS.get(log_level.lower(), logging.ERROR)
handler.setLevel(level)
# Set the default console formatter config
if not log_format:
log_format = '%(asctime)s [%(name)s][%(levelname)s] %(message)s'
if not date_format:
date_format = '%Y-%m-%d %H:%M:%S'
formatter = logging.Formatter(log_format, datefmt=date_format)
handler.setFormatter(formatter)
root_logger.addHandler(handler)
return handler
|
cfc22a2e334aad5d4aa573014af8a8bac4a7e6b1
| 3,644,323
|
import warnings
import ftfy
def fix_encoding_and_explain(text):
"""
Deprecated copy of `ftfy.fix_encoding_and_explain()`.
"""
warnings.warn(
"`fix_encoding_and_explain()` has moved to the main module of ftfy.",
DeprecationWarning,
)
return ftfy.fix_encoding_and_explain(text)
|
3a76fefcbc68b6cf68f3262b90ff277424bf1eba
| 3,644,324
|
def parse_16bit_color(color16):
"""解析16位的颜色
:param color16: 16位的颜色值
"""
r = int(gamma5[int((color16 >> 11) & 0x1F)])
g = int(gamma6[int((color16 >> 5) & 0x3F)])
b = int(gamma5[int(color16 & 0x1F)])
return (r, g, b)
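# Example (a minimal sketch): gamma5/gamma6 are module-level lookup tables
# mapping 5-bit and 6-bit channel values to 8 bits; with hypothetical linear
# (no gamma correction) tables the function decodes plain RGB565.
gamma5 = [round(i * 255 / 31) for i in range(32)]
gamma6 = [round(i * 255 / 63) for i in range(64)]
assert parse_16bit_color(0xF800) == (255, 0, 0)   # pure red
assert parse_16bit_color(0x07E0) == (0, 255, 0)   # pure green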
|
c66448c9e886db1e696afa706577d02b3411cd92
| 3,644,325
|
from flask import render_template
from flask_login import current_user
def orders():
"""
List all orders
"""
orders = Order.query.filter_by(user_id=current_user.id).all()
return render_template('customer/orders.html', orders=orders, title="Orders")
|
921552a41ef673cb9c8f6c414ba4a12b3643617a
| 3,644,326
|
def packpeeklist1(n1, n2, n3, n4, n5):
"""
Packs and returns 5 item list
"""
listp = [n1, n2, n3, n4, n5]
return listp
|
4b781ff3e8eb4a1bd51f8e834fab5462371a85c5
| 3,644,327
|
from typing import List
def valid_commands(commands: List[str]) -> List[str]:
"""
Get the list of valid commands from a list of commands.
:param (list) commands: User-supplied commands.
:return: The subset of ``commands`` recognized by ``available_commands()``.
"""
return [command for command in commands if command in available_commands()]
|
25054d8acb8bee16855adba25e846bc128fb9f23
| 3,644,328
|
from django.shortcuts import render
def duck_list(request):
""" lists all ducks """
ducks = Duck.objects.all()
return render(request, 'duck/list.html', {'duck_list': ducks})
|
206e586c2709d4c5526e26ff50cabdfe440125bc
| 3,644,329
|
def get_debian_version(file_path):
"""
Get the version of a Debian package file.
:param file_path: the path of the .deb file
:return: the version string of the package
"""
cmd_args = ["dpkg-deb", "-f", file_path, "Version"]
debian_version = run_command(cmd_args)
return debian_version
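# A minimal sketch of the run_command helper assumed above (not from the
# original source):
import subprocess
def run_command(cmd_args):
    """Run a command and return its stripped stdout."""
    return subprocess.run(cmd_args, capture_output=True, text=True,
                          check=True).stdout.strip()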
|
61c8779d4b235a1d74bf819299f95077e5ff001a
| 3,644,330
|
from typing import Optional
from hypothesis import strategies as st
from hypothesis.strategies import SearchStrategy
@st.composite
def hash_type(
draw, hash_type_strategy: Optional[SearchStrategy[HashType]] = None
) -> HashType:
"""Composite strategy for fetching a :class:`~modist.package.hasher.HashType`."""
return draw(HashType_strategy if not hash_type_strategy else hash_type_strategy)
|
79152dda823dcd227545ed2bb117229344fc341a
| 3,644,331
|
def get_initializer(initializer_name):
"""Get the corresponding initializer function based on the initializer string.
API of an initializer:
init_fn, hparams = get_initializer(init)
new_params, final_l = init_fn(loss, init_params, hps,
num_outputs, input_shape)
Args:
initializer_name: (str) e.g. default.
Returns:
initializer
Raises:
ValueError if the initializer is unrecognized.
"""
try:
return _ALL_INITIALIZERS[initializer_name][0]
except KeyError:
raise ValueError('Unrecognized initializer: {}'.format(initializer_name))
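# A hypothetical registry illustrating the expected shape of
# _ALL_INITIALIZERS: names map to (init_fn, metadata) tuples.
def _noop_init(loss, init_params, hps, num_outputs, input_shape):
    return init_params, None
_ALL_INITIALIZERS = {'default': (_noop_init, {})}
init_fn = get_initializer('default')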
|
778941a5e7937600cca2a48371d2540cab6476ab
| 3,644,332
|
import tensorflow as tf
def _l2_project_reference(z_p, p, z_q):
"""Projects distribution (z_p, p) onto support z_q under L2-metric over CDFs.
The supports z_p and z_q are specified as tensors of distinct atoms (given
in ascending order).
Let Kq be len(z_q) and Kp be len(z_p). This projection works for any
support z_q, in particular Kq need not be equal to Kp.
Args:
z_p: Tensor holding support of distribution p, shape `[batch_size, Kp]`.
p: Tensor holding probability values p(z_p[i]), shape `[batch_size, Kp]`.
z_q: Tensor holding support to project onto, shape `[Kq]`.
Returns:
Projection of (z_p, p) onto support z_q under Cramer distance.
"""
# Broadcasting of tensors is used extensively in the code below. To avoid
# accidental broadcasting along unintended dimensions, tensors are defensively
# reshaped to have equal number of dimensions (3) throughout and intended
# shapes are indicated alongside tensor definitions. To reduce verbosity,
# extra dimensions of size 1 are inserted by indexing with `None` instead of
# `tf.expand_dims()` (e.g., `x[:, None, :]` reshapes a tensor of shape
# `[k, l]' to one of shape `[k, 1, l]`).
# Extract vmin and vmax and construct helper tensors from z_q
vmin, vmax = z_q[0], z_q[-1]
d_pos = tf.concat([z_q, vmin[None]], 0)[1:] # 1 x Kq x 1
d_neg = tf.concat([vmax[None], z_q], 0)[:-1] # 1 x Kq x 1
# Clip z_p to be in new support range (vmin, vmax).
z_p = tf.clip_by_value(z_p, vmin, vmax)[:, None, :] # B x 1 x Kp
# Get the distance between atom values in support.
d_pos = (d_pos - z_q)[None, :, None] # z_q[i+1] - z_q[i]. 1 x Kq x 1
d_neg = (z_q - d_neg)[None, :, None] # z_q[i] - z_q[i-1]. 1 x Kq x 1
z_q = z_q[None, :, None] # 1 x Kq x 1
# Ensure that we do not divide by zero, in case of atoms of identical value.
d_neg = tf.where(d_neg > 0, 1./d_neg, tf.zeros_like(d_neg)) # 1 x Kq x 1
d_pos = tf.where(d_pos > 0, 1./d_pos, tf.zeros_like(d_pos)) # 1 x Kq x 1
delta_qp = z_p - z_q # clip(z_p)[j] - z_q[i]. B x Kq x Kp
d_sign = tf.cast(delta_qp >= 0., dtype=p.dtype) # B x Kq x Kp
# Matrix of entries sgn(a_ij) * |a_ij|, with a_ij = clip(z_p)[j] - z_q[i].
# Shape B x Kq x Kp.
delta_hat = (d_sign * delta_qp * d_pos) - ((1. - d_sign) * delta_qp * d_neg)
p = p[:, None, :] # B x 1 x Kp.
return tf.reduce_sum(tf.clip_by_value(1. - delta_hat, 0., 1.) * p, 2)
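# Example (a minimal sketch): projecting a two-atom distribution onto a
# three-atom support; mass sitting on shared atoms is preserved.
z_p = tf.constant([[0.0, 1.0]])         # B=1, Kp=2
p = tf.constant([[0.5, 0.5]])
z_q = tf.constant([0.0, 0.5, 1.0])      # Kq=3
proj = _l2_project_reference(z_p, p, z_q)  # ~[[0.5, 0.0, 0.5]]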
|
e6c43d1d05237410ff94d3a3b76bc705f2064d46
| 3,644,335
|
from collections import namedtuple
import numpy as np
def _make_blocksizes(bricksize, surveysize, nlods, dtype, factor=(1,1,1), verbose=None):
"""
CURRENTLY NOT USED.
Calculate the minimum blocksize to read at each lod level. Clip to
the survey size. Also compute the memory needed to hold one buffer
for each lod. Note that the genlod algorithm currently assumes
that the block size is the same for all levels except for the
clipping. And it currently handles clipping itself and might not
like us to do so. Currently this function is not very useful.
"""
blocksizes = np.zeros((nlods, 3), dtype=np.int64)
ss = np.array(surveysize, dtype=np.int64)
bs = np.array([2*factor[0]*bricksize[0],
2*factor[1]*bricksize[1],
ss[2]], dtype=np.int64)
iterations = 0
for lod in range(nlods):
bs = np.minimum(bs, ss)
blocksizes[lod] = bs
iterations += np.prod((ss+bs-1) // bs)
ss = (ss + 1) // 2
bytesused = np.sum(np.prod(blocksizes, axis=1)) * int(np.dtype(dtype).itemsize)
returntype = namedtuple("BlockSizeInfo", "blocksizes bytesused iterations")
result = returntype(blocksizes, bytesused, iterations)
print(result)
return result
|
defc1f11d8f3684ccce4a7df7db6c011848187af
| 3,644,336
|
from ._backend import _check_backend
from ._kit2fiff_gui import Kit2FiffFrame
from ..utils import _check_mayavi_version  # assumed location of this helper
def kit2fiff():
"""Convert KIT files to the fiff format.
The recommended way to use the GUI is through bash with::
$ mne kit2fiff
"""
_check_mayavi_version()
_check_backend()
gui = Kit2FiffFrame()
gui.configure_traits()
return gui
|
b57b74d036378b1265e991b8c49d55bce41807c0
| 3,644,337
|
import numpy as np
import keras
from tqdm import tqdm
def sweep_dec_given_x(full_model, z_dec_model, sample1, sample2, sample_layer_name,
sweep_z_samples=False,
nb_samples=10,
nargout=1,
tqdm=tqdm):
"""
sweep the latent space given two samples in the original space
specifically, get z_mu = enc(x) for both samples, and sweep between those z_mus
"sweep_z_samples" does a sweep between two samples, rather than between two z_mus.
Example:
sample_layer_name='img-img-dense-vae_ae_dense_sample'
"""
# get a model that also outputs the samples z
full_output = [*full_model.outputs,
full_model.get_layer(sample_layer_name).get_output_at(1)]
full_model_plus = keras.models.Model(full_model.inputs, full_output)
# get full predictions for these samples
pred1 = full_model_plus.predict(sample1[0])
pred2 = full_model_plus.predict(sample2[0])
img1 = sample1[0]
img2 = sample2[0]
# sweep range
x_range = np.linspace(0, 1, nb_samples)
# prepare outputs
outs = [None] * nb_samples
for xi, x in enumerate(tqdm(x_range)):
if sweep_z_samples:
z = x * pred1[3] + (1-x) * pred2[3]
else:
z = x * pred1[1] + (1-x) * pred2[1]
if isinstance(sample1[0], (list, tuple)): # assuming prior or something like that
outs[xi] = z_dec_model.predict([z, *sample1[0][1:]])
else:
outs[xi] = z_dec_model.predict(z)
if nargout == 1:
return outs
else:
return (outs, [pred1, pred2])
|
9a5bccfa85f0bdda5953b8a72c66238e0ff5d548
| 3,644,338
|
def process(observation, current_game_state):
"""
Args:
observation: An observation, which agents get as an input from kaggle environment.
current_game_state: An object provided by kaggle to simplify game info extraction.
Returns:
processed_observations: A prepared observation to save to the buffer.
"""
global units_actions_dict
player = current_game_state.players[observation.player]
opponent = current_game_state.players[(observation.player + 1) % 2]
width, height = current_game_state.map.width, current_game_state.map.height
shift = int((MAX_MAP_SIDE - width) / 2) # to make all feature maps 32x32
turn = current_game_state.turn
player_units_coords = {}
player_city_tiles_coords = {}
player_research_points = player.research_points
player_city_tiles_count = player.city_tile_count
player_cities_count = len(player.cities)
player_units_count = len(player.units)
player_workers_count = 0
player_carts_count = 0
for unit in player.units:
if unit.is_worker():
player_workers_count += 1
elif unit.is_cart():
player_carts_count += 1
else:
raise ValueError
opponent_research_points = opponent.research_points
opponent_city_tiles_count = opponent.city_tile_count
opponent_cities_count = len(opponent.cities)
opponent_units_count = len(opponent.units)
opponent_workers_count = 0
opponent_carts_count = 0
for unit in opponent.units:
if unit.is_worker():
opponent_workers_count += 1
elif unit.is_cart():
opponent_carts_count += 1
else:
raise ValueError
current_cycle, to_next_day, to_next_night, is_night = get_timing(turn)
# map data, define resources and roads, 0 or 1 for bool, 0 to around 1 for float;
# layers:
# 0 - a resource
# 1 - is wood
# 2 - wood amount
# 3 - is coal
# 4 - coal amount
# 5 - is uranium
# 6 - uranium amount
# 7 - fuel equivalent
# 8 - if a resource is available for the player, 1 when ready
# 9 - a road lvl
# 10 - 19 for coordinates
# number_of_resources_layers = 20
# A1 = np.zeros((number_of_resources_layers, MAX_MAP_SIDE, MAX_MAP_SIDE), dtype=np.half)
# for yy in range(height):
# for xx in range(width):
# cell = current_game_state.map.get_cell(xx, yy)
# x, y = yy + shift, xx + shift
# if cell.has_resource():
# A1[0, x, y] = 1 # a resource at the point
# resource = cell.resource
# if resource.type == "wood":
# A1[1, x, y] = 1
# wood_amount = resource.amount
# A1[2, x, y] = wood_amount / WOOD_BOUND
# fuel = wood_amount * WOOD_FUEL_VALUE
# A1[8, x, y] = 1 # wood is always available
# elif resource.type == "coal":
# A1[3, x, y] = 1
# coal_amount = resource.amount
# A1[4, x, y] = coal_amount / COAL_BOUND
# fuel = coal_amount * COAL_FUEL_VALUE
# A1[8, x, y] = min(player_research_points / COAL_RESEARCH_POINTS, 1)
# elif resource.type == "uranium":
# A1[5, x, y] = 1
# uran_amount = resource.amount
# A1[6, x, y] = uran_amount / URAN_BOUND
# fuel = uran_amount * URAN_FUEL_VALUE
# A1[8, x, y] = min(player_research_points / URAN_RESEARCH_POINTS, 1)
# else:
# raise ValueError
# A1[7, x, y] = fuel / FUEL_BOUND
# A1[9, x, y] = cell.road / MAX_ROAD
# A1[10:15, x, y] = to_binary(np.asarray((x,), dtype=np.uint8), m=5)
# A1[15:20, x, y] = to_binary(np.asarray((y,), dtype=np.uint8), m=5)
# map data, define resources and roads, 0 or 1 for bool, 0 to around 1 for float;
# layers:
# 0 - a resource
# 1 - is available
# 2 - amount
# 3 - fuel equivalent
# 4 - a road lvl
# 5 - 14 for coordinates
# 15 - next available resource
number_of_resources_layers = 16
A1 = np.zeros((number_of_resources_layers, MAX_MAP_SIDE, MAX_MAP_SIDE), dtype=np.half)
for yy in range(height):
for xx in range(width):
cell = current_game_state.map.get_cell(xx, yy)
x, y = yy + shift, xx + shift
if cell.has_resource():
A1[0, x, y] = 1 # a resource at the point
resource = cell.resource
fuel = 0
if resource.type == "wood":
A1[1, x, y] = 1
wood_amount = resource.amount
A1[2, x, y] = wood_amount / WOOD_BOUND
fuel = wood_amount * WOOD_FUEL_VALUE
elif resource.type == "coal":
if player_research_points >= COAL_RESEARCH_POINTS:
A1[1, x, y] = 1
coal_amount = resource.amount
A1[2, x, y] = coal_amount / COAL_BOUND
fuel = coal_amount * COAL_FUEL_VALUE
else:
A1[15, x, y] = 1
elif resource.type == "uranium":
if player_research_points >= URAN_RESEARCH_POINTS:
A1[1, x, y] = 1
uran_amount = resource.amount
A1[2, x, y] = uran_amount / URAN_BOUND
fuel = uran_amount * URAN_FUEL_VALUE
elif player_research_points >= URAN_RESEARCH_POINTS - 50:
A1[15, x, y] = 1
else:
raise ValueError
A1[3, x, y] = fuel / FUEL_BOUND
A1[4, x, y] = cell.road / MAX_ROAD
A1[5:10, x, y] = to_binary(np.asarray((x,), dtype=np.uint8), m=5)
A1[10:15, x, y] = to_binary(np.asarray((y,), dtype=np.uint8), m=5)
# define city tiles, 0 or 1 for bool, 0 to around 1 for float;
# layers:
number_of_main_layers = 39
A2 = np.zeros((number_of_main_layers, MAX_MAP_SIDE, MAX_MAP_SIDE), dtype=np.half)
# 0 - a unit
# 1 - is player
# 2 - is opponent
# 3 - at the city tile
# 4 - first place in the city tile is occupied by the unit - fill later (in get_separate_outputs)
# 5 - second place is occupied by the unit, and the first was occupied before - fill later
# 6 - third place is occupied - fill later
# 7 - forth place is occupied - fill later
# 8 - the place number is more than 4th - fill later
# 9 - is worker - X0
# 10 - is cart - X1
# 11 - can act - X2
# 12 - can build - X3
# 13 - cargo wood - X4
# 14 - cargo coal - X5
# 15 - cargo uranium - X6
# 16 - cargo space left - X7
# 17 - fuel equivalent - X8
# 18 - is city tile
# 19 - is player
# 20 - is opponent
# 21 - can act
# 22 - amount of city tiles in the city, which the city tile belongs to
# 23 - current city upkeep
# 24 - fuel amount
# 25 - ratio if city can survive, 1 and more means it can
# 26 - amount of all friendly city tiles
# 27 - amount of cities
# 28 - units build limit reached (workers + carts == city tiles)
# 29 - number of workers
# 30 - number of carts
# 31 - number of friendly units
# 32 - research progress for coal
# 33 - research progress for uranium
# 34 - progress (from 0 to 1) until next day
# 35 - progress until next night
# 36 - progress until finish
# 37 - is night
# 38 - current cycle
# start with city tiles to know their positions to fill units cells
for k, city in list(player.cities.items()) + list(opponent.cities.items()):
if city.team == player.team:
city_tiles_count = player_city_tiles_count
cities_count = player_cities_count
units_count = player_units_count
workers_count = player_workers_count
carts_count = player_carts_count
research_points = player_research_points
elif city.team == opponent.team:
city_tiles_count = opponent_city_tiles_count
cities_count = opponent_cities_count
units_count = opponent_units_count
workers_count = opponent_workers_count
carts_count = opponent_carts_count
research_points = opponent_research_points
else:
raise ValueError
current_light_upkeep = city.get_light_upkeep()
current_fuel = city.fuel
current_city_tiles_count = 0
for _ in city.citytiles:
current_city_tiles_count += 1
for city_tile in city.citytiles:
# city tile group
y, x = city_tile.pos.x + shift, city_tile.pos.y + shift
A2[18, x, y] = 1
if city_tile.team == player.team:
A2[19, x, y] = 1
elif city_tile.team == opponent.team:
A2[20, x, y] = 1
else:
raise ValueError
if city_tile.can_act():
A2[21, x, y] = 1
if city_tile.team == player.team:
player_city_tiles_coords[f"ct_{x}_{y}"] = (x, y) # to save only the operable units
A2[22, x, y] = current_city_tiles_count / CITY_TILES_IN_CITY_BOUND
A2[23, x, y] = UPKEEP_BOUND_PER_TILE / current_light_upkeep
A2[24, x, y] = current_fuel / FUEL_BOUND
A2[25, x, y] = min(1, current_fuel / (min(10, to_next_day) * current_light_upkeep)) # ratio to survive
# common group
A2[26, x, y] = city_tiles_count / CITY_TILES_BOUND
A2[27, x, y] = cities_count / CITIES_BOUND
if units_count == city_tiles_count:
A2[28, x, y] = 1
A2[29, x, y] = workers_count / WORKERS_BOUND
A2[30, x, y] = carts_count / CARTS_BOUND
A2[31, x, y] = units_count / UNITS_BOUND
A2[32, x, y] = min(research_points / COAL_RESEARCH_POINTS, 1)
A2[33, x, y] = min(research_points / URAN_RESEARCH_POINTS, 1)
A2[34, x, y] = 1 - to_next_day / CYCLE_LENGTH
A2[35, x, y] = 1 - to_next_night / CYCLE_LENGTH
A2[36, x, y] = turn / MAX_DAYS
A2[37, x, y] = is_night
A2[38, x, y] = current_cycle / TOTAL_CYCLES
for unit in player.units + opponent.units:
# unit group
if unit.team == player.team:
city_tiles_count = player_city_tiles_count
cities_count = player_cities_count
units_count = player_units_count
workers_count = player_workers_count
carts_count = player_carts_count
research_points = player_research_points
elif unit.team == opponent.team:
city_tiles_count = opponent_city_tiles_count
cities_count = opponent_cities_count
units_count = opponent_units_count
workers_count = opponent_workers_count
carts_count = opponent_carts_count
research_points = opponent_research_points
else:
raise ValueError
y, x = unit.pos.x + shift, unit.pos.y + shift
A2[0, x, y] = 1
if unit.team == player.team:
A2[1, x, y] = 1
elif unit.team == opponent.team:
A2[2, x, y] = 1
else:
raise ValueError
is_unit_at_home = 1 if A2[18, x, y] == 1 else 0
A2[3, x, y] = is_unit_at_home
X = np.zeros(9, dtype=np.half)
if unit.is_worker():
X[0] = 1
elif unit.is_cart():
X[1] = 1
else:
raise ValueError
if unit.can_act():
X[2] = 1
if unit.can_build(current_game_state.map):
X[3] = 1
X[4] = unit.cargo.wood / WORKERS_CARGO
X[5] = unit.cargo.coal / WORKERS_CARGO
X[6] = unit.cargo.uranium / WORKERS_CARGO
X[7] = unit.get_cargo_space_left() / WORKERS_CARGO
X[8] = (unit.cargo.wood * WOOD_FUEL_VALUE +
unit.cargo.coal * COAL_FUEL_VALUE +
unit.cargo.uranium * URAN_FUEL_VALUE) / FUEL_BOUND
# many units can share the same position at home,
# so save the unique unit parameters in the X array and store it in a dictionary if the unit is at home;
# if the unit is not at home it has a unique position, so put X directly into the A2 array
if is_unit_at_home:
if unit.can_act() and unit.team == player.team:
player_units_coords[unit.id] = ((x, y), (X, unit.is_worker()))
else:
if unit.can_act() and unit.team == player.team:
player_units_coords[unit.id] = ((x, y), (None, unit.is_worker()))
A2[9:18, x, y] = X
# common group
A2[26, x, y] = city_tiles_count / CITY_TILES_BOUND
A2[27, x, y] = cities_count / CITIES_BOUND
if units_count == city_tiles_count:
A2[28, x, y] = 1
A2[29, x, y] = workers_count / WORKERS_BOUND
A2[30, x, y] = carts_count / CARTS_BOUND
A2[31, x, y] = units_count / UNITS_BOUND
A2[32, x, y] = min(research_points / COAL_RESEARCH_POINTS, 1)
A2[33, x, y] = min(research_points / URAN_RESEARCH_POINTS, 1)
A2[34, x, y] = 1 - to_next_day / CYCLE_LENGTH
A2[35, x, y] = 1 - to_next_night / CYCLE_LENGTH
A2[36, x, y] = turn / MAX_DAYS
A2[37, x, y] = is_night
A2[38, x, y] = current_cycle / TOTAL_CYCLES
A = np.concatenate((A2, A1), axis=0)
# define headers
# layers:
# 0 - an operable one
# 1 - is worker
# 2 - is cart
# 3 - is city tile
# 4 - prev pos for units
# 5 - prev prev pos for units
number_of_header_layers = 6
units_headers = {}
if player_units_coords:
for k, ((x, y), (X, is_worker)) in player_units_coords.items():
head = np.zeros((number_of_header_layers, MAX_MAP_SIDE, MAX_MAP_SIDE), dtype=np.half)
worker = np.array([1, 1, 0, 0], dtype=np.half)
cart = np.array([1, 0, 1, 0], dtype=np.half)
head[:4, x, y] = worker if is_worker else cart
if k in units_actions_dict.keys():
units_actions_dict[k].append((x, y))
unit_prev_pos = units_actions_dict[k][-2]
if len(units_actions_dict[k]) > 2:
unit_prev_prev_pos = units_actions_dict[k][-3]
else:
unit_prev_prev_pos = units_actions_dict[k][-2]
else:
units_actions_dict[k] = []
units_actions_dict[k].append((x, y))
unit_prev_pos = (x, y)
unit_prev_prev_pos = (x, y)
head[4, unit_prev_pos[0], unit_prev_pos[1]] = 1
head[5, unit_prev_prev_pos[0], unit_prev_prev_pos[1]] = 1
head = np.moveaxis(head, 0, -1)
units_headers[k] = (head, (x, y), X, is_worker)
city_tiles_headers = {}
if player_city_tiles_coords:
for k, (x, y) in player_city_tiles_coords.items():
head = np.zeros((number_of_header_layers, MAX_MAP_SIDE, MAX_MAP_SIDE), dtype=np.half)
head[:4, x, y] = np.array([1, 0, 0, 1], dtype=np.half)
head = np.moveaxis(head, 0, -1)
city_tiles_headers[k] = head
B = np.moveaxis(A, 0, -1)
outputs = {"stem": B,
"units_headers": units_headers,
"city_tiles_headers": city_tiles_headers}
return outputs
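# A hypothetical sketch of the to_binary helper used above (not shown in the
# original source): it unpacks uint8 values into their m least-significant
# bits, one plane per bit, matching the 5-bit coordinate encodings.
import numpy as np
def to_binary(values, m=8):
    return np.unpackbits(values[:, None], axis=1)[:, -m:].astype(np.half).ravel()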
|
3a54ad62fa341ca57528c5ee32b45d749982f286
| 3,644,339
|
def source_files(goto, wkdir, srcdir=None):
"""Source files appearing in symbol table.
Source file path names in symbol table are absolute or relative to
wkdir. If srcdir is given, return only files under srcdir.
"""
wkdir = srcloct.abspath(wkdir)
srcs = [dfn['file']
for dfn in parse_symbol_table(symbol_table(goto), wkdir)]
srcs = [src for src in srcs if src and not srcloct.is_builtin(src)]
if srcdir:
srcdir = srcloct.abspath(srcdir)
srcs = [src for src in srcs if src.startswith(srcdir)]
return sorted(set(srcs))
|
bacb86942b5f82ecc902699b81de5d92868ddd57
| 3,644,340
|
import cv2 as cv
def textBlurBackground(img, text, font, fontScale, textPos, textThickness=1, textColor=(0, 255, 0), kneral=(33, 33),
pad_x=3, pad_y=3):
"""
Draw text with background blured, control the blur value, with kernal(odd, odd)
@param img:(mat) which you want to draw text
@param text: (string) text you want draw
@param font: fonts face, like FONT_HERSHEY_COMPLEX, FONT_HERSHEY_PLAIN etc.
@param fontScale: (double) the size of text, how big it should be.
@param textPos: tuple(x,y) position where you want to draw text
@param textThickness:(int) fonts weight, how bold it should be.
@param textColor: tuple(BGR), values -->0 to 255 each
@param kneral: tuple(3,3) int as odd number: higher the value, more blurry background would be
@param pad_x: int(pixels) padding of in x direction
@param pad_y: int(pixels) padding of in y direction
@return: img mat, with text drawn, with background blured
call the function:
img =textBlurBackground(img, 'Blured Background Text', cv2.FONT_HERSHEY_COMPLEX, 0.9, (20, 60),2, (0,255, 0), (49,49), 13, 13 )
"""
(t_w, t_h), _ = cv.getTextSize(text, font, fontScale, textThickness) # getting the text size
x, y = textPos
blur_roi = img[y - pad_y - t_h: y + pad_y, x - pad_x:x + t_w + pad_x] # cropping the text background
img[y - pad_y - t_h: y + pad_y, x - pad_x:x + t_w + pad_x] = cv.blur(blur_roi,
                                                                     kneral) # merging the blurred background into img
cv.putText(img, text, textPos, font, fontScale, textColor, textThickness)
# cv.imshow('blur roi', blur_roi)
# cv.imshow('blured', img)
return img
|
dd4c49a7cf15af4273e1b3689fa6caabe8242ea0
| 3,644,341
|
import theano.tensor as T
import treeano
def local_response_normalization_2d_v2(in_vw, alpha, k, beta, n):
"""
cross-channel local response normalization for 2D feature maps
- input is bc01
output[i]
= value of the i-th channel
= input[i] / (k + alpha * sum(input[j]^2 for j) ** beta)
- where j is over neighboring channels (from i - n // 2 to i + n // 2)
This code is adapted from pylearn2.
https://github.com/lisa-lab/pylearn2/blob/master/LICENSE.txt
"""
assert n % 2 == 1, "n must be odd"
in_var = in_vw.variable
b, ch, r, c = in_vw.symbolic_shape()
half_n = n // 2
input_sqr = T.sqr(in_var)
extra_channels = T.zeros((b, ch + 2 * half_n, r, c))
input_sqr = T.set_subtensor(extra_channels[:, half_n:half_n + ch, :, :],
input_sqr)
scale = k + alpha * treeano.utils.smart_sum([input_sqr[:, i:i + ch, :, :]
for i in range(n)])
scale = scale ** beta
return in_var / scale
|
23f810dbd4f36d1817c57ceeabbacad1cf8e0239
| 3,644,342
|
from flask import request, jsonify, make_response, abort
def add_new_user():
"""
This function adds a new user
:return: Response Code
"""
newuser = {}
if request.method == "POST":
try:
newuser['username'] = str(request.data.get('username').strip())
newuser['first_name'] = str(request.data.get('first_name').strip())
newuser['last_name'] = str(request.data.get('last_name').strip())
newuser['email'] = str(request.data.get('email').strip())
newuser['password'] = str(request.data.get('password').strip())
newuser['verification_code'] = str(request.data.get(
'verification_code').strip())
except Exception as e:
print(e)
abort(500)
user = User(**newuser)
user.save()
return make_response(jsonify(
    status=201, msg="User {} successfully added to database".format(user.username)), 201)
|
32abdd61ff4ad574a0e097553d3332f6f67d57dd
| 3,644,344
|
from tldextract import extract
def has_wildcard(url) -> bool:
"""
Check if the url contains a wildcard in last subdomain.
:param url: The url to check
:type url: str
:return: True if the url contains a wildcard in the last subdomain, False otherwise
:rtype: bool
"""
subdomain = extract(url).subdomain
return subdomain.split(".")[0] == "*"
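# Example (a minimal sketch, assuming tldextract's usual lexical parsing):
assert has_wildcard("*.example.com") is True
assert has_wildcard("www.example.com") is False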
|
5dbf1a0220ad6c4af3bfe344a3aaa97473918995
| 3,644,345
|
import numpy as np
def tmle_calculator(y, ystar1, ystar0, ystara, h1w, h0w, haw, splits,
measure='ate', lower_bound=None, upper_bound=None):
"""Function to calculate TMLE estimates for SingleCrossfitTMLE, and DoubleCrossfitTMLE
"""
if measure in ["ate", "risk_difference"]:
# Unbounding if continuous outcome (ate)
if measure == "ate":
# Unbounding continuous outcomes
y = tmle_unit_unbound(y, mini=lower_bound, maxi=upper_bound)
ystar1 = tmle_unit_unbound(ystar1, mini=lower_bound, maxi=upper_bound)
ystar0 = tmle_unit_unbound(ystar0, mini=lower_bound, maxi=upper_bound)
ystara = tmle_unit_unbound(ystara, mini=lower_bound, maxi=upper_bound)
# Point Estimate
estimate = np.mean(ystar1 - ystar0)
# Variance estimate
variance = []
for s in set(splits):
ys = y[splits == s]
ystar1s = ystar1[splits == s]
ystar0s = ystar0[splits == s]
ystaras = ystara[splits == s]
haws = haw[splits == s]
ic = haws * (ys - ystaras) + (ystar1s - ystar0s) - estimate
variance.append(np.var(ic, ddof=1))
return estimate, (np.mean(variance) / y.shape[0])
elif measure == 'risk_ratio':
# Point Estimate
estimate = np.mean(ystar1) / np.mean(ystar0)
variance = []
for s in set(splits):
ys = y[splits == s]
ystar1s = ystar1[splits == s]
ystar0s = ystar0[splits == s]
ystaras = ystara[splits == s]
h1ws = h1w[splits == s]
h0ws = h0w[splits == s]
ic = (1/np.mean(ystar1s) * (h1ws * (ys - ystaras)) + ystar1s - np.mean(ystar1s) -
(1/np.mean(ystar0s) * (-1 * h0ws * (ys - ystaras)) + ystar0s - np.mean(ystar0s)))
variance.append(np.var(ic, ddof=1))
return estimate, (np.mean(variance) / y.shape[0])
elif measure == 'odds_ratio':
# Point Estimate
estimate = (np.mean(ystar1) / (1-np.mean(ystar1))) / (np.mean(ystar0) / (1-np.mean(ystar0)))
variance = []
for s in set(splits):
ys = y[splits == s]
ystar1s = ystar1[splits == s]
ystar0s = ystar0[splits == s]
ystaras = ystara[splits == s]
h1ws = h1w[splits == s]
h0ws = h0w[splits == s]
ic = ((1-np.mean(ystar1s))/np.mean(ystar1s)*(h1ws*(ys - ystaras) + ystar1s) -
(1-np.mean(ystar0s))/np.mean(ystar0s)*(-1*h0ws*(ys - ystaras) + ystar0s))
variance.append(np.var(ic, ddof=1))
return estimate, (np.mean(variance) / y.shape[0])
else:
raise ValueError("Invalid measure requested within function: tmle_calculator. Input measure is " +
str(measure) + " but only 'ate', 'risk_difference', 'risk_ratio', and "
"'odds_ratio' are accepted.")
|
36f6b131044bd3b53044a4bfe0954eff1325bb59
| 3,644,346
|
def gen_gap(Pn, T, Q):
"""Runs the generalization gap test. This test
simply checks the difference between the likelihood
assigned to the training set versus that assigned to
a held out test set.
Inputs:
Pn: (n X d) np array containing the held out test sample
of dimension d
T: (l X d) np array containing the training sample of
dimension d
Q: trained model of type sklearn.neighbors.KernelDensity
Outputs:
log_lik_gap: scalar representing the difference of the log
likelihoods of Pn and T
"""
return Q.score(T) - Q.score(Pn)
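# Example (a minimal sketch) using scikit-learn's KernelDensity:
import numpy as np
from sklearn.neighbors import KernelDensity
rng = np.random.default_rng(0)
T_train = rng.normal(size=(500, 2))
Pn_test = rng.normal(size=(500, 2))
Q = KernelDensity(bandwidth=0.5).fit(T_train)
gap = gen_gap(Pn_test, T_train, Q)   # close to zero when Q generalizes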
|
d57d16c06d05cea86e6f6ea89484574f20500170
| 3,644,347
|
import logging
from maya import cmds
LOG = logging.getLogger(__name__)  # module-level logger assumed by the function below
def get_shapes(node, intermediate=False, exclusive=False):
"""Get the shapes of given node.
Args:
node (str): Node to query its shapes
intermediate (bool): Get intermediate shapes when True.
exclusive (bool): Only return the intermediate shapes if True.
Please note that the intermediate flag must be True as well.
Returns:
list: The shapes found below given node.
"""
# if given node is a list, assume first element
if isinstance(node, list):
node = node[0]
LOG.info("Given node is a list. Using first element.")
# return as list if given node is already a shape
if cmds.objectType(node, isAType="shape"):
return [node]
# query shapes
shapes = cmds.listRelatives(
node, shapes=True, type="deformableShape", path=True
)
shapes = shapes or []
# separate shapes orig
orig = []
for each in list(shapes): # duplicated `shapes` object to remove safely
if cmds.ls(each, intermediateObjects=True):
orig.append(each)
shapes.remove(each)
if not intermediate:
return shapes
if exclusive:
return orig
return shapes + orig
|
9e6d1c3e9030d1ce2804953cc7316d53840d3195
| 3,644,348
|
import cvxpy as cp
import jax.numpy as jnp
def solve_mip_mlp_elided(verif_instance):
"""Compute optimal attack loss for MLPs, via exactly solving MIP."""
assert MIP_SOLVERS, 'No MIP solvers installed with cvxpy.'
assert verif_instance.type == utils.VerifInstanceTypes.MLP_ELIDED
params, bounds, obj, obj_const = (
verif_instance.params, verif_instance.bounds, verif_instance.obj,
verif_instance.const)
layer_sizes = utils.mlp_layer_sizes(params)
on_state = []
post_activations = [cp.Variable((1, layer_sizes[0]))]
pre_activations = []
constraints = []
for (i, param) in enumerate(params):
W, b = param
b = jnp.reshape(b, (1, b.size))
on_state.append(cp.Variable((1, b.size), boolean=True))
pre_activations.append(cp.Variable((1, b.size)))
post_activations.append(cp.Variable((1, b.size)))
# Linear relaxation of ReLU constraints
constraints += [pre_activations[-1] == post_activations[-2]@W + b]
constraints += [post_activations[-1] >= pre_activations[-1]]
constraints += [post_activations[-1] >= 0]
# If ReLU is off, post activation is non-positive. Otherwise <= ub
constraints += [post_activations[-1] <= cp.multiply(on_state[-1],
bounds[i+1].ub)]
# If ReLU is off, pre-activation is non-positive. Otherwise <= ub_pre
constraints += [pre_activations[-1] <= cp.multiply(on_state[-1],
bounds[i+1].ub_pre)]
# If ReLU is on, post-activation == pre-activation
# Define <= here, >= constraint added above.
constraints += [post_activations[-1]-pre_activations[-1] <=
cp.multiply(1-on_state[-1],
bounds[i+1].ub-bounds[i+1].lb_pre)]
# Optionally, include IBP bounds to speed up MIP solving
# Post activations are within bounds
# i=0 case encodes input constraint
for (i, post) in enumerate(post_activations):
constraints += [post <= bounds[i].ub]
constraints += [post >= bounds[i].lb]
# Pre activations are within bounds
for (i, pre) in enumerate(pre_activations):
constraints += [pre <= bounds[i+1].ub_pre]
constraints += [pre >= bounds[i+1].lb_pre]
# Set objective over final post-activations
obj_cp = cp.sum(cp.multiply(obj, post_activations[-1]))
# Define and solve problem
problem = cp.Problem(cp.Maximize(obj_cp), constraints)
# NB: Originally, we used cp.ECOS_BB here, but cvxpy 1.1 drops support,
# so we just use the first available MIP solver (which is dependent on user
# installation).
problem.solve(solver=MIP_SOLVERS[0])
# Report results
info = {
'problem': problem,
'post': post_activations,
'pre': pre_activations,
}
return obj_cp.value + obj_const, info
|
89ef1f133598feaf73d265411b5ed6597736ddf5
| 3,644,349
|
from scipy.special import erfc
def compute_kullback_leibler_check_statistic(n=100, prngstate=None):
"""Compute the lowest of the survival function and the CDF of the exact KL
divergence KL(N(mu1,s1)||N(mu2,s2)) w.r.t. the sample distribution of the
KL divergence drawn by computing log(P(x|N(mu1,s1)))-log(P(x|N(mu2,s2)))
over a sample x~N(mu1,s1). If we are computing the KL divergence
accurately, the exact value should fall squarely in the sample, and the
tail probabilities should be relatively large.
"""
if prngstate is None:
raise TypeError('Must explicitly specify numpy.random.RandomState')
mu1 = mu2 = 0
s1 = 1
s2 = 2
exact = gaussian_kl_divergence(mu1, s1, mu2, s2)
sample = prngstate.normal(mu1, s1, n)
lpdf1 = gaussian_log_pdf(mu1, s1)
lpdf2 = gaussian_log_pdf(mu2, s2)
estimate, std = kl.kullback_leibler(sample, lpdf1, lpdf2)
# This computes the minimum of the left and right tail probabilities of the
# exact KL divergence vs a gaussian fit to the sample estimate. There is a
# distinct negative skew to the samples used to compute `estimate`, so this
# statistic is not uniform. Nonetheless, we do not expect it to get too
# small.
return erfc(abs(exact - estimate) / std) / 2
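# A minimal sketch of the closed-form helper assumed above, giving
# KL(N(mu1, s1**2) || N(mu2, s2**2)) for standard deviations s1, s2:
import numpy as np
def gaussian_kl_divergence(mu1, s1, mu2, s2):
    return np.log(s2 / s1) + (s1**2 + (mu1 - mu2)**2) / (2 * s2**2) - 0.5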
|
8c7036e89a3bfd347b613efac76c1b8dffde2cfa
| 3,644,350
|
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import utils as saved_model_utils
def build_signature(inputs, outputs):
"""Build the signature for use when exporting the graph.
Args:
inputs: a dictionary from tensor name to tensor
outputs: a dictionary from tensor name to tensor
Returns:
The signature, a SignatureDef proto, specifies the input/output tensors
to bind when running prediction.
"""
signature_inputs = {
key: saved_model_utils.build_tensor_info(tensor)
for key, tensor in inputs.items()
}
signature_outputs = {
key: saved_model_utils.build_tensor_info(tensor)
for key, tensor in outputs.items()
}
signature_def = signature_def_utils.build_signature_def(
signature_inputs, signature_outputs,
signature_constants.PREDICT_METHOD_NAME)
return signature_def
|
ac760f7efcb27cf985aa9048015ba3be77230bc4
| 3,644,351
|
def fuse_depthwise_conv2d(input_graph_def):
"""Modifies the provided graph by fusing a set of ops into a single
_FusedDepthwiseConv2d op.
DepthwiseConv2dNative + BiasAdd + Activation => _FusedDepthwiseConv2dNative
Args:
input_graph_def: A GraphDef containing a model.
Returns:
Modified graph with FusedDepthwiseConv2dNative ops generated, and modified
weights.
Raises:
ValueError: If the graph is badly formed with duplicate node names.
"""
# Three-pass approach: first find the pattern
# DepthwiseConv2dNative + BiasAdd + Activation,
# then DepthwiseConv2dNative + BiasAdd,
# then DepthwiseConv2dNative + Activation
graph_def = _fuse_depthwise_conv2d_with_match_function(
input_graph_def, _find_contraction_with_bias_and_activation)
graph_def = _fuse_depthwise_conv2d_with_match_function(
graph_def, _find_contraction_with_bias)
graph_def = _fuse_depthwise_conv2d_with_match_function(
graph_def, _find_contraction_with_activation)
return graph_def
|
bbfdfbc02debfcad5e1c965c09c9039c3f15faac
| 3,644,352
|
import data_algebra
def pandas_to_example_str(obj, *, local_data_model=None) -> str:
"""
Convert data frame to a Python source code string.
:param obj: data frame to convert.
:param local_data_model: data model to use.
:return: Python source code representation of obj.
"""
if local_data_model is None:
local_data_model = data_algebra.default_data_model
pd_module_name = local_data_model.presentation_model_name
if not local_data_model.is_appropriate_data_instance(obj):
raise TypeError("Expect obj to be local_data_model.pd.DataFrame")
obj = obj.reset_index(drop=True, inplace=False)
nrow = obj.shape[0]
pandas_string = pd_module_name + ".DataFrame({"
for k in obj.columns:
col = obj[k]
nulls = local_data_model.bad_column_positions(col)
cells = ["None" if nulls[i] else col[i].__repr__() for i in range(nrow)]
pandas_string = (
pandas_string + "\n " + k.__repr__() + ": [" + ", ".join(cells) + "],"
)
pandas_string = pandas_string + "\n })"
return pandas_string
|
a0c1bd23797413b739c496e621c43a4f43293c17
| 3,644,353
|
from collections import Counter
def get_results_object_model(target_node, paths_dict, name_to_description, q1_doid_to_disease, probs=False):
"""
Returns pathway results as an object model
:param target_node: target_node DOID:1234
:param paths_dict: a dictionary (keys OMIM id's) with values (path_name,path_type)
:param name_to_description: a dictionary to translate between source_node and genetic condition name
:param q1_doid_to_disease: a dictionary to translate between target_node and disease name
:param probs: optional probability of the OMIM being the right one
:return: ``dict``
"""
ret_obj = dict()
source_node_list = paths_dict.keys()
if len(source_node_list) > 0:
if target_node in q1_doid_to_disease:
doid_name = q1_doid_to_disease[target_node]
else:
doid_name = target_node
ret_obj['target_disease'] = doid_name
ret_source_nodes_dict = dict()
ret_obj['source_genetic_conditions'] = ret_source_nodes_dict
source_node_names = []
for source_node in source_node_list:
if source_node in name_to_description:
source_node_names.append(name_to_description[source_node])
else:
source_node_names.append(source_node)
for source_node in source_node_list:
source_node_dict = {}
path_names, path_types = paths_dict[source_node]
if len(path_names) == 1:
path_list = []
path_list.append({'type': 'node',
'name': source_node,
'desc': name_to_description.get(source_node, '')})
path_names = path_names[0]
path_types = path_types[0]
for index in range(1, len(path_names) - 1):
if index % 2 == 1:
path_list.append({'type': 'rel',
'name': path_types[index]})
else:
path_list.append({'type': 'node',
'name': path_names[index],
'desc': get_node_property(path_names[index], 'name')})
path_list.append({'type': 'node',
'name': target_node,
'desc': q1_doid_to_disease.get(target_node, '')})
if probs:
if source_node in probs:
source_node_dict['conf'] = probs[source_node]
source_node_dict['path'] = path_list
else:
# print(to_print)
if probs:
if source_node in probs:
source_node_dict['conf'] = probs[source_node]
relationships_and_counts_dict = Counter(map(tuple, path_types))
relationships = list(relationships_and_counts_dict.keys())
counts = []
for rel in relationships:
counts.append(relationships_and_counts_dict[rel])
relationships_and_counts = []
for i in range(len(counts)):
relationships_and_counts.append((relationships[i], counts[i]))
relationships_and_counts_sorted = sorted(relationships_and_counts, key=lambda tup: tup[1])
count_list = []
for index in range(len(relationships_and_counts_sorted)):
relationship = relationships_and_counts_sorted[index][0]
count = relationships_and_counts_sorted[index][1]
count_list.append({'count': count,
'reltype': str(relationship)})
source_node_dict['counts'] = count_list
ret_source_nodes_dict[source_node] = source_node_dict
return ret_obj
|
5ac0ba140a80edf12005112330191d910673e34a
| 3,644,354
|
from math import pi, sin, cos, sqrt, atan2
def MaxLonSep( maxarc, baselat ):
"""Calculates the maximum separation in longitude that a point can have
from a reference point at latitude baselat and still be within a given
great circle arc length, maxarc, of the reference point. All quantities
in radians."""
if abs(baselat) + maxarc <= 0.5 * pi:
#result = asin( abs( sin(maxarc) ) / cos( baselat ) )
#result = acos(sqrt(cos(baselat)**2 - sin(maxarc)**2)/cos(baselat))
c = cos( baselat )
s = abs( sin( maxarc ) )
y = s
x = sqrt( ( c + s ) * ( c - s ) )
result = atan2( y, x )
else:
result = pi
return result
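# Example (a minimal sketch): at 60 degrees latitude a 1-degree
# great-circle radius spans roughly 2 degrees of longitude.
from math import radians, degrees
print(degrees(MaxLonSep(radians(1.0), radians(60.0))))   # ~2.0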
|
cffd6639441f548682e47c9af22c194a18d0f9fe
| 3,644,355
|