| content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
|---|---|---|
def validate_config_params(optimo_url, version, access_key):
"""Validates and normalizes the parameters passed to
:class:`optimo.api.OptimoAPI` constructor.
:param optimo_url: string url of the optimoroute's service
:param version: ``int`` or ``str`` denoting the API version
:param access_key: string access key provided by optimoroute
:return: ``tuple`` of the, possibly adjusted, passed parameters.
:raises OptimoError: On providing incomplete or invalid config data
"""
if not optimo_url or not isinstance(optimo_url, basestring):
raise OptimoError("'optimo_url' must be a url string")
validate_url(optimo_url)
if not version or not isinstance(version, basestring) or not \
version.startswith('v'):
raise OptimoError("'version' must be a string denoting the API version "
"you want to use('v1', 'v2', etc")
if not access_key or not isinstance(access_key, basestring):
raise OptimoError("'access_key' must be the string access key provided "
"to you by optimoroute")
return optimo_url, version, access_key
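# A minimal usage sketch (assumes the surrounding optimo module supplies
# OptimoError and validate_url; the URL and key below are hypothetical,
# and 'basestring' implies Python 2):
url, version, key = validate_config_params(
    'https://api.example.com', 'v1', 'some-access-key')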
|
94056115d999a7e6e97cd343f2fd40ae8f99a6d9
| 3,647,997
|
def deque_and_stack():
"""Solution to exercise R-6.14.
Repeat the previous problem using the deque D and an initially empty
stack S.
--------------------------------------------------------------------------
Solution:
--------------------------------------------------------------------------
0. Initial state Deque [1, 2, 3, 4, 5, 6, 7, 8]
Stack []
1. popright() 4 nums to S Deque [1, 2, 3, 4]
from D Stack [8, 7, 6, 5]
2. popright() 1 num from D, Deque [4, 1, 2, 3]
addleft() that num to D Stack [8, 7, 6, 5]
3. addright() 1 nums to D Deque [4, 1, 2, 3, 5]
from S Stack [8, 7, 6]
4. popleft() 1 num from D, Deque [1, 2, 3, 5, 4]
addright() that num to D Stack [8, 7, 6]
5. addright() 3 nums to D Deque [1, 2, 3, 5, 4, 6, 7, 8]
from S Stack []
"""
deq = deque([1, 2, 3, 4, 5, 6, 7, 8])
stack = ArrayStack()
for _ in range(4):
stack.push(deq.pop()) # Step 1
deq.appendleft(deq.pop()) # Step 2
deq.append(stack.pop()) # Step 3
deq.append(deq.popleft()) # Step 4
for _ in range(3):
deq.append(stack.pop()) # Step 5
return deq, stack
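# A usage sketch checking the final state predicted by the docstring trace
# (assumes collections.deque and the textbook ArrayStack with is_empty()):
deq, stack = deque_and_stack()
assert list(deq) == [1, 2, 3, 5, 4, 6, 7, 8]
assert stack.is_empty()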
|
012b6d5916247c34688749d08156a65c5f9b5634
| 3,647,998
|
import re
def apply_subst(name, user):
"""
user.username forced in lowercase (VMware Horizon)
"""
name = re.sub(r'_SCIPER_DIGIT_', user.sciper_digit, name)
name = re.sub(r'_SCIPER_', user.sciper, name)
name = re.sub(r'_USERNAME_', user.username.lower(), name)
name = re.sub(r'_HOME_DIR_', user.home_dir, name)
name = re.sub(r'_GROUPNAME_', user.groupname, name)
name = re.sub(r'_DOMAIN_', user.domain, name)
name = re.sub(r'_UID_', user.uid, name)
name = re.sub(r'_GID_', user.gid, name)
name = re.sub(r'_FSTYPE_', user.automount_fstype, name)
name = re.sub(r'_HOST_', user.automount_host, name)
name = re.sub(r'_PATH_', user.automount_path, name)
name = re.sub(r'_OPTIONS_', user.automount_options, name)
return name
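# A usage sketch with a stand-in user object; the attribute values below are
# hypothetical (the real 'user' comes from the caller's directory context):
from types import SimpleNamespace
user = SimpleNamespace(sciper_digit='1', sciper='123456', username='JDoe',
                       home_dir='/home/jdoe', groupname='staff',
                       domain='example.org', uid='1000', gid='1000',
                       automount_fstype='nfs', automount_host='files',
                       automount_path='/export/jdoe', automount_options='rw')
print(apply_subst('_USERNAME_@_DOMAIN_', user))  # jdoe@example.org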
|
b2df5630cc63ecf0e8468e2eb19019ec4bd9ad2a
| 3,647,999
|
def A_real_deph(Q_deph, Kt_real_deph, deltaT_diff_deph):
"""
Calculates the real heatransfer area.
Parameters
----------
Q_deph : float
The heat load of dephlegmator, [W] , [J/s]
deltaT_diff_deph : float
The coefficient difference of temperatures, [degrees celcium]
Kt_real_deph : float
The heat ransfer coefficient [W/(m**2 * degrees celcium)]
Returns
-------
A_real_deph : float
The real heat ransfer area, [m**2]
References
----------
Романков, формула 4.72, стр.168
"""
return Q_deph / (Kt_real_deph * deltaT_diff_deph)
|
5c70a6e179922f90fbb4fda859d6911eb1f048e6
| 3,648,000
|
from typing import Union
def is_1d_like(oned_like_object: Union[np.ndarray, np.void]) -> bool:
"""
Checks if the input is either a 1D numpy array or a structured numpy row.
Parameters
----------
oned_like_object : Union[numpy.ndarray, numpy.void]
The object to be checked.
Raises
------
TypeError
The input is neither a numpy ndarray -- array-like object -- nor a
numpy void -- a row of a structured numpy array.
Returns
-------
is_1d_like_array : boolean
True if the input is either a 1-dimensional numpy array or a row of a
structured numpy array, False otherwise.
"""
is_1d_like_array = False
if isinstance(oned_like_object, np.void):
is_1d_like_array = is_structured_row(oned_like_object)
elif isinstance(oned_like_object, np.ndarray):
is_1d_like_array = is_1d_array(oned_like_object)
else:
raise TypeError('The input should either be a numpy array-like object '
'(numpy.ndarray) or a row of a structured numpy array '
'(numpy.void).')
return is_1d_like_array
|
c55a3e0e56d9cdce16bd478b882c72fdda5b6eba
| 3,648,001
|
def PDef (inDict):
""" Create TableDesc from the contents of a Python Dictionary
Returns new Table Descriptor
inDict = Python dictionary with values, must be in the form produced
by PGetDict
"""
################################################################
#
outTD = TableDesc(inDict["Table name"])
outTD.me = Obit.TableDescDef(inDict)
# Check
if len(outTD.Dict) <= 0:
raise RuntimeError("Failed to create valid Table Descriptor")
return outTD
# end PDef
|
b2024e0926b1484cf0eabc2874397b38eab39900
| 3,648,002
|
def minf(ar, min_val=nan):
"""
Gets the minimum value in the entire N-D array.
@param ar The array.
"""
sa = shape(ar)
np = 1
for n in sa:
np *= n
ar2 = reshape(ar, np)
ar2 = delete(ar2, get_nan_inds(ar2), 0)
cinds = []
if not isnan(min_val):
for ii in range(0, len(ar2)):
if ar2[ii] <= min_val:
cinds.append(ii)
if len(cinds) > 0:
ar2 = delete(ar2, cinds, 0)
if size(ar2) == 0:
return nan
return min(ar2)
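# Expected behavior, assuming the module star-imports numpy and defines the
# get_nan_inds helper used above:
#   minf(array([[nan, 2.0], [0.5, 3.0]]))             -> 0.5
#   minf(array([[nan, 2.0], [0.5, 3.0]]), min_val=1)  -> 2.0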
|
ee6bf44da7960d63702673d25c4582472190032b
| 3,648,003
|
import random
import bisect
def generate_sector(size: int, object_weight: list) -> dict:
    """
    Generates a sector with weighted spawns.
    Args:
        size: int representing the size of the sector (size x size)
        object_weight: a nested list of object / weight pairs
    Examples:
        generate_sector(6, [["*", 50], ["#", 10]]) would output a map where * is far more common than #
    Returns:
        A dict of lists, each list representing one row of the map data
    """
    if size == 0:
        raise ValueError("The sector size can't be 0")
    size += 1
    output = {}
    placed_player = False
    totals = []
    running_total = 0
    for w in object_weight:
        running_total += w[1]
        totals.append(running_total)
    def next_object():
        """
        Gets a random object from the object / weight list.
        """
        ran = random.random() * totals[-1]
        i = bisect.bisect_right(totals, ran)
        return object_weight[i][0]
    for x in range(1, size):
        row = []
        for y in range(1, size):
            obj = next_object()
            if placed_player is False and obj == "@":
                row.append(obj)
                placed_player = True
                continue
            elif placed_player is True and obj == "@":
                while obj == "@":
                    obj = next_object()
            row.append(obj)
        output[x] = row
    return output
|
514195b66c707b2e0dd67ea47b57fe56c1d28a86
| 3,648,005
|
def rbf_kernel(theta, h=-1):
"""Radial basis function kernel."""
sq_dist = _pdist(theta)
pairwise_dists = _squareform(sq_dist) ** 2
if h < 0: # if h < 0, using median trick
h = _numpy.median(pairwise_dists)
h = _numpy.sqrt(0.5 * h / _numpy.log(theta.shape[0] + 1))
# compute the rbf kernel
Kxy = _numpy.exp(-pairwise_dists / h ** 2 / 2)
dxkxy = -_numpy.matmul(Kxy, theta)
sumkxy = _numpy.sum(Kxy, axis=1)
for i in range(theta.shape[1]):
dxkxy[:, i] = dxkxy[:, i] + _numpy.multiply(theta[:, i], sumkxy)
dxkxy = dxkxy / (h ** 2)
return (Kxy, dxkxy)
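# A usage sketch (assumes _numpy is numpy and _pdist/_squareform wrap
# scipy.spatial.distance, as the names suggest); this kernel-and-gradient
# pair is the shape used in Stein variational gradient descent:
import numpy as _numpy
theta = _numpy.random.randn(50, 2)      # 50 particles in 2-D
Kxy, dxkxy = rbf_kernel(theta)          # kernel matrix and its gradient term
assert Kxy.shape == (50, 50) and dxkxy.shape == (50, 2)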
|
734c24724191693f47438e470784947252acc8bd
| 3,648,006
|
def get_default_out_of_workspace_subcommands():
"""Returns a dict of default out-of-workspace subcommands as <name: `CliCommand`>s
:return: A dict of <name: `CliCommand`>
"""
new_cmd = NewCommand()
return {new_cmd.name(): new_cmd}
|
533adb3b268aea407055917a137b0b58b19e422e
| 3,648,007
|
import http
def test_json_view():
"""Turns a Python object into a response."""
def func(request):
return {'x': 1}
response = decorators.json_view(func)(mock.Mock())
assert isinstance(response, http.HttpResponse)
eq_(response.content, '{"x": 1}')
eq_(response['Content-Type'], 'application/json')
eq_(response.status_code, 200)
|
4b4470d5cac3a5fd7bbefef67dc80beeacdcb384
| 3,648,008
|
def run_model(idx):
"""
Run BART on idx index from dataset.
Args:
idx (int): The index of the dataset.
Returns:
tuple: tuple with
fname: Filename
slice_num: Slice number.
prediction: Reconstructed image.
"""
masked_kspace, reg_wt, fname, slice_num, crop_size, num_low_freqs = dataset[idx]
prediction = cs_total_variation(
args, masked_kspace, reg_wt, crop_size, num_low_freqs
)
return fname, slice_num, prediction
|
57913d5236af9d13d89a2de0e10a74c684c178cf
| 3,648,010
|
def is_tt_tensor(arg) -> bool:
"""Determine whether the object is a `TT-Tensor` or `WrappedTT` with underlying `TT-Tensor`.
:return: `True` if `TT-Tensor` or `WrappedTT(TT-Tensor)`, `False` otherwise
:rtype: bool
"""
return isinstance(arg, TT) or (isinstance(arg, WrappedTT) and
not arg.tt.is_tt_matrix)
|
767890ca0d8aca96deb6d777e7f972b5be99e11a
| 3,648,011
|
def top_predicted_outcomes(proba_pred, index_to_outcome_dict, N_top = 3):
""" extract the most likely outcomes based on a 1d-array of predicted probabilities
Parameters
----------
proba_pred: numpy 1d-array
array containing the predicted probabilities
index_to_outcome_dict: dict
reversed index systen for the outcomes
Returns
-------
dict
top outcomes along with their probability of occurrences
"""
# extract the indices of the top 'N_top' outcomes
idxs_top = np.argsort(proba_pred)[::-1][:N_top]
# top outcomes
top_outcomes = {index_to_outcome_dict[i+1]:proba_pred[i] for i in idxs_top}
return top_outcomes
|
6f67140b6d50c5c7b7b43947d33cb1934d76440b
| 3,648,012
|
def parse_kafka_table(beamsqltable, name, logger):
    """
    parse kafka parameter
    """
    # loop through the kafka structure
    # map all key value pairs to 'key' = 'value',
    # except properties
ddl = ""
kafka = beamsqltable.spec.get("kafka")
if not kafka:
message = f"Beamsqltable {name} has no Kafka connector descriptor."
logger.warning(message)
return None
# check mandatory fields in Kafka, topic, bootstrap.server
if not kafka.get("topic"):
message = f"Beamsqltable {name} has no kafka topic."
logger.warning(message)
return None
try:
_ = kafka["properties"]["bootstrap.servers"]
except KeyError:
message = f"Beamsqltable {name} has no kafka bootstrap servers found"
logger.warning(message)
return None
# the other fields are inserted, there is not a check for valid fields yet
for kafka_key, kafka_value in kafka.items():
# properties are iterated separately
if kafka_key == 'properties':
for property_key, property_value in kafka_value.items():
ddl += f",'properties.{property_key}' = '{property_value}'"
else:
ddl += f", '{kafka_key}' = '{kafka_value}'"
key_format = kafka.get("key.format")
if key_format is None:
message = f"Beamsqltable {name} has no key.format but it is mandatory \
for upsert-kafka"
logger.warning(message)
return None
return ddl
|
5a8baf4ee5ef935b12cd90957854c6c1aed3c4e5
| 3,648,013
|
def linear_operator_from_num_variables(num_variables, type_, W):
"""Generates the linear operator for the TV lasso Nesterov function
from number of variables.
Parameters:
----------
num_variables : Integer. The total number of variables, including the
intercept variable(s).
"""
A = list()
    for k in range(num_variables):
        Ak = Ak_from_pairs(k, num_variables, type_, W)
A.append(Ak.tocsr())
return A
|
b91eea3a2adcfac27a0ae2635f7059ff1e7b5dad
| 3,648,014
|
def hdi_of_mcmc(sample_vec, cred_mass=0.95):
"""
Highest density interval of sample.
"""
assert len(sample_vec), 'need points to find HDI'
sorted_pts = np.sort(sample_vec)
ci_idx_inc = int(np.floor(cred_mass * len(sorted_pts)))
n_cis = len(sorted_pts) - ci_idx_inc
ci_width = sorted_pts[ci_idx_inc:] - sorted_pts[:n_cis]
min_idx = np.argmin(ci_width)
hdi_min = sorted_pts[min_idx]
hdi_max = sorted_pts[min_idx + ci_idx_inc]
return hdi_min, hdi_max
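# A usage sketch: for a standard normal sample, the 95% HDI should come out
# close to (-1.96, 1.96).
import numpy as np
rng = np.random.default_rng(0)
lo, hi = hdi_of_mcmc(rng.standard_normal(100_000))
print(lo, hi)  # roughly -1.96 and 1.96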
|
cbe3478be80ecef9a7f45df31b289400352284b8
| 3,648,015
|
def compare_AlphaFz(sq_amp,sq_amp_baseline):
"""
Compare the baseline alpha squared amplitude with that of a single epoch.
Parameters
----------
sq_amp: float
Alpha squared amplitude (Fz) from a single epoch
cnt_baseline: float
Baseline alpha squared amplitude (Fz)
Returns
-------
feedback_val: float
Feedback value for stimulus presentation [-1,1]
"""
relative_error = (sq_amp-sq_amp_baseline)/sq_amp_baseline
feedback_val = relative_error
if feedback_val>1:
feedback_val = 1
elif feedback_val<-1:
feedback_val = -1
return feedback_val
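# A usage sketch: a 50% increase over baseline yields 0.5, and large
# deviations saturate at +/-1.
assert compare_AlphaFz(1.5, 1.0) == 0.5
assert compare_AlphaFz(5.0, 1.0) == 1
assert compare_AlphaFz(0.0, 2.0) == -1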
|
290560dc815393799d61f51a7684b4bde309dbac
| 3,648,016
|
def get_filtered_acc_gas(database_year, start_year, end_year):
"""Returns gas avoided costs data
Parameters
----------
database_year: str
The year corresponding to the database that contains the avoided costs data.
Requires that year's database to have already been downloaded
using the `flexvalue downloaded-avoided-costs-data-db --year 20XX` command.
start_year: int
Which year to start the filter of avoided costs data
end_year: int
Which year to end the filter of avoided costs data
Returns
-------
pd.DataFrame
"""
columns = [
"year",
"month",
*ACC_COMPONENTS_GAS,
]
columns_str = ", ".join(columns)
sql_str = f"""
        SELECT {columns_str}
FROM acc_gas
WHERE year >= {start_year}
AND year <= {end_year}
"""
con = get_db_connection(database_year=database_year)
return pd.read_sql(sql_str, con=con)
|
7b6db413e25135c72864664ccd7e0b371dae7cd9
| 3,648,017
|
import logging
import locale
def emr_app(config_name):
""" Application Factories
"""
app = Flask(__name__, instance_relative_config=True)
app.config.from_object(app_config[config_name])
handler = RotatingFileHandler('emr.log')
handler.setLevel(logging.INFO)
app.logger.addHandler(handler)
app.wsgi_app = ProxyFix(app.wsgi_app)
login_manager.init_app(app)
db.init_app(app)
migrate.init_app(app, db)
mail.init_app(app)
assets.init_app(app)
# Blueprints
app.register_blueprint(default_blueprint)
app.register_blueprint(dashboard_blueprint)
app.register_blueprint(resources_blueprint)
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
with app.app_context():
db.create_all()
return app
|
e2ba6ec0f186baf79ccf87b6ce9360afe93b17f4
| 3,648,019
|
import datetime
def _split_datetime_from_line(line):
"""Docker timestamps are in RFC3339 format: 2015-08-03T09:12:43.143757463Z, with everything up to the first space
being the timestamp.
"""
log_line = line
dt = datetime.datetime.utcnow()
pos = line.find(" ")
if pos > 0:
dt = scalyr_util.rfc3339_to_datetime(line[0:pos])
log_line = line[pos + 1 :]
return (dt, log_line)
|
a5e373218d9a7c80562afb91a79e27a02647f89e
| 3,648,020
|
def set_signal_winch(handler):
""" return the old signal handler """
global winch_handler
    old_handler = winch_handler
    winch_handler = handler
return old_handler
|
1635090e9601dff4c132c1e26bd3072d8c5752c8
| 3,648,021
|
import logging
import datetime
def visdom_loss_handler(modules_dict, model_name):
"""
Attaches plots and metrics to trainer.
This handler creates or connects to an environment on a running Visdom dashboard and creates a line plot that tracks the loss function of a
training loop as a function of the number of iterations. This can be attached to an Ignite Engine, and the training closure must
have 'loss' as one of the keys in its return dict for this plot to be made.
See documentation for Ignite (https://github.com/pytorch/ignite) and Visdom (https://github.com/facebookresearch/visdom) for more information.
"""
tim = Timer()
tim.attach( trainer,
start=Events.STARTED,
step=Events.ITERATION_COMPLETED,
)
vis = visdom.Visdom(env=environment)
def create_plot_window(vis, xlabel, ylabel, title):
return vis.line(X=np.array([1]), Y=np.array([np.nan]), opts=dict(xlabel=xlabel, ylabel=ylabel, title=title))
train_loss_window = create_plot_window(vis, '#Iterations', 'Loss', description)
log_interval = 10
@trainer.on(Events.ITERATION_COMPLETED)
def log_training_loss(engine):
iter = (engine.state.iteration -1)
if iter % log_interval == 0:
logging.info("Epoch[{}] Iteration: {} Time: {} Loss: {:.2f}".format(
engine.state.epoch, iter, str(datetime.timedelta(seconds=int(tim.value()))), engine.state.output
))
vis.line(X=np.array([engine.state.iteration]),
Y=np.array([engine.state.output]),
update='append',
win=train_loss_window)
save_interval = 50
handler = ModelCheckpoint('/tmp/models', model_name, save_interval = save_interval, n_saved=5, create_dir=True, require_empty=False)
trainer.add_event_handler(Events.ITERATION_COMPLETED, handler, modules_dict)
|
4e53d1c3a1bd8b571960ea959a234f88c0f30c0b
| 3,648,023
|
from rdkit.Chem.Draw.qtCanvas import Canvas
from rdkit.Chem.Draw.MolDrawing import MolDrawing, DrawingOptions
from rdkit import Chem
from rdkit.Chem import AllChem
def MolToQPixmap(mol, size=(300, 300), kekulize=True, wedgeBonds=True, fitImage=False, options=None,
**kwargs):
""" Generates a drawing of a molecule on a Qt QPixmap
"""
if not mol:
raise ValueError('Null molecule provided')
canvas = Canvas(size)
if options is None:
options = DrawingOptions()
options.bgColor = None
if fitImage:
options.dotsPerAngstrom = int(min(size) / 10)
options.wedgeDashedBonds = wedgeBonds
if kekulize:
mol = Chem.Mol(mol.ToBinary())
Chem.Kekulize(mol)
if not mol.GetNumConformers():
AllChem.Compute2DCoords(mol)
drawer = MolDrawing(canvas=canvas, drawingOptions=options)
drawer.AddMol(mol, **kwargs)
canvas.flush()
return canvas.pixmap
|
8947edf412cd960c0c2735922db6a84a84622d06
| 3,648,024
|
def _unpad(string: str) -> str:
"""Un-pad string."""
return string[: -ord(string[len(string) - 1 :])]
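# A round-trip sketch: _unpad reverses PKCS#7-style padding, where each pad
# character encodes the pad length (the _pad helper below is a hypothetical
# counterpart, not part of the original module):
def _pad(s: str, block: int = 16) -> str:
    n = block - len(s) % block
    return s + chr(n) * n
assert _unpad(_pad('hello')) == 'hello'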
|
dbd036afabc29047201a9ed2d6b299bb5fe3ba0f
| 3,648,025
|
from pathlib import Path
import shutil
def copy_dir_to_target(source_directory: Path, destination_directory: Path) -> bool:
"""
Args:
source_directory: a folder to copy
destination_directory: the parent directory to copy source_directory into
Returns: True if copy was successful, False otherwise
"""
if source_directory.exists() and source_directory.is_dir():
print("Found directory at %s" % source_directory.resolve())
else:
print("Unable to find required folder, looked at %s" % source_directory.resolve())
return False
print("Copying to %s" % destination_directory)
shutil.copytree(str(source_directory), str(destination_directory / source_directory.name))
return True
|
2dd67db56c17c787ea69189c52db11edcfcb0d3c
| 3,648,026
|
import re
from itertools import groupby
from operator import itemgetter
def vlanlist_to_config(vlan_list, first_line_len=48, other_line_len=44, min_grouping_size=3):
"""Given a List of VLANs, build the IOS-like vlan list of configurations.
Args:
vlan_list (list): Unsorted list of vlan integers.
first_line_len (int, optional): The maximum length of the line of the first element of within the return list. Defaults to 48.
other_line_len (int, optional): The maximum length of the line of all other elements of within the return list. Defaults to 44.
min_grouping_size (int, optional): The minimum consecutive VLANs to aggregate with a hyphen . Defaults to Cisco's minimum grouping size of 3.
Returns:
list: Sorted string list of integers according to IOS-like vlan list rules
Example:
>>> from netutils.vlan import vlanlist_to_config
>>> vlanlist_to_config([1, 2, 3, 5, 6, 1000, 1002, 1004, 1006, 1008, 1010, 1012, 1014, 1016, 1018])
['1-3,5,6,1000,1002,1004,1006,1008,1010,1012,1014', '1016,1018']
>>> vlanlist_to_config([1,3,5,6,100,101,102,103,104,105,107,109], min_grouping_size=2)
['1,3,5-6,100-105,107,109']
>>> vlanlist_to_config([1,3,5,6,100,101,102,103,104,105,107,109], min_grouping_size=1)
['1,3,5,6,100,101,102,103,104,105,107,109']
"""
def build_final_vlan_cfg(vlan_cfg):
if len(vlan_cfg) <= first_line_len:
return [vlan_cfg]
# Split VLAN config if lines are too long
first_line = re.match(f"^.{{0,{first_line_len}}}(?=,)", vlan_cfg)
vlan_cfg_lines = [first_line.group(0)]
        next_lines = re.compile(f"(?<=,).{{0,{other_line_len}}}(?=,|$)")
for line in next_lines.findall(vlan_cfg, first_line.end()):
vlan_cfg_lines.append(line)
return vlan_cfg_lines
# Fail if min_grouping_size is less than 1.
if min_grouping_size < 1:
raise ValueError("Minimum grouping size must be equal to or greater than one.")
# Sort and de-dup VLAN list
vlan_list = sorted(set(vlan_list))
    # If grouping size is one, sort, and return the config list as no other processing is required.
    if min_grouping_size == 1:
return build_final_vlan_cfg(",".join([str(vlan) for vlan in vlan_list]))
# Group consecutive VLANs
vlan_groups = []
for _, vlan in groupby(enumerate(vlan_list), lambda vlan: vlan[0] - vlan[1]):
vlan_groups.append(list(map(itemgetter(1), vlan)))
# Check for invalid VLAN IDs
if vlan_list[0] < 1 or vlan_list[-1] > 4094:
raise ValueError("Valid VLAN range is 1-4094")
# Create VLAN portion of config
vlan_strings = []
for group in vlan_groups:
group_length = len(group)
group_string = f"{group[0]}"
# Compress based on grouping_size
if group_length >= min_grouping_size:
group_string += f"-{group[-1]}"
# If it does not match grouping_size, and is greater than one
elif group_length != 1:
group_string += f",{group[1]}"
vlan_strings.append(group_string)
return build_final_vlan_cfg(",".join(vlan_strings))
|
7af9dd8f0572a0105b04a87ce6ab889c380a886b
| 3,648,027
|
from typing import Optional
def get_next_url(bundle: dict) -> Optional[str]:
"""
Returns the URL for the next page of a paginated ``bundle``.
>>> bundle = {
... 'link': [
... {'relation': 'self', 'url': 'https://example.com/page/2'},
... {'relation': 'next', 'url': 'https://example.com/page/3'},
... {'relation': 'previous', 'url': 'https://example.com/page/1'},
... ]
... }
>>> get_next_url(bundle)
'https://example.com/page/3'
>>> bundle = {
... 'link': [
... {'relation': 'self', 'url': 'https://example.com/page/1'},
... ]
... }
>>> type(get_next_url(bundle))
<class 'NoneType'>
"""
if 'link' in bundle:
for link in bundle['link']:
if link['relation'] == 'next':
return link['url']
|
0fafa4dc56fb5e03838652419e94dceb8aed9e75
| 3,648,028
|
def convert_sklearn_variance_threshold(operator, device, extra_config):
"""
Converter for `sklearn.feature_selection.VarianceThreshold`.
Args:
operator: An operator wrapping a `sklearn.feature_selection.VarianceThreshold` model
device: String defining the type of device the converted operator should be run on
extra_config: Extra configuration used to select the best conversion strategy
Returns:
A PyTorch model
"""
var = operator.raw_operator.variances_
threshold = operator.raw_operator.threshold
indices = np.array([i for i in range(len(var)) if var[i] > threshold])
return ArrayFeatureExtractor(np.ascontiguousarray(indices), device)
|
fc5d87e2fd27b8e40bb82689651f4a81ac64f1a5
| 3,648,029
|
def query_merchant_users(bc_app, merchant=None, start_time=None, end_time=None):
"""
query merchant users
:param bc_app: beecloud.entity.BCApp
:param merchant: merchant account, if not passed, only users associated with app will be returned
:param start_time: if passed, only users registered after it will be returned
:param end_time: if passed, only users registered before it will be returned
:return: result contains beecloud.entity.MerchantUser list
"""
req_param = _TmpObject()
if merchant:
req_param.email = merchant
if start_time:
req_param.start_time = start_time
if end_time:
req_param.end_time = end_time
attach_app_sign(req_param, BCReqType.QUERY, bc_app)
url = get_rest_root_url() + 'rest/users?para=' + obj_to_quote_str(req_param)
tmp_resp = http_get(url, bc_app.timeout)
# if err encountered, [0] equals 0
if not tmp_resp[0]:
return tmp_resp[1]
# [1] contains result dict
resp_dict = tmp_resp[1]
bc_result = BCResult()
set_common_attr(resp_dict, bc_result)
if not bc_result.result_code:
user_dict_arr = resp_dict.get('users')
class_name = BCMerchantUser
users = []
if user_dict_arr:
users = [parse_dict_to_obj(user_dict, class_name) for user_dict in user_dict_arr]
bc_result.users = users
return bc_result
|
edf7df004bf97dbeb4e629ed8b1a3c2802519b91
| 3,648,030
|
def getUIQM(x):
"""
Function to return UIQM to be called from other programs
x: image
"""
x = x.astype(np.float32)
### UCIQE: https://ieeexplore.ieee.org/abstract/document/7300447
#c1 = 0.4680; c2 = 0.2745; c3 = 0.2576
### UIQM https://ieeexplore.ieee.org/abstract/document/7305804
c1 = 0.0282; c2 = 0.2953; c3 = 3.5753
uicm = _uicm(x)
uism = _uism(x)
uiconm = _uiconm(x, 10)
uiqm = (c1*uicm) + (c2*uism) + (c3*uiconm)
return uiqm
|
e3e0c06ef5a08c232829afc196a973d6ff02b915
| 3,648,031
|
def cart2spher(x, y, z):
"""Cartesian to Spherical coordinate conversion."""
hxy = np.hypot(x, y)
rho = np.hypot(hxy, z)
#if not rho:
# return np.array([0,0,0])
theta = np.arctan2(hxy, z)
phi = np.arctan2(y, x)
return rho, theta, phi
|
bafd7e0cb6c0d152f0fbb9b7deaf16ce85114065
| 3,648,032
|
def get_mean_and_stdv(dataset):
"""return means and standard
deviations along 0th axis of tensor"""
means = dataset.mean(0)
stdvs = dataset.std(0)
return means, stdvs
|
562f883d809f034be66244ad593a6f8a0bbe2ba5
| 3,648,033
|
from ToolBOSCore.Settings.ToolBOSConf import getConfigOption
from typing import Any
def getBuildRequirements():
"""
Returns a list of essential packages needed for a minimalistic
software installation tree (SIT).
Opposed to getMinRequirements() this contains everything needed to
build software packages.
"""
result = getConfigOption( 'SIT_bootstrapFull' )
Any.requireIsListNonEmpty( result )
return result
|
1f0c91bd284d0249eaa0016792297a15c04323d5
| 3,648,034
|
import torch
def nms_3d(boxes, scores, nms_threshold):
"""
    Wraps nms_gpu, which expects data ordered as y1, x1, y2, x2, z1, z2, scores.
    :param boxes: tensor [n, (y1, x1, z1, y2, x2, z2)]
    :param scores: tensor [n]
    :param nms_threshold: float scalar
    :return: keep: indices kept after NMS, tensor [m]
"""
    # [n,(y1,x1,z1,y2,x2,z2)] => [n,(y1,x1,y2,x2,z1,z2,scores)], axes reordered for nms
box_with_score = torch.cat((boxes[:, [0, 1, 3, 4, 2, 5]], scores.unsqueeze(-1)), -1)
keep = nms_gpu(box_with_score, nms_threshold)
return keep
|
4fbcd24309a50af1064ddd3eb5956b6b783f78c9
| 3,648,035
|
import re
def report_install_status(ctx, op_id):
"""
:param ctx: CSM Context object
:param op_id: operational ID
Peeks into the install log to see if the install operation is successful or not
"""
failed_oper = r'Install operation {} aborted'.format(op_id)
output = ctx.send("show install log {} detail".format(op_id))
status, message = match_pattern(ctx.pattern, output)
report_log(ctx, status, message)
if re.search(failed_oper, output):
log_install_errors(ctx, output)
ctx.error("Operation {} failed".format(op_id))
return False
ctx.info("Operation {} finished successfully".format(op_id))
return True
|
67e02e892029204679c4e766cbd0272f4eb08a3d
| 3,648,036
|
def spltime(tseconds):
""" This gets the time in hours, mins and seconds """
hours = tseconds // 3600
    minutes = (tseconds // 60) % 60
seconds = tseconds % 60
return hours, minutes, seconds
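# A usage sketch: 3725 seconds is 1 hour, 2 minutes, 5 seconds.
assert spltime(3725) == (1, 2, 5)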
|
a8ba14879da51ebbeac2ba201fc562a22fe13364
| 3,648,037
|
import time
import requests
def mine():
"""
This function will try to mine a new block by joining the negotiation process.
If we are the winner address, we will check the negotiation winner of the neighbour nodes, to grant that no node
has a different winner address.
"""
if blockchain is not None:
last_block = blockchain.chain[-1]
# Candidate to the negotiation and execute the negotiation algorithm
winner, negotiation_price = blockchain.proof_of_negotiation()
        # If we did not win the negotiation, report the winner and stop
        if winner.address != blockchain.node_id:
            return jsonify({"winner": winner.url, "won": False}), 200
        # We receive a reward for winning the negotiation.
        blockchain.submit_transaction(
            sender_address=MINING_SENDER, recipient_address=blockchain.node_id,
            value=MINING_REWARD, signature="", timestamp=time.time()
        )
# Forge the new Block by adding it to the chain
previous_hash = blockchain.hash(last_block)
block = blockchain.create_block(
previous_hash=previous_hash,
validator_address=blockchain.node_id,
negotiation_price=negotiation_price
)
# Broadcast the new chain
print("Sending the new chain in broadcast...")
for node_url in blockchain.nodes:
print(node_url + '/nodes/resolve')
try:
requests.get(node_url + '/nodes/resolve')
except requests.exceptions.RequestException:
print("Node with url '" + node_url + "' isn't connected or doesn't exist anymore.")
print("New chain broadcast completed successfully!")
# Check if the validator of the last block is the same as the neighbour nodes
for node_url in blockchain.nodes:
print(node_url + '/chain')
try:
neighbour_chain = requests.get(node_url + '/chain').json()["chain"]
except requests.exceptions.RequestException:
print("Node with url '" + node_url + "' isn't connected or doesn't exist anymore.")
continue # skip the current iteration if we can't connect with the node
validator_address = neighbour_chain[-1]["validator"]
# If the address of the validator of the last block is different from the winner address, decrease the
# reputation of the neighbour, because the node tried to put false negotiation winner in the last block
if validator_address != winner.address:
blockchain.change_reputation(
node_address=blockchain.nodes[node_url].address,
change_lvl=INVALID_CHAIN_GRAVITY
)
response = {
'message': "New Block Forged",
'block_number': block['block_number'],
'transactions': block['transactions'],
'validator': block['validator'],
'previous_hash': block['previous_hash']
}
return jsonify(response), 200
else:
response = {'message': 'Blockchain hasn\'t been initialized yet!'}
return jsonify(response), 400
|
a7a313020d930d3fed1826e1e89165ee5b411311
| 3,648,039
|
def getaddrinfo(host, port, family=0, socktype=0, proto=0, flags=0):
"""
Resolve host and port into list of address info entries.
Translate the host/port argument into a sequence of 5-tuples that contain
all the necessary arguments for creating a socket connected to that service.
host is a domain name, a string representation of an IPv4/v6 address or
None. port is a string service name such as 'http', a numeric port number or
None. By passing None as the value of host and port, you can pass NULL to
the underlying C API.
The family, type and proto arguments can be optionally specified in order to
narrow the list of addresses returned. Passing zero as a value for each of
these arguments selects the full range of results.
.. seealso:: :doc:`dns`
"""
return get_hub().resolver.getaddrinfo(host, port, family, socktype, proto, flags)
|
35ecba554f92b02645f9ba50eea8d8c70abf7723
| 3,648,040
|
def atomic_log(using=None):
"""
Decorator that surrounds atomic block, ensures that logged output requests will be stored inside database in case
of DB rollback
"""
if callable(using):
return AtomicLog(DEFAULT_DB_ALIAS)(using)
else:
return AtomicLog(using)
|
a1076ff275ab3c55e803bc78b1dcb31d5761335f
| 3,648,041
|
import imp
import os
import sys
import traceback
from inspect import isfunction
def _readConfigFile(config_file, verbose):
"""Read configuration file options into a dictionary."""
if not os.path.exists(config_file):
raise RuntimeError("Couldn't open configuration file '%s'." % config_file)
try:
conf = {}
configmodule = imp.load_source("configuration_module", config_file)
for k, v in vars(configmodule).items():
if k.startswith("__"):
continue
elif isfunction(v):
continue
conf[k] = v
except Exception, e:
# if verbose >= 1:
# traceback.print_exc()
exceptioninfo = traceback.format_exception_only(sys.exc_type, sys.exc_value) #@UndefinedVariable
exceptiontext = ""
for einfo in exceptioninfo:
exceptiontext += einfo + "\n"
# raise RuntimeError("Failed to read configuration file: " + config_file + "\nDue to " + exceptiontext)
print >>sys.stderr, "Failed to read configuration file: " + config_file + "\nDue to " + exceptiontext
raise
return conf
|
22aff77a384f3c69d8f4ffa2cfafcb9662d54398
| 3,648,042
|
def run_street_queries(es, params_list, queries, formats):
"""Punto de entrada del módulo 'street.py'. Toma una lista de consultas de
calles y las ejecuta, devolviendo los resultados QueryResult.
Args:
es (Elasticsearch): Conexión a Elasticsearch.
params_list (list): Lista de ParametersParseResult.
queries (list): Lista de búsquedas, generadas a partir de
'params_list'.
formats (list): Lista de parámetros de formato de cada búsqueda, en
forma de diccionario.
Returns:
list: Lista de QueryResult, una por cada búsqueda.
"""
searches = []
for query, fmt in zip(queries, formats):
processed_query = query.copy()
if N.FULL_NAME in fmt[N.FIELDS]:
            # The full name includes the names of the province and the
            # department; add those fields to the query so their names can
            # be extracted later.
processed_query['fields'] += (N.STATE, N.DEPT)
searches.append(data.StreetsSearch(processed_query))
data.ElasticsearchSearch.run_searches(es, searches)
for search, fmt in zip(searches, formats):
if N.FULL_NAME in fmt[N.FIELDS]:
            # Add the full name to each hit in the result.
for hit in search.result.hits:
full_name = '{}, {}, {}'.format(
hit[N.NAME], hit[N.DEPT][N.NAME], hit[N.STATE][N.NAME]
)
hit[N.FULL_NAME] = full_name
return [
QueryResult.from_entity_list(search.result.hits,
params.received_values(),
search.result.total,
search.result.offset)
for search, params in zip(searches, params_list)
]
|
bf052e7ea63280fde950d1663900f8420b6f4e97
| 3,648,043
|
def draw_text(text, bgcolor, plt_ax, text_plt):
"""
Render the text
:param str text: text to render
    :param str bgcolor: background color used to render text
:param matplotlib.axes.Axes plt_ax: figure sub plot instance
:param matplotlib.text.Text text_plt: plot of text
:return matplotlib.text.Text: updated plot of text
"""
if text_plt is None:
# render text with color
text_plt = plt_ax.text(0.95, 0.95, text, backgroundcolor=bgcolor,
horizontalalignment='right', verticalalignment='top',
transform=plt_ax.transAxes, fontsize=10)
else:
# update existing text
text_plt.set_text(text)
return text_plt
|
478ada3b4fbb3add935713268415cd4606ef58b3
| 3,648,044
|
def compute_95confidence_intervals(
record, episode, num_episodes, store_accuracies, metrics=["AccuracyNovel",]
):
"""Computes the 95% confidence interval for the novel class accuracy."""
if episode == 0:
store_accuracies = {metric: [] for metric in metrics}
for metric in metrics:
store_accuracies[metric].append(record[metric])
if episode == (num_episodes - 1):
# Compute std and confidence interval of the 'metric' accuracies.
accuracies = np.array(store_accuracies[metric])
stds = np.std(accuracies, 0)
record[metric + "_std"] = stds
record[metric + "_cnf"] = 1.96 * stds / np.sqrt(num_episodes)
return record, store_accuracies
|
32e9c971c1396fb663e1b940a6d5cd7a09ab6c03
| 3,648,045
|
def _pytype(dtype):
""" return a python type for a numpy object """
if dtype in ("int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64"):
return int
elif dtype in ("float16", "float32", "float64", "float128"):
return float
elif dtype in ("complex64", "complex128", "complex256"):
return complex
else:
raise TypeError("not a recognized dtype: {0}".format(dtype))
|
47ed12c47ee4fa5986795b8f432e72cdd7ee945f
| 3,648,046
|
def pca(x, output_dim, dtype, name=None):
"""Computes pca on the dataset using biased covariance.
The pca analyzer computes output_dim orthonormal vectors that capture
directions/axes corresponding to the highest variances in the input vectors of
x. The output vectors are returned as a rank-2 tensor with shape
(input_dim, output_dim), where the 0th dimension are the components of each
output vector, and the 1st dimension are the output vectors representing
orthogonal directions in the input space, sorted in order of decreasing
variances.
The output rank-2 tensor (matrix) serves a useful transform purpose. Formally,
the matrix can be used downstream in the transform step by multiplying it to
the input tensor x. This transform reduces the dimension of input vectors to
output_dim in a way that retains the maximal variance.
NOTE: To properly use PCA, input vector components should be converted to
similar units of measurement such that the vectors represent a Euclidean
space. If no such conversion is available (e.g. one element represents time,
another element distance), the canonical approach is to first apply a
transformation to the input data to normalize numerical variances, i.e.
tft.scale_to_z_score(). Normalization allows PCA to choose output axes that
help decorrelate input axes.
Below are a couple intuitive examples of PCA.
Consider a simple 2-dimensional example:
Input x is a series of vectors [e, e] where e is Gaussian with mean 0,
variance 1. The two components are perfectly correlated, and the resulting
covariance matrix is
[[1 1],
[1 1]].
Applying PCA with output_dim = 1 would discover the first principal component
  [1 / sqrt(2), 1 / sqrt(2)]. When multiplied with the original example, each
vector [e, e] would be mapped to a scalar sqrt(2) * e. The second principal
component would be [-1 / sqrt(2), 1 / sqrt(2)] and would map [e, e] to 0,
which indicates that the second component captures no variance at all. This
agrees with our intuition since we know that the two axes in the input are
perfectly correlated and can be fully explained by a single scalar e.
Consider a 3-dimensional example:
Input x is a series of vectors [a, a, b], where a is a zero-mean, unit
variance Gaussian. b is a zero-mean, variance 4 Gaussian and is independent of
a. The first principal component of the unnormalized vector would be [0, 0, 1]
since b has a much larger variance than any linear combination of the first
two components. This would map [a, a, b] onto b, asserting that the axis with
highest energy is the third component. While this may be the desired
output if a and b correspond to the same units, it is not statistically
  desirable when the units are irreconcilable. In such a case, one should
  first normalize each component to unit variance, i.e. b := b / 2.
The first principal component of a normalized vector would yield
[1 / sqrt(2), 1 / sqrt(2), 0], and would map [a, a, b] to sqrt(2) * a. The
second component would be [0, 0, 1] and map [a, a, b] to b. As can be seen,
the benefit of normalization is that PCA would capture highly correlated
components first and collapse them into a lower dimension.
Args:
x: A rank-2 `Tensor`, 0th dim are rows, 1st dim are indices in row vectors.
output_dim: The PCA output dimension (number of eigenvectors to return).
dtype: numpy dtype of entries in the returned matrix.
name: (Optional) A name for this operation.
Raises:
ValueError: if input is not a rank-2 Tensor.
Returns:
A 2D `Tensor` (matrix) M of shape (input_dim, output_dim).
"""
if not isinstance(x, tf.Tensor):
raise TypeError('Expected a Tensor, but got %r' % x)
x.shape.assert_has_rank(2)
input_dim = x.shape.as_list()[1]
shape = (input_dim, output_dim)
spec = _PCACombinerSpec(output_dim, dtype)
return combine_analyzer(
[x], [dtype], [shape], spec,
name if name is not None else 'pca')[0]
|
a4103450fd1d6c2175be5c546a75a9a3fe217786
| 3,648,047
|
def preorder(root: Node):
"""
Pre-order traversal visits root node, left subtree, right subtree.
>>> preorder(make_tree())
[1, 2, 4, 5, 3]
"""
return [root.data] + preorder(root.left) + preorder(root.right) if root else []
|
af68cfc6d4434cad7908125504592253e6bacf46
| 3,648,048
|
import uuid
def generate_uuid() -> str:
"""
Generate UUIDs to use as `sim.base_models.Node` and `sim.base_models.Item` ids.
"""
return str(uuid.uuid4())
|
9428676bb633873a2f32c53172146486f1421234
| 3,648,050
|
def new_key_generator():
"""Generator of new keys.
Yields: str
"""
def _rnd_key():
return ''.join(nchoice(key_chars, size=next(key_lengths)))
while True:
key = _rnd_key()
while key in storage:
key = _rnd_key()
yield key
|
111f523a5548e4e99ae9f3dcdeeafc73e4037693
| 3,648,051
|
def _style_mixture(which_styles, num_styles):
"""Returns a 1-D array mapping style indexes to weights."""
if not isinstance(which_styles, dict):
raise ValueError('Style mixture must be a dictionary.')
mixture = np.zeros([num_styles], dtype=np.float32)
for index in which_styles:
mixture[index] = which_styles[index]
return mixture
|
4bcb3dab052f171e514e9ba6abf10a74edcf612f
| 3,648,052
|
def merge_dicts(a, b):
"""combine two dictionaries, assuming components are arrays"""
result = a
for k, v in b.items():
if k not in result:
result[k] = []
result[k].extend(v)
return result
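# A usage sketch -- note that 'a' itself is mutated and returned:
a = {"x": [1], "y": [2]}
b = {"y": [3], "z": [4]}
assert merge_dicts(a, b) == {"x": [1], "y": [2, 3], "z": [4]}
assert a["y"] == [2, 3]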
|
de465179faf1bd9ace312fa4b21d332ac994b72b
| 3,648,053
|
def parser_content_labelling_Descriptor(data,i,length,end):
"""\
parser_content_labelling_Descriptor(data,i,length,end) -> dict(parsed descriptor elements).
This descriptor is not parsed at the moment. The dict returned is:
{ "type": "content_labelling", "contents" : unparsed_descriptor_contents }
(Defined in ETSI TS 102 323 specification)
"""
return { "type" : "content_labelling", "contents" : data[i+2:end] }
|
1aa9c68fd186df4dbda7200f2b40f617479a09d9
| 3,648,054
|
def is_norm(modules):
"""Check if is one of the norms."""
if isinstance(modules, (_BatchNorm, )):
return True
return False
|
0f43c3691f3f5aeddd0ce21ff726cfa3ad25a856
| 3,648,055
|
def scheduler(system = system()):
"""Job scheduler for OLCF system."""
if not is_olcf_system(system):
raise RuntimeError('unknown system (' + system + ')')
return _system_params[system].scheduler
|
5456cce3dc6ac46ac979983bb77c1645b05ba7f2
| 3,648,056
|
def simple_decoder_fn_inference(output_fn, encoder_state, embeddings,
start_of_sequence_id, end_of_sequence_id,
maximum_length, num_decoder_symbols,
dtype=dtypes.int32, name=None):
""" Simple decoder function for a sequence-to-sequence model used in the
`dynamic_rnn_decoder`.
The `simple_decoder_fn_inference` is a simple inference function for a
sequence-to-sequence model. It should be used when `dynamic_rnn_decoder` is
in the inference mode.
The `simple_decoder_fn_inference` is called with a set of the user arguments
and returns the `decoder_fn`, which can be passed to the
`dynamic_rnn_decoder`, such that
```
dynamic_fn_inference = simple_decoder_fn_inference(...)
outputs_inference, state_inference = dynamic_rnn_decoder(
decoder_fn=dynamic_fn_inference, ...)
```
Further usage can be found in the `kernel_tests/seq2seq_test.py`.
Args:
output_fn: An output function to project your `cell_output` onto class
logits.
An example of an output function;
```
tf.variable_scope("decoder") as varscope
output_fn = lambda x: layers.linear(x, num_decoder_symbols,
scope=varscope)
outputs_train, state_train = seq2seq.dynamic_rnn_decoder(...)
logits_train = output_fn(outputs_train)
varscope.reuse_variables()
logits_inference, state_inference = seq2seq.dynamic_rnn_decoder(
output_fn=output_fn, ...)
```
If `None` is supplied it will act as an identity function, which
might be wanted when using the RNNCell `OutputProjectionWrapper`.
encoder_state: The encoded state to initialize the `dynamic_rnn_decoder`.
embeddings: The embeddings matrix used for the decoder sized
`[num_decoder_symbols, embedding_size]`.
start_of_sequence_id: The start of sequence ID in the decoder embeddings.
end_of_sequence_id: The end of sequence ID in the decoder embeddings.
maximum_length: The maximum allowed of time steps to decode.
num_decoder_symbols: The number of classes to decode at each time step.
dtype: (default: `dtypes.int32`) The default data type to use when
handling integer objects.
name: (default: `None`) NameScope for the decoder function;
defaults to "simple_decoder_fn_inference"
Returns:
A decoder function with the required interface of `dynamic_rnn_decoder`
intended for inference.
"""
with ops.name_scope(name, "simple_decoder_fn_inference",
[output_fn, encoder_state, embeddings,
start_of_sequence_id, end_of_sequence_id,
maximum_length, num_decoder_symbols, dtype]):
start_of_sequence_id = ops.convert_to_tensor(start_of_sequence_id, dtype)
end_of_sequence_id = ops.convert_to_tensor(end_of_sequence_id, dtype)
maximum_length = ops.convert_to_tensor(maximum_length, dtype)
num_decoder_symbols = ops.convert_to_tensor(num_decoder_symbols, dtype)
encoder_info = nest.flatten(encoder_state)[0]
batch_size = encoder_info.get_shape()[0].value
if output_fn is None:
output_fn = lambda x: x
if batch_size is None:
batch_size = array_ops.shape(encoder_info)[0]
def decoder_fn(time, cell_state, cell_input, cell_output, context_state):
""" Decoder function used in the `dynamic_rnn_decoder` with the purpose of
inference.
The main difference between this decoder function and the `decoder_fn` in
`simple_decoder_fn_train` is how `next_cell_input` is calculated. In this
decoder function we calculate the next input by applying an argmax across
the feature dimension of the output from the decoder. This is a
greedy-search approach. (Bahdanau et al., 2014) & (Sutskever et al., 2014)
use beam-search instead.
Args:
time: positive integer constant reflecting the current timestep.
cell_state: state of RNNCell.
cell_input: input provided by `dynamic_rnn_decoder`.
cell_output: output of RNNCell.
context_state: context state provided by `dynamic_rnn_decoder`.
Returns:
A tuple (done, next state, next input, emit output, next context state)
where:
done: A boolean vector to indicate which sentences has reached a
`end_of_sequence_id`. This is used for early stopping by the
`dynamic_rnn_decoder`. When `time>=maximum_length` a boolean vector with
all elements as `true` is returned.
next state: `cell_state`, this decoder function does not modify the
given state.
next input: The embedding from argmax of the `cell_output` is used as
`next_input`.
emit output: If `output_fn is None` the supplied `cell_output` is
returned, else the `output_fn` is used to update the `cell_output`
before calculating `next_input` and returning `cell_output`.
next context state: `context_state`, this decoder function does not
modify the given context state. The context state could be modified when
applying e.g. beam search.
"""
with ops.name_scope(name, "simple_decoder_fn_inference",
[time, cell_state, cell_input, cell_output,
context_state]):
if cell_input is not None:
raise ValueError("Expected cell_input to be None, but saw: %s" %
cell_input)
if cell_output is None:
# invariant that this is time == 0
next_input_id = array_ops.ones([batch_size,], dtype=dtype) * (
start_of_sequence_id)
done = array_ops.zeros([batch_size,], dtype=dtypes.bool)
cell_state = encoder_state
cell_output = array_ops.zeros([num_decoder_symbols],
dtype=dtypes.float32)
else:
cell_output = output_fn(cell_output)
next_input_id = math_ops.cast(
math_ops.argmax(cell_output, 1), dtype=dtype)
done = math_ops.equal(next_input_id, end_of_sequence_id)
next_input = array_ops.gather(embeddings, next_input_id)
# if time > maxlen, return all true vector
done = control_flow_ops.cond(math_ops.greater(time, maximum_length),
lambda: array_ops.ones([batch_size,], dtype=dtypes.bool),
lambda: done)
return (done, cell_state, next_input, cell_output, context_state)
return decoder_fn
|
72436f6d866d78c5dc0c867fa950312f1baab0ef
| 3,648,057
|
def listdir(path):
"""listdir(path) -> list_of_strings
Return a list containing the names of the entries in the directory.
path: path of directory to list
The list is in arbitrary order. It does not include the special
entries '.' and '..' even if they are present in the directory.
"""
l = File(path).list()
if l is None:
raise OSError(0, 'No such directory', path)
return list(l)
|
d9096c979ffaa7c6d399822f4cf8aebc7e520415
| 3,648,058
|
import pathlib
def get_scripts_folder():
"""
return data folder to use for future processing
"""
return (pathlib.Path(__file__).parent.parent)
|
db236b35a06a0506f441ce6f11d8d93807592b04
| 3,648,059
|
def preresnet110(**kwargs):
"""Constructs a PreResNet-110 model.
"""
model = PreResNet(Bottleneck, [18, 18, 18], **kwargs)
return model
|
814fc1aa82bd06155311d37a13a1108684a861ae
| 3,648,060
|
def if_action(hass, config):
""" Wraps action method with state based condition. """
value_template = config.get(CONF_VALUE_TEMPLATE)
if value_template is None:
_LOGGER.error("Missing configuration key %s", CONF_VALUE_TEMPLATE)
return False
return lambda: _check_template(hass, value_template)
|
d696ddd624b37f2dc8114b7da40a902409d8d61d
| 3,648,061
|
from typing import Union
from typing import List
import re
def get_indy_cli_command_output(output: bytes, match: str,
        return_line_offset: int = 1, remove_ansi_escape_sequences: bool = True,
        multi: bool = False) -> Union[List[str], str]:
    """
    Get the output for a specific indy cli command from STDOUT captured calling
    indy-cli from python.
    :param output: STDOUT (bytes) from a batch call to indy-cli from python.
        Required.
    :type output: bytes
    :param match: Find the first line in output that contains this string and
        return the line located return_line_offset lines after it.
        Required.
    :type match: str
    :param return_line_offset: Number of lines to skip after a matching line
        before capturing the returned line. Defaults to 1.
    :type return_line_offset: int
    :param remove_ansi_escape_sequences: Strip ANSI escape sequences from the
        captured lines. Defaults to True.
    :type remove_ansi_escape_sequences: bool
    :param multi: If True, return a list of all matching lines instead of only
        the first. Defaults to False.
    :type multi: bool
    """
ansi_escape = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
matches = []
lines = iter(output.decode().split("\n"))
for line in lines:
if match in line:
count = return_line_offset
            # Skip return_line_offset lines
            while count > 0:
                line = next(lines)
                count -= 1
# Return a single line just after return_line_offset lines have been
# skipped
if remove_ansi_escape_sequences:
line = ansi_escape.sub('', line)
matches.append(line)
# Search for multiple matches?
if multi:
# Continue finding matches
continue
break
if multi:
return matches
else:
try:
return matches[0]
except IndexError:
return None
|
ff417bcace255cb89a5f4c9850939b18c7a2007f
| 3,648,062
|
def reports():
"""Returns all reports in the system"""
reports = crud.report.get_reports()
return reports
|
489b13cea39f6cfdd5e799e0586f81cc60cf0627
| 3,648,063
|
import re
def parse_signature(signature):
"""
Parses one signature
:param signature: stanc3 function signature
    :return: return type, function name and list of function argument types
"""
return_type, rest = signature.split(" ", 1)
function_name, rest = rest.split("(", 1)
args = re.findall(r"(?:[(][^()]+[)][^,()]+)|(?:[^,()]+(?:,*[]])?)", rest)
args = [i.strip() for i in args if i.strip()]
return return_type, function_name, args
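# A usage sketch on a stanc3-style signature (the signature string below is
# hypothetical):
ret, name, args = parse_signature("real foo(int, real)")
assert (ret, name, args) == ("real", "foo", ["int", "real"])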
|
11da2fb6008274f8d9a959651a181f127c85a34e
| 3,648,065
|
import torch
import time
def compute_fps(model, shape, epoch=100, device=None):
"""
frames per second
:param shape: 输入数据大小
"""
total_time = 0.0
if device:
model = model.to(device)
for i in range(epoch):
data = torch.randn(shape)
if device:
data = data.to(device)
start = time.time()
outputs = model(data)
end = time.time()
total_time += (end - start)
return total_time / epoch
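# A usage sketch -- the return value is the average seconds per forward pass,
# so throughput in batches per second is its reciprocal:
import torch.nn as nn
model = nn.Linear(8, 2)
latency = compute_fps(model, (4, 8), epoch=10)
print(f"{1.0 / latency:.1f} batches/s")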
|
b2e80cf695fe4e4be8890c4f28db8ae37e2f8dfe
| 3,648,067
|
def onboarding_ml_app_patterns_post(ml_app_pattern): # noqa: E501
"""Create a new MLApp pattern
# noqa: E501
:param ml_app_pattern: MLApp pattern detail description
:type ml_app_pattern: dict | bytes
:rtype: MLAppPattern
"""
if connexion.request.is_json:
ml_app_pattern = MLAppPattern.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
|
9d72c61c9fa04e387db72f67acf3bab9f88caaad
| 3,648,068
|
def get_stats(ns_profnum, clear=False, **kwargs):
""""
Returns and optionally clears the Polyglot-to-ISY stats
:param ns_profnum: Node Server ID (for future use)
:param clear: optional, zero out stats if True
"""
global SLOCK, STATS
SLOCK.acquire()
    st = dict(STATS)  # return a copy so clearing does not wipe the returned stats
if clear:
STATS['ntotal'] = 0
STATS['rtotal'] = 0
STATS['oktotal'] = 0
STATS['ertotal'] = 0
STATS['ettotal'] = 0.0
STATS['ethigh'] = 0.0
STATS['etlow'] = 0.0
SLOCK.release()
#_LOGGER.info('get_stats(): %d %f %d', st['ntotal'], st['ettotal'], st['rtotal'])
return st
|
e8dbd350b9d9befe7b2c584c84f268a0c27decd5
| 3,648,069
|
def read_log_json():
""" Get all log documents/records from MondoDB """
limit = int(demisto.args().get('limit'))
# Point to all the documents
cursor = COLLECTION.find({}, {'_id': False}).limit(limit)
# Create an empty log list
entries = []
# Iterate through those documents
if cursor is not None:
for i in cursor:
# Append log entry to list
entries.append(i)
return_json = {COLLECTION_NAME: entries}
human_readable = tableToMarkdown(f'The log documents/records for collection "{COLLECTION_NAME}"', return_json)
return human_readable, {}, {}
return 'MongoDB - no documents/records - Log collection is empty', {}, {}
|
febef55ff06360148f5c87a875fd3f001e0f778a
| 3,648,070
|
import json
from urllib import parse, request
def RecogniseForm(access_token, image, templateSign=None, classifierId=None):
"""
    Custom-template text recognition (iOCR)
    :param access_token:
    :param image: image data (string), base64-encoded; at most 4 MB, shortest side at least 15 px, longest side at most 4096 px; jpg/png/bmp supported
    :param templateSign: template ID (string)
    :param classifierId: classifier ID (int); at least one of this and templateSign must be given. templateSign takes priority: when present the specified template is used, otherwise the classifier decides which template to use
    :return: the recognition result
"""
host = 'https://aip.baidubce.com/rest/2.0/solution/v1/iocr/recognise?access_token=%s' % access_token
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
formdata = {'image': image}
if templateSign is not None:
formdata['templateSign'] = templateSign
if classifierId is not None:
formdata['classifierId'] = classifierId
data = parse.urlencode(formdata).encode('utf8')
req = request.Request(method='POST', url=host, headers=headers, data=data)
response = request.urlopen(req)
if (response.status == 200):
jobj = json.loads(response.read().decode())
datas = jobj['data']
recognise = {}
for obj in datas['ret']:
recognise[obj['word_name']] = obj['word']
return recognise
|
9a316a595c65b805970b1c0e5324b2b5c9342b31
| 3,648,071
|
from typing import Callable
import numpy as np
def rectangle(
func: Callable[..., float], a: float, b: float, eps: float = 0.0001, *args, **kwargs
) -> float:
"""
    The rectangle method is the simplest integration method.
    It divides the integration domain into n parts and sums the areas of
    all rectangles with dimensions (b - a)/n * f(x), where x is the left
    corner of each rectangle.
    Parameters:
        func = input function
        a = lower integration bound
        b = upper integration bound
        eps = maximum relative error
>>> rectangle(lambda x: x**2, 0, 2)
2.6992751228325296
>>> rectangle(lambda x: x**1, 0, 2)
2.0198019801980194
"""
try:
n = 100
x = np.linspace(a, b, n)
dx = x[1] - x[0]
L0 = sum(dx * func(i, *args, **kwargs) for i in x)
err = 1
while err > eps:
n += 1
x = np.linspace(a, b, n)
dx = x[1] - x[0]
L1 = sum(dx * func(i, *args, **kwargs) for i in x)
err = np.abs(L1 - L0) / np.abs(L1)
L0 = L1
except Exception:
raise RuntimeError("Integrasi gagal, pastikan fungsi anda benar!")
return L1
|
11e844f59ce5bcbdffed59de0188c63eec7571ac
| 3,648,072
|
import numpy as np
from typing import Tuple
def _rearrange_axis(data: np.ndarray,
                    axis: int = 0) -> Tuple[np.ndarray, tuple]:
"""rearranges the `numpy.ndarray` as a two-dimensional array of size (n,
-1), where n is the number of elements of the dimension defined by `axis`.
Parameters
----------
data : :class:`numpy.ndarray`
An array to be rearranged
axis : :class:`int`, Optional
The axis that all other dimensions are rearranged around it. Defaults to 0.
Returns
-------
    tuple (data : :class:`numpy.ndarray`, shape : :class:`tuple`)
        A tuple where the first element contains the reshaped data, and the second is a tuple with all dimensions except the one specified by the axis.
"""
if not isinstance(data, np.ndarray):
raise TypeError("data must be a numpy.ndarray.")
axis = _check_axis(axis, data.ndim)
if axis != 0:
data = np.moveaxis(data, axis, 0)
trailing_shape = data.shape[1:]
data = data.reshape((data.shape[0], -1))
return data, trailing_shape
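# A usage sketch: folding a (2, 3, 4) array around axis=1 yields a (3, 8)
# matrix plus the trailing shape needed to undo the reshape (assumes the
# module-level _check_axis helper accepts axis=1 for a 3-D array):
data = np.arange(24).reshape(2, 3, 4)
flat, trailing = _rearrange_axis(data, axis=1)
assert flat.shape == (3, 8) and trailing == (2, 4)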
|
07310b1ddc287f949f439dcb1ac66b8b7d41e421
| 3,648,073
|
def landmarks_json():
"""Send landmark data for map layer as Geojson from database."""
features = []
for landmark in Landmark.query.all():
# get the first image of a landmark, if any
image = ""
if len(landmark.images) > 0:
image = landmark.images[0].imageurl
# get the average rating of a landmark
avg_rating = ""
rating_scores = [r.user_score for r in landmark.ratings]
if len(rating_scores) > 0:
avg_rating = float(sum(rating_scores))/len(rating_scores)
features.append({
"type": "Feature",
"properties": {
"name": landmark.landmark_name,
"description": landmark.landmark_description,
"artist": landmark.landmark_artist,
"display-dimensions": landmark.landmark_display_dimensions,
"location-description": landmark.landmark_location_description,
"medium": landmark.landmark_medium
},
"geometry": {
"coordinates": [
landmark.landmark_lng,
landmark.landmark_lat],
"type": "Point"
},
"id": landmark.landmark_id,
'image': image,
'avg_rating': avg_rating,
})
landmarks_geojson = {
"type": "FeatureCollection",
"features": features,
}
return jsonify(landmarks_geojson)
|
9ab914c3c5e54728f4ec5346c32acbb846091244
| 3,648,074
|
def extractTuples(data):
""" Saca las tuplas (palabra,prediccion),
y las devuelve como dos arrays entradas y
salidas """
inp = []
out = []
for r in data:
for i in range(len(r)):
for j in range(-CONTEXT_WINDOW,CONTEXT_WINDOW+1):
if j == CONTEXT_WINDOW or i+j <0 or i+j >= len(r):
continue
inp.append(r[i])
out.append(r[i+j])
return inp,out
|
191e980808f65c6f4acd116e14eba44f0f1792a4
| 3,648,075
|
def _GetKeyKind(key):
"""Return the kind of the given key."""
return key.path().element_list()[-1].type()
|
c37f1d889e484390449de682e3d6c6b9d4521ce4
| 3,648,076
|
import yaml
def load_config() -> dict:
"""
Loads the config.yml file to memory and returns it as dictionary.
:return: Dictionary containing the config.
"""
with open('config.yml', 'r') as ymlfile:
return yaml.load(ymlfile, Loader=yaml.FullLoader)
|
6e05aa4eb6a7d9862814f595ecdc89ffab145ee5
| 3,648,077
|
from typing import Tuple
from typing import List
from typing import Sequence
def find_column_equivalence(matrix, do_not_care) -> Tuple[List[int], List[Sequence]]:
""" Adapt find_row_equivalence (above) to work on columns instead of rows. """
index, classes = find_row_equivalence(zip(*matrix), do_not_care)
return index, list(zip(*classes))
|
c47a3d2d3663fb0d52bdbf81b111416ab1f2f41d
| 3,648,078
|
import inspect
def RegisterSpecs(func):
"""The decorator to register the specification for each check item object.
The decorator first tests whether it is involved in the outmost call of the
check item object. If so, it then goes through the args, kwargs, and defaults
to populate the specification.
Args:
func: The __init__ function of a check item class.
Returns:
The wrapper function.
"""
def Wrapper(self, *args, **kwargs):
"""The function wrapper to extract argument for CheckItems."""
frame_stack = inspect.stack()
if len(frame_stack) > 1:
# Check if the caller is a method of BaseCheckItem. If so,
# `func` itself is not the outmost call to extract parameters.
frame = frame_stack[1][0]
frame_locals = frame.f_locals
if ('self' in frame_locals and
isinstance(frame_locals['self'], BaseCheckItem)):
return func(self, *args, **kwargs)
# Record the args and kwargs into a dict.
params = {}
    # Get the arguments for the function.
    # Example:
    #   def f(a, b=1, c=2, *pos, **named):
    #     pass
    # This returns:
    #   FullArgSpec(args=['a', 'b', 'c'], varargs='pos', varkw='named',
    #               defaults=(1, 2), ...)
    # (inspect.getargspec was removed in Python 3.11; getfullargspec provides
    # the same `args` and `defaults` fields.)
    argspec = inspect.getfullargspec(func)
# If an arg has default, the corresponding index in the `defaults` array is
# N - (num_args_without_default), where N is the index of the arg in
# the `args` array.
    # We start N at 1 to account for the `self` argument.
default_idx = (1 - (len(argspec.args) - len(argspec.defaults))
if argspec.defaults else None)
# For class member functions, the first item in args is `self`. Skip it.
for idx, arg_name in enumerate(argspec.args[1:]):
if idx < len(args):
arg_value = args[idx]
elif arg_name in kwargs:
arg_value = kwargs[arg_name]
elif (argspec.defaults and default_idx >= 0
and default_idx < len(argspec.defaults)):
arg_value = argspec.defaults[default_idx]
else:
raise ValueError('Missing argument "%s" for "%s".'
% (arg_name, self.__class__.__name__))
if argspec.defaults:
default_idx += 1
if isinstance(arg_value, check_range.BaseRange):
arg_value = arg_value.RawArgs()
params[arg_name] = arg_value
# Assign the parameters.
self.SetSpecs(params)
# Call the original function.
obj = func(self, *args, **kwargs)
return obj
return Wrapper
|
15c8275968a510a7e57993446a7b5f9f7eadeff4
| 3,648,080
|
from typing import Set
from pathlib import Path
def get_changes_to_be_committed() -> Set[Path]:
"""After every time `add` is performed, the filepath is added to this text file."""
return {Path(path) for path in path_to.changes_to_be_committed.read_text().split("\n") if path}
|
84878432310511ae79dccebc7c66e0c658917616
| 3,648,081
|
import xarray as xr
def concatenate_weather_files(dir_path):
    """Concatenate all .nc files found in the directory given by dir_path."""
    # import all the files as datasets
    fnames = get_weather_files(dir_path)
    ds_list = []
    for f in fnames:
        with xr.open_dataset(f, engine='netcdf4') as ds:
            # Load eagerly so the data stays usable after the file is closed.
            ds_list.append(ds.load())
    ds_main = xr.concat(ds_list, dim='time')
groups = ds_main.groupby('time')
return groups
|
c644a8d69b71c9cd06467757f1cddc5589ff3680
| 3,648,082
|
def homepage(module=None, *match, **attr):
"""
Shortcut for module homepage menu items using the MM layout,
retrieves the module's nice name.
@param module: the module's prefix (controller)
@param match: additional prefixes
@param attr: attributes for the navigation item
"""
settings = current.deployment_settings
all_modules = settings.modules
layout = S3MainMenuDefaultLayout
c = [module] + list(match)
if "name" in attr:
name = attr["name"]
attr.pop("name")
else:
if module is None:
module = "default"
if module in all_modules:
m = all_modules[module]
name = m.name_nice
else:
name = module
if "f" in attr:
f = attr["f"]
del attr["f"]
else:
f = "index"
return layout(name, c=c, f=f, **attr)
|
5523f87b16ab09c18deb3f915d4feea29e269540
| 3,648,083
|
def handle_fallthrough(event, path, query):
"""
Handles the fallthrough cases where no redirects were matched
"""
    # If no fallthrough response is provided, 302 the whole website to the HOST
    # that was input
    if variables.FALLTHROUGH is None:
return redirect('//' + variables.HOST + path + query)
# If we asked to fallthrough to the origin, just return the original request
# so that Cloudfront continues on its merry way
elif variables.FALLTHROUGH == 'origin':
return event['Records'][0]['cf']['request']
# Otherwise use the fallthrough as is
else:
return variables.FALLTHROUGH
|
4568aeec2061eb947839d63472c23b565afd3c0c
| 3,648,084
|
import numpy as np
def vowel_space_area(F1a, F1i, F1u, F2a, F2i, F2u):
"""
Return vowel space area
Args:
        F1a: (float) the first formant frequency of the vowel /a/ [Hz]
        F1i: (float) the first formant frequency of the vowel /i/ [Hz]
        F1u: (float) the first formant frequency of the vowel /u/ [Hz]
        F2a: (float) the second formant frequency of the vowel /a/ [Hz]
        F2i: (float) the second formant frequency of the vowel /i/ [Hz]
        F2u: (float) the second formant frequency of the vowel /u/ [Hz]
Returns:
VSA: (float) vowel space area
"""
    # Compute the pairwise Euclidean distances between the vowels, then the
    # area of the /i a u/ triangle via Heron's formula
EDiu = np.sqrt((F1i-F1u)**2+(F2i-F2u)**2)
EDia = np.sqrt((F1i-F1a)**2+(F2i-F2a)**2)
EDau = np.sqrt((F1a-F1u)**2+(F2a-F2u)**2)
S = (EDiu+EDia+EDau)/(2.0)
VSA = np.sqrt(S*(S-EDiu)*(S-EDia)*(S-EDau))
# Return vowel space area
return float(VSA)
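# A minimal usage sketch with illustrative formant values close to the
# Peterson & Barney (1952) averages for adult male speakers.
vsa = vowel_space_area(F1a=730.0, F1i=270.0, F1u=300.0,
                       F2a=1090.0, F2i=2290.0, F2u=870.0)
print(vsa)  # triangle area in Hz^2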
|
59649803154fdddcfd6173f68fcf4e9054f34343
| 3,648,085
|
def get_hosted_zone(domain):
"""Return a domain's hosted zone."""
return api.get(f"/api/domain/{domain['_id']}/records/")
|
7d3d91a81563ab6918a85c39fb47b4e28de4f5d5
| 3,648,086
|
def align_spikes(spike_data, spt_dict, sp_win, type="max", resample=1,
                 contact=0, remove=True):
    """Aligns spike waves and returns corrected spike times
    Parameters
    ----------
    spike_data : dict
    spt_dict : dict
    sp_win : list of int
    type : {'max', 'min'}, optional
    resample : int, optional
    contact : int, optional
    remove : bool, optional
    Returns
    -------
    ret_dict : dict
        spike times of aligned spikes
    """
    spt = spt_dict['data'].copy()
    idx_align = np.arange(len(spt))
    # Iterate until all spikes are correctly aligned.
    iter_id = 0
    while len(idx_align) > 0:
        spt_align = {'data': spt[idx_align]}
        spt_inbound = filter_spt(spike_data, spt_align, sp_win)
        idx_align = idx_align[spt_inbound]
        sp_waves_dict = extract_spikes(spike_data, spt_align, sp_win,
                                       resample=resample, contacts=contact)
        sp_waves = sp_waves_dict['data'][:, spt_inbound, 0]
        time = sp_waves_dict['time']
        if type == "max":
            i = sp_waves.argmax(0)
        elif type == "min":
            i = sp_waves.argmin(0)
        # Move the spike markers.
        shift = time[i]
        spt[idx_align] += shift
        # If a spike extremum was at the window edge, re-extract at the new
        # marker and repeat the alignment.
        tol = 0.1
        idx_align = idx_align[(shift < (sp_win[0] + tol)) |
                              (shift > (sp_win[1] - tol))]
        iter_id += 1
    ret_dict = {'data': spt}
    if remove:
        # Remove duplicate (double) spikes.
        FS = spike_data['FS']
        ret_dict = remove_doubles(ret_dict, 1000. / FS)
    return ret_dict
|
38c62474e6c19c7492066254a7075ff4a34573ca
| 3,648,087
|
import numpy as np
from typing import Optional
def rgb2hsi(rgb: np.ndarray,
            *,
            axis: Optional[int] = None) -> np.ndarray:
    """
    Convert RGB to Hue Saturation Intensity
    :param rgb: array of RGB values
    :param axis: the axis holding the three color components; inferred when None
    :return: array of HSI values with the same shape as `rgb`
    """
if axis is None:
axis = get_matching_axis(rgb.shape, 3)
big_m, little_m, chroma = _compute_chroma(rgb, axis)
inds = construct_component_inds(axis, rgb.ndim, 3)
hsi = np.zeros(rgb.shape)
hsi[inds[0]] = _compute_rgb_hue(rgb, big_m, little_m, chroma, axis)
hsi[inds[2]] = np.mean(rgb, axis=axis, keepdims=True)
i_nz = hsi[inds[2]] != 0 # type: np.ndarray
if little_m.ndim < i_nz.ndim:
# This only happens in the 1D case
little_m = little_m[slice(None), np.newaxis]
if np.any(i_nz):
hsi[inds[1]][i_nz] = 1 - little_m[i_nz] / hsi[inds[2]][i_nz]
return hsi
|
55734347d1d102a183dd736a4d832b33ce17115c
| 3,648,088
|
def get_movie_brief_actor(actor, soup):
"""
Getting brief data from individual movie webpage (for actor dictionary)
"""
headers=['actor','title','year','rating','vote','genre_list','budget','opening','gross_usa',\
'gross_cw','runtime','director','writer','star','distributor']
# find movie title
title = " ".join(soup.find('h1').text.split()[:-1])
# find rating
rating = np.nan
try:
rating = float(soup.find('span',attrs={'itemprop':'ratingValue'}).text)
    except Exception:
pass
# find vote (rating count)
vote = np.nan
try:
vote = int(soup.find('span',attrs={'itemprop':'ratingCount'}).text.replace(',',''))
    except Exception:
pass
# find list of genre
genre_list=[]
try:
for genres in soup.find('div', class_="subtext").find_all('a')[:-1]:
genre_list.append(genres.text)
    except Exception:
pass
# find release date
date = np.nan
try:
date_pre = soup.find('div', class_="subtext").find_all('a')[-1].text.split('(')[0]
        date = pd.to_datetime(date_pre)  # pd.to_datetime returns a pandas Timestamp
    except Exception:
pass
# # find metascorre
# if soup.find('div',class_="metacriticScore score_favorable titleReviewBarSubItem") is not None:
# meta = int(soup.find('div',class_="metacriticScore score_favorable titleReviewBarSubItem").text.strip('\n'))
# else:
# meta = np.nan
# # find plot keywords
# keyword_list=[]
# for keywords in soup.find('div', class_="article", id="titleStoryLine").\
# find('div', class_="see-more inline canwrap").find_all('a')[:-1]:
# keyword_list.append(keywords.text.strip(' '))
# find budget, opening weekend USA, gross USA, cumulative worldwide gross
# assign default value:
budget, opening, gross_usa, gross_cw, distributor = np.nan, np.nan, np.nan, np.nan, np.nan
try:
for line in soup.find('div', class_="article", id="titleDetails").find_all('h4'):
if "Budget:" in line:
budget = int(''.join(s for s in line.next_sibling if s.isdigit()))
if "Opening Weekend USA:" in line:
opening = int(''.join(s for s in line.next_sibling if s.isdigit()))
if "Gross USA:" in line:
gross_usa = int(''.join(s for s in line.next_sibling if s.isdigit()))
if "Cumulative Worldwide Gross:" in line:
gross_cw = int(''.join(s for s in line.next_sibling if s.isdigit()))
if "Production Co:" in line:
distributor = line.findNext().text.replace(' ','')
    except Exception:
pass
# find runtime
runtime = np.nan
try:
runtime = int(soup.find_all('time')[-1].text.strip(' min'))
    except Exception:
pass
# find director
director= np.nan
try:
director = soup.find('div',class_="credit_summary_item").find('a').text
link_d = soup.find('div',class_="credit_summary_item").find('a').get('href')
    except Exception:
pass
# find writer
writer = np.nan
try:
writer_line = soup.find_all('div',class_="credit_summary_item")[1].find_all('a')
link_w = [w.get('href') for w in writer_line]
writer = [w.text for w in writer_line]
if '1 more credit' in writer:
writer.remove('1 more credit')
link_w.pop()
    except Exception:
pass
# find star
star = np.nan
try:
star_line = soup.find_all('div',class_="credit_summary_item")[2].find_all('a')
link_s = [s.get('href') for s in star_line]
star = [s.text for s in star_line]
if 'See full cast & crew' in star:
star.remove('See full cast & crew')
link_s.pop()
    except Exception:
pass
# # find language
# language= np.nan
# t= []
# matching = []
# for div in soup.find('div', class_="article", id="titleDetails").find_all('div'):
# t.append(div.text.replace('\n','').replace(' ',''))
# matching = [s for s in t if 'Language:' in s]
# language = matching[0].replace(':',' ').replace('|',' ').split(' ')[1:]
# # find country
# country= np.nan
# t= []
# matching = []
# for div in soup.find('div', class_="article", id="titleDetails").find_all('div'):
# t.append(div.text.replace('\n','').replace(' ',''))
# matching = [s for s in t if 'Country:' in s]
# country = matching[0].replace(':',' ').replace('|',' ').split(' ')[1:]
movie_dict = dict(zip(headers, [actor,
title,
date,
rating,
vote,
genre_list,
budget,
opening,
gross_usa,
gross_cw,
runtime,
director,
writer,
star,
distributor]))
return movie_dict
|
ceaaa05ae26681eddcc8f9ed1a28f43bcc83473f
| 3,648,089
|
def mul(a: TensorableType, b: TensorableType) -> Tensor:
"""Returns the product of input tensor_objects with their local gradients"""
a = enforceTensor(a)
b = enforceTensor(b)
output = Tensor(a.data * b.data, requires_grad=(a.requires_grad or b.requires_grad))
output.save_for_backward([a, b])
def backward_fn():
if a.requires_grad:
a_local_gradient = output.grad.data * b.data
a_local_gradient = manageBroadcasting(a.ndim, a.shape, a_local_gradient)
a.grad.data += a_local_gradient
if b.requires_grad:
b_local_gradient = output.grad.data * a.data
b_local_gradient = manageBroadcasting(b.ndim, b.shape, b_local_gradient)
b.grad.data += b_local_gradient
output.backward_fn = backward_fn
return output
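# A minimal numpy sketch of the gradient rule implemented above: for
# out = a * b, grad_a = grad_out * b (and symmetrically for b), with the
# gradient summed over broadcast dimensions. `manageBroadcasting` in the
# original code is assumed to perform this kind of reduction.
import numpy as np

def reduce_broadcast(grad, shape):
    # Sum out leading dims added by broadcasting, then sum over size-1 dims.
    while grad.ndim > len(shape):
        grad = grad.sum(axis=0)
    for i, dim in enumerate(shape):
        if dim == 1:
            grad = grad.sum(axis=i, keepdims=True)
    return grad

a = np.random.rand(3, 1)        # broadcasts against b
b = np.random.rand(3, 4)
grad_out = np.ones((3, 4))      # upstream gradient of out = a * b
grad_a = reduce_broadcast(grad_out * b, a.shape)
print(grad_a.shape)             # (3, 1) -- matches a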
|
dfa75b511b29360589bec594ef464b804dfa298b
| 3,648,090
|
import hail as hl
def split_multi(vds: 'VariantDataset', *, filter_changed_loci: bool = False) -> 'VariantDataset':
"""Split the multiallelic variants in a :class:`.VariantDataset`.
Parameters
----------
vds : :class:`.VariantDataset`
Dataset in VariantDataset representation.
filter_changed_loci : :obj:`bool`
If any REF/ALT pair changes locus under :func:`.min_rep`, filter that
variant instead of throwing an error.
Returns
-------
:class:`.VariantDataset`
"""
variant_data = hl.experimental.sparse_split_multi(vds.variant_data, filter_changed_loci=filter_changed_loci)
return VariantDataset(vds.reference_data, variant_data)
|
4c15b45806fbbb6782a27a2d123529e9b63ef622
| 3,648,091
|
from theano import tensor as T
import tensorflow as tf
from keras import backend as K
def zeros(shape, dtype=K.floatx()):
"""Return all-zeros tensor of given shape and type."""
# As of Keras version 1.1.0, Keras zeros() requires integer values
# in shape (e.g. calling np.zeros() with the Theano backend) and
# thus can't be called with tensor values. This version avoids the
# issue by using the backend zeros() instead.
if K.backend() == 'theano':
return T.zeros(shape, dtype)
else:
assert K.backend() == 'tensorflow'
return tf.zeros(shape, dtype)
|
5e48871e8c0183d6feb3f68b2275fb5abc7ff55e
| 3,648,092
|
from numpy import sqrt, pi
def accretion_cylinder(mbh, mdot, r):
    """rschw, omega, facc, teff, zscale = accretion_cylinder(mbh, mdot, r)
    Compute local disk quantities for a black hole of mass mbh (solar masses)
    accreting at mdot (Eddington units), at radius r (Schwarzschild radii):
    the Schwarzschild radius, Keplerian angular velocity, accretion flux,
    effective temperature and vertical scale height (cgs units).
    """
GM = cgs_graw * mbh * sol_mass
rschw = 2 * GM / cgs_c**2
omega = sqrt( GM / (r * rschw)**3 )
facc = 3 * GM * (mdot * mdot_edd(mbh)) / (8 * pi * (r * rschw)**3) \
* (1 - sqrt(3 / r))
teff = ( facc / cgs_stef )**0.25
zscale = sqrt( 2 * cgs_k_over_mh * teff ) / omega
return rschw, omega, facc, teff, zscale
|
b89573fc361d83dbb288cfa0760e844e611fac44
| 3,648,094
|
def cost_function_wrapper(theta, cost_function_parameters):
"""Wrapper for the Cost Function"""
cost_function_parameters['theta'] = theta
return cost_function(cost_function_parameters)
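# A minimal usage sketch, assuming a dict-based cost_function such as the
# quadratic one below; the wrapper then plugs directly into scipy.optimize.
import numpy as np
from scipy.optimize import minimize

def cost_function(params):
    theta = params['theta']
    target = params['target']
    return float(np.sum((theta - target) ** 2))

params = {'target': np.array([1.0, -2.0])}
res = minimize(cost_function_wrapper, x0=np.zeros(2), args=(params,))
print(res.x)  # ~ [1.0, -2.0]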
|
1bd2e5e403514590e174ddd4198880320be80b99
| 3,648,095
|
import numpy as np
from PIL import Image
def make_img_tile(imgs, path, epoch, aspect_ratio=1.0,
                  tile_shape=None, border=1, border_color=0):
    """Tile a batch of images into one grid image and optionally save it.
    Pixel values in `imgs` are expected to lie in [-1, 1].
    """
if imgs.ndim != 3 and imgs.ndim != 4:
raise ValueError('imgs has wrong number of dimensions.')
    n_imgs = imgs.shape[0]
# Grid shape
img_shape = np.array(imgs.shape[1:3])
if tile_shape is None:
img_aspect_ratio = img_shape[1] / float(img_shape[0])
aspect_ratio *= img_aspect_ratio
tile_height = int(np.ceil(np.sqrt(n_imgs * aspect_ratio)))
tile_width = int(np.ceil(np.sqrt(n_imgs / aspect_ratio)))
grid_shape = np.array((tile_height, tile_width))
else:
assert len(tile_shape) == 2
grid_shape = np.array(tile_shape)
# Tile image shape
tile_img_shape = np.array(imgs.shape[1:])
tile_img_shape[:2] = (img_shape[:2] + border) * grid_shape[:2] - border
# Assemble tile image
tile_img = np.empty(tile_img_shape)
tile_img[:] = border_color
for i in range(grid_shape[0]):
for j in range(grid_shape[1]):
img_idx = j + i*grid_shape[1]
# No more images - stop filling out the grid.
if img_idx >= n_imgs:
break
            # Convert [-1, 1] to [0, 1]
            img = (imgs[img_idx] + 1) / 2.0
yoff = (img_shape[0] + border) * i
xoff = (img_shape[1] + border) * j
tile_img[yoff:yoff+img_shape[0], xoff:xoff+img_shape[1], ...] = img
    img_tile = Image.fromarray(np.uint8(tile_img * 255), 'L')
    if path is not None:
        path_name = path + "/iteration_%03d.jpg" % epoch
        img_tile.save(path_name)
return img_tile
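# A minimal usage sketch: tile 16 random grayscale images (values in [-1, 1])
# without saving to disk.
imgs = np.random.uniform(-1, 1, size=(16, 8, 8))
tile = make_img_tile(imgs, path=None, epoch=0)
print(tile.size)  # (35, 35) for a 4x4 grid of 8x8 images with a 1px border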
|
0e468b0d99e4419e8cbb4c53dce5ea1cfcbfb513
| 3,648,096
|
import pkg_resources
def dummy_plugin_distribution(dummy_plugin_distribution_name, save_sys_path):
"""Add a dummy plugin distribution to the current working_set."""
dist = pkg_resources.Distribution(
project_name=dummy_plugin_distribution_name,
metadata=DummyEntryPointMetadata(
f"""
[lektor.plugins]
dummy-plugin = {__name__}:DummyPlugin
"""
),
version="1.23",
location=__file__,
)
pkg_resources.working_set.add(dist)
return dist
|
cb79ad0c4cd1b8824afe592cef38bc002f44087a
| 3,648,097
|
def parse_temperature_item(item):
"""Parse item for time and temperature
    :param item: Definition, e.g. '17.0 > 07:00'
    :returns: dict with temperature and minutes"""
    temp_time_tuple = item.split(">")
    temperature = float(temp_time_tuple[0].strip())
    minutes_from_midnight = calculate_minutes_from_midnight(
        temp_time_tuple[1].strip())
return {'minutes_from_midnight': minutes_from_midnight,
'temperature': temperature}
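# A minimal usage sketch. `calculate_minutes_from_midnight` is defined
# elsewhere in the original module; a plausible stand-in is shown here.
def calculate_minutes_from_midnight(hhmm):
    hours, minutes = hhmm.split(":")
    return int(hours) * 60 + int(minutes)

print(parse_temperature_item("17.0 > 07:00"))
# {'minutes_from_midnight': 420, 'temperature': 17.0}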
|
ec1a8f501c392b68c559aa11d03995a44e09c1fd
| 3,648,098
|
from pathlib import Path
import json
from git import GitCommandError, Repo
def add_file_to_dataset_view(user_data, cache):
"""Add the uploaded file to cloned repository."""
ctx = DatasetAddRequest().load(request.json)
user = cache.ensure_user(user_data)
project = cache.get_project(user, ctx['project_id'])
if not ctx['commit_message']:
ctx['commit_message'] = 'service: dataset add {0}'.format(
ctx['short_name']
)
local_paths = []
for _file in ctx['files']:
local_path = None
if 'file_url' in _file:
            commit_message = '{0} {1}'.format(
                ctx['commit_message'], _file['file_url']
            )
job = cache.make_job(user)
_file['job_id'] = job.job_id
with enqueue_retry(DATASETS_JOB_QUEUE) as queue:
queue.enqueue(
dataset_add_remote_file, user_data, job.job_id,
project.project_id, ctx['create_dataset'], commit_message,
ctx['short_name'], _file['file_url']
)
continue
if 'file_id' in _file:
file = cache.get_file(user, _file['file_id'])
local_path = file.abs_path
elif 'file_path' in _file:
local_path = project.abs_path / Path(_file['file_path'])
if not local_path or not local_path.exists():
return error_response(
INVALID_PARAMS_ERROR_CODE,
'invalid file reference: {0}'.format(json.dumps(_file))
)
ctx['commit_message'] += ' {0}'.format(local_path.name)
local_paths.append(str(local_path))
if local_paths:
with chdir(project.abs_path):
add_file(
local_paths,
ctx['short_name'],
create=ctx['create_dataset'],
force=ctx['force'],
commit_message=ctx['commit_message']
)
try:
_, ctx['remote_branch'] = repo_sync(
Repo(project.abs_path), remote='origin'
)
except GitCommandError:
return error_response(
INTERNAL_FAILURE_ERROR_CODE, 'repo sync failed'
)
return result_response(DatasetAddResponseRPC(), ctx)
|
bf253bbbfe183ef6f5a218831f17ba856d2cf2bc
| 3,648,099
|
from stevedore import ExtensionManager
def get_available_adapters() -> dict:
"""Get information on all available adapters
Returns:
(dict) Where keys are adapter names and values are descriptions
"""
return _output_plugin_info(ExtensionManager(namespace='materialsio.adapter'))
|
5384931189492a6369a885498f4dfeaed315cb94
| 3,648,100
|
def _must_find_n(session, obj_outer, cls_inner, name_inner):
"""Searches the database for a "namespaced" object, such as a nic on a node.
Raises NotFoundError if there is none. Otherwise returns the object.
Arguments:
session - a SQLAlchemy session to use.
obj_outer - the "owner" object
cls_inner - the "owned" class
name_inner - the name of the "owned" object
"""
obj_inner = _namespaced_query(session, obj_outer, cls_inner, name_inner)
if obj_inner is None:
raise NotFoundError("%s %s on %s %s does not exist." %
(cls_inner.__name__, name_inner,
obj_outer.__class__.__name__, obj_outer.label))
return obj_inner
|
25546e1f528c54ae7283f6a0d404065a337eb977
| 3,648,101
|
def list_providers():
"""
Get list of names of all supported cloud providers
:rtype: list
"""
return [cls.provider_name() for cls in BaseHandler.__subclasses__()]
|
96f75695a40c9969cbd1507b3b3214df44039f1e
| 3,648,102
|
def GetRPCProxy(address=None, port=None, url=GOOFY_RPC_URL):
"""Gets an instance (for client side) to access the goofy server.
Args:
address: Address of the server to be connected.
port: Port of the server to be connected.
url: Target URL for the RPC server. Default to Goofy RPC.
"""
address = address or DEFAULT_GOOFY_ADDRESS
port = port or DEFAULT_GOOFY_PORT
return jsonrpc.ServerProxy(
'http://%s:%d%s' % (address, port, url))
|
3e66abf1e6961c9d2dd2f2fa562e43528d6e99a4
| 3,648,103
|
def dict_hash_table_100_buckets():
"""Test for hash table with 100 buckets, dictionary."""
ht = HashTable(100, naive_hash)
for word in dictionary_words:
ht.set(word, word)
return ht
|
eef6fa89811f6ac9d1ad17d11dc5aa38346a4e16
| 3,648,105
|
import numpy as np
def white(N):
    """
    White noise.
    :param N: Amount of samples.
    White noise has a constant power density, so its narrowband spectrum is flat.
    The power in white noise increases by a factor of two for each octave band,
    and therefore increases by 3 dB per octave.
    """
return np.random.randn(N)
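# A minimal sketch verifying the flat spectrum: the periodogram of white
# noise is roughly constant across frequency, so the mean power in the
# lower half of the band should match the upper half.
x = white(2 ** 16)
psd = np.abs(np.fft.rfft(x)) ** 2 / len(x)
half = len(psd) // 2
print(psd[:half].mean() / psd[half:].mean())  # ~ 1.0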
|
5404c24b0cb79e3866d10eed1630ccdfebe9fae1
| 3,648,106
|
import warnings
import numpy as np
import pandas as pd
from scipy.sparse import coo_matrix
def read_HiCPro(bedfile, matfile):
"""
Fast loading of the .matrix and .bed files derived from HiC-Pro
Parameters
----------
bedfile : str,
path to the .bed file which contains fragments info
matfile : str,
path to the .matrix file which contains contact counts
Returns
-------
counts : the interaction contacts map
lengths : the lengths of each chromosomes
chrs : the chromosome names
"""
### read and parse fragments file at first
bed_df = pd.read_csv(bedfile, sep='\t', comment="#", header=None, names=['chrs', 'starts', 'ends', 'idxs'])
# get lengths for each chromosome
chrs, indices, lengths = np.unique(bed_df.chrs.values, return_index=True, return_counts=True)
chrs = list(chrs[indices.argsort()])
lengths = lengths[indices.argsort()]
base = bed_df.idxs[0] # start index: 0 or 1
### read and parse counts file then
n = lengths.sum()
shape = (n, n)
# This is the interaction count files
mat_df = pd.read_csv(matfile, sep='\t', comment="#", header=None)
row, col, data = mat_df.values.T
row = row.astype(int)
col = col.astype(int)
    # If there are NaNs, remove them
    mask = np.isnan(data)
    if np.any(mask):
        warnings.warn(f'NAs detected in {matfile}. Removing NAs and replacing with 0.')
        keep = ~mask  # invert the mask once and reuse it
        row = row[keep]
        col = col[keep]
        data = data[keep].astype(int)
# if index start from 1
if base not in [0, 1]:
raise ValueError('indices should start either at 0 or 1')
if base == 1:
col -= 1
row -= 1
# convert to a coo_matrix (lower triangular)
counts = coo_matrix((data, (row, col)), shape=shape)
# whether the matrix is lower or upper triangular
if np.all(row <= col):
triangular_upper = True
elif np.all(row >= col):
triangular_upper = False
else:
raise ValueError('The HiC matrix is neither lower nor upper triangular!')
# We need to deal with the fact that we should not duplicate entries for the diagonal
counts = counts.toarray()
if triangular_upper:
counts = counts + np.triu(counts, 1).T
else:
counts = counts + np.tril(counts, -1).T
return counts, lengths, chrs
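# A minimal usage sketch with a tiny synthetic HiC-Pro output (two bins on
# one chromosome) written to temporary files.
import tempfile, os
tmp = tempfile.mkdtemp()
bed = os.path.join(tmp, 'test.bed')
mat = os.path.join(tmp, 'test.matrix')
with open(bed, 'w') as f:
    f.write("chr1\t0\t10000\t1\nchr1\t10000\t20000\t2\n")
with open(mat, 'w') as f:
    f.write("1\t1\t5\n1\t2\t3\n2\t2\t7\n")
counts, lengths, chrs = read_HiCPro(bed, mat)
print(counts)   # [[5 3] [3 7]]
print(lengths)  # [2]
print(chrs)     # ['chr1']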
|
1c52cdf62a20d4ff8168919afbd890890653de93
| 3,648,107
|