| content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
|---|---|---|
def uploadResourceFileUsingSession(url, session, resourceName, fileName, fullPath, scannerId):
"""
upload a file for the resource - e.g. a custom lineage csv file
    works with either csv or zip files (.csv|.zip)
returns rc=200 (valid) & other rc's from the post
"""
print(
"uploading file for resource "
+ url
+ " resource="
+ resourceName
)
apiURL = url + "/access/1/catalog/resources/" + resourceName + "/files"
print("\turl=" + apiURL)
# header = {"accept": "*/*", }
params = {"scannerid": scannerId, "filename": fileName, "optionid": "File"}
print("\t" + str(params))
# files = {'file': fullPath}
mimeType = "text/csv"
readMode = "rt"
if fileName.endswith(".zip"):
mimeType = "application/zip"
readMode = "rb"
if fileName.endswith(".dsx"):
mimeType = "text/plain"
file = {"file": (fileName, open(fullPath, readMode), mimeType)}
# file = {"file": (fileName, open(fullPath, readMode), )}
print(f"\t{file}")
# print(f"session header:{session.headers}")
uploadResp = session.post(
apiURL,
data=params,
files=file,
)
print("\tresponse=" + str(uploadResp.status_code))
if uploadResp.status_code == 200:
# valid - return the json
return uploadResp.status_code
else:
# not valid
print("\tupload file failed")
print("\t" + str(uploadResp))
print("\t" + str(uploadResp.text))
return uploadResp.status_code
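
# Usage sketch (all values below are placeholders, not taken from the original code):
# call the uploader with a requests.Session that already carries catalog credentials.
import requests

session = requests.Session()
session.auth = ("admin", "password")   # hypothetical credentials
rc = uploadResourceFileUsingSession(
    url="https://edc.example.com",     # hypothetical catalog URL
    session=session,
    resourceName="custom_lineage",     # hypothetical resource name
    fileName="lineage.csv",
    fullPath="/tmp/lineage.csv",
    scannerId="LineageScanner",        # hypothetical scanner id
)
print("upload rc =", rc)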
|
8a4a8c21563f1467db284f2e98dd1b48dbb65a3c
| 3,639,145
|
from typing import Literal
def read_inc_stmt(line: str) -> tuple[Literal["inc"], str] | None:
"""Attempt to read INCLUDE statement"""
inc_match = FRegex.INCLUDE.match(line)
if inc_match is None:
return None
inc_path: str = inc_match.group(1)
return "inc", inc_path
|
64ac4b53363a4aa5b9e2c4cf91b27f169ad0465c
| 3,639,146
|
def sent2vec(s, model):
"""
Transform a sentence to a vector.
Pre: No parameters may be None.
Args:
s: The sentence to transform.
model: A word2vec model.
Returns: A vector, representing the given sentence.
"""
words = word_tokenize(s.lower())
# Stopwords and numbers must be removed, as well as words that are not
# part of the model
M = [model[w] for w in words if w not in stop_words and w.isalpha() and w in model]
M = np.array(M)
if len(M) > 0:
v = M.sum(axis=0)
return v / np.sqrt((v ** 2).sum())
else:
        # When the sentence is empty after removing invalid tokens, the vector
# is equal to the null-vector
return model.get_vector('null')
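
# Usage sketch, assuming word_tokenize comes from nltk, stop_words is a set of English
# stopwords, and `model` is a gensim KeyedVectors instance that contains a 'null' token.
import numpy as np
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from gensim.models import KeyedVectors

stop_words = set(stopwords.words("english"))
model = KeyedVectors.load("word2vec.kv")  # hypothetical path to a saved model
vec = sent2vec("The quick brown fox jumps over the lazy dog", model)
print(vec.shape)  # one unit-length vector with the model's dimensionality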
|
1e61639cc27e3a430257ff3ac4b2a002a42cf177
| 3,639,148
|
def subnet_group_present(
name,
subnet_ids=None,
subnet_names=None,
description=None,
tags=None,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
Ensure ElastiCache subnet group exists.
.. versionadded:: 2015.8.0
name
The name for the ElastiCache subnet group. This value is stored as a lowercase string.
subnet_ids
A list of VPC subnet IDs for the cache subnet group. Exclusive with subnet_names.
subnet_names
A list of VPC subnet names for the cache subnet group. Exclusive with subnet_ids.
description
Subnet group description.
tags
A list of tags.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
"""
ret = {"name": name, "result": True, "comment": "", "changes": {}}
exists = __salt__["boto_elasticache.subnet_group_exists"](
name=name, tags=tags, region=region, key=key, keyid=keyid, profile=profile
)
if not exists:
if __opts__["test"]:
ret["comment"] = "Subnet group {} is set to be created.".format(name)
ret["result"] = None
return ret
created = __salt__["boto_elasticache.create_subnet_group"](
name=name,
subnet_ids=subnet_ids,
subnet_names=subnet_names,
description=description,
tags=tags,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if not created:
ret["result"] = False
ret["comment"] = "Failed to create {} subnet group.".format(name)
return ret
ret["changes"]["old"] = None
ret["changes"]["new"] = name
ret["comment"] = "Subnet group {} created.".format(name)
return ret
ret["comment"] = "Subnet group present."
return ret
|
d7d441dcfacd92f33b4172e33299df398cfa3ba2
| 3,639,149
|
def GetTensorFlowVersion(vm):
"""Returns the version of tensorflow installed on the vm.
Args:
vm: the target vm on which to check the tensorflow version
Returns:
installed python tensorflow version as a string
"""
stdout, _ = vm.RemoteCommand(
('echo -e "import tensorflow\nprint(tensorflow.__version__)" | {0} python'
.format(GetEnvironmentVars(vm)))
)
return stdout.strip()
|
4380ec75f2b5713ab0ead31189cdd7b3f81c6b9b
| 3,639,150
|
import json
from collections import OrderedDict
def datetime_column_evrs():
"""hand-crafted EVRS for datetime columns"""
with open(
file_relative_path(__file__, "../fixtures/datetime_column_evrs.json")
) as infile:
return expectationSuiteValidationResultSchema.load(
json.load(infile, object_pairs_hook=OrderedDict)
)
|
c229f08250c51a805a15db653e3e70513a6f6e9a
| 3,639,152
|
def pd_df_timeseries():
"""Create a pandas dataframe for testing, with timeseries in one column"""
return pd.DataFrame(
{
"time": pd.date_range(start="1/1/2018", periods=100),
"A": np.random.randint(0, 100, size=100),
}
)
|
9b6b217e2a4bc80b5f54cecf56c55d5fb229d288
| 3,639,154
|
from typing import Union
def n_tokens(doc: Union[Doc, Span]):
"""Return number of words in the document."""
return len(doc._._filtered_tokens)
|
4b1f1cbb9cb6baf5cb70d6bd38a88d3e0e54610a
| 3,639,155
|
def getJobs(numJobs=1):
"""
Return a list of dictionary data as provided to the plugin `submit` method
"""
job = {'allowOpportunistic': False,
'bulkid': None,
'cache_dir': TEST_DIR + '/JobCollection_1_0/job_1',
'estimatedDiskUsage': 5000000,
'estimatedJobTime': 28800,
'estimatedMemoryUsage': 6000.0,
'gridid': None,
'id': 1L,
'inputDataset': '/HLTPhysics/Run2017B-PromptReco-v1/AOD',
'inputDatasetLocations': ['T2_CH_CERN_HLT', 'T2_CH_CERN'],
'jobid': 1L,
'location': 'T2_CH_CERN',
'name': '934a7f0d-2934-4939-b366-0a9efe0df15e-0',
'numberOfCores': 8,
'packageDir': TEST_DIR + '/batch_1-0',
'plugin': 'SimpleCondorPlugin',
'possibleSites': [u'T2_CH_CERN', u'T1_US_FNAL'],
'potentialSites': frozenset([u'T1_US_FNAL', u'T2_CH_CERN']),
'proxyPath': None,
'request_name': 'amaltaro_test_submission_180620_105409_2045',
'retry_count': 0L,
'sandbox': TEST_DIR + '/Blah-Sandbox.tar.bz2',
'scramArch': ['slc6_amd64_gcc630'],
'siteName': u'T2_CH_CERN',
'site_cms_name': 'T2_CH_CERN',
'status': None,
'status_time': None,
'swVersion': ['CMSSW_9_4_0'],
'taskPriority': 0L,
'task_id': 383L,
'task_name': '/amaltaro_test_submission_180620_105409_2045/Blah_Task',
'task_type': 'Processing',
'userdn': '/DC=ch/DC=cern/OU=Organic Units/OU=Users/CN=amaltaro/CN=718748/CN=Alan Malta Rodrigues',
'usergroup': 'unknown',
'userrole': 'unknown',
'wf_priority': 420000L}
jobs = []
for i in range(0, numJobs):
job.update({'id': long(i), 'jobid': long(i), 'name': makeUUID()})
jobs.append(deepcopy(job))
return jobs
|
56543a5a6ef66ec7fdf9f3ef26594eafa3f7bb41
| 3,639,156
|
def create_test_user():
"""Creates a new user with random username for testing
If two randomly assigned usernames overlap, it will fail
"""
UserModel = get_user_model()
username = '%s_%s' % ('test', uuid4().get_hex()[:10],)
user = UserModel.objects.create(username=username)
return user
|
d20ecbdb07db886a526402c09d7d14d768329c2b
| 3,639,157
|
def make_logical_or_tests(options):
"""Make a set of tests to do logical_or."""
return _make_logical_tests(tf.logical_or)(options, expected_tf_failures=1)
|
b4c7f5c0d89139938881f7301930651c9a3e7d0a
| 3,639,158
|
def guess(key, values):
"""
Returns guess values for the parameters of this function class based on the input. Used for fitting using this
class.
    :param key: x-values of the data to be fitted
    :param values: y-values of the data to be fitted
    :return: list of initial guesses for the fit parameters
"""
return [min(values)-max(values), (max(key)-min(key))/3, min(values)]
|
908868b150340b02ba61fcc6ccf5937ba31bfe30
| 3,639,159
|
from datetime import datetime
import time
def add_metadata_values_to_record(record_message, schema_message):
"""Populate metadata _sdc columns from incoming record message
    The locations of the required attributes are fixed in the stream
"""
extended_record = record_message['record']
extended_record['_sdc_batched_at'] = datetime.now().isoformat()
extended_record['_sdc_deleted_at'] = record_message.get('record', {}).get('_sdc_deleted_at')
extended_record['_sdc_extracted_at'] = record_message.get('time_extracted')
extended_record['_sdc_primary_key'] = schema_message.get('key_properties')
extended_record['_sdc_received_at'] = datetime.now().isoformat()
extended_record['_sdc_sequence'] = int(round(time.time() * 1000))
extended_record['_sdc_table_version'] = record_message.get('version')
return extended_record
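
# Worked example with minimal Singer-style messages (shapes assumed, not from the source):
record_message = {
    "record": {"id": 1, "name": "alice"},
    "time_extracted": "2021-01-01T00:00:00Z",
    "version": 3,
}
schema_message = {"key_properties": ["id"]}
extended = add_metadata_values_to_record(record_message, schema_message)
print(sorted(k for k in extended if k.startswith("_sdc")))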
|
e85e2620b816907204443af1c014ca4d927cb20c
| 3,639,160
|
from datetime import datetime
def manipulate_reservation_action(request: HttpRequest, default_foreward_url: str):
"""
    This function is used to alter the reservation being built inside
a cookie. This function automatically crafts the required response.
"""
js_string: str = ""
r: GroupReservation = None
u: Profile = get_current_user(request)
forward_url: str = default_foreward_url
if request.GET.get("redirect"):
forward_url = request.GET["redirect"]
if "srid" in request.GET:
if not request.GET.get("rid"):
return HttpResponseRedirect("/admin?error=missing%20primary%20reservation%20id")
srid: int = int(request.GET["srid"])
sr: SubReservation = None
if srid == 0:
sr = SubReservation()
else:
sr = SubReservation.objects.get(id=srid)
if request.POST.get("notes"):
sr.notes = escape(request.POST["notes"])
else:
sr.notes = " "
sr.primary_reservation = GroupReservation.objects.get(id=int(request.GET["rid"]))
sr.save()
print(request.POST)
print(sr.notes)
return HttpResponseRedirect("/admin/reservations/edit?rid=" + str(int(request.GET["rid"])) + "&srid=" + str(sr.id))
if "rid" in request.GET:
# update reservation
r = GroupReservation.objects.get(id=int(request.GET["rid"]))
elif u.number_of_allowed_reservations > GroupReservation.objects.all().filter(createdByUser=u).count():
r = GroupReservation()
r.createdByUser = u
r.ready = False
r.open = True
        r.pickupDate = datetime.now()
else:
return HttpResponseRedirect("/admin?error=Too%20Many%20reservations")
if request.POST.get("notes"):
r.notes = escape(request.POST["notes"])
if request.POST.get("contact"):
r.responsiblePerson = escape(str(request.POST["contact"]))
    if (r.createdByUser == u or u.rights > 1) and not r.submitted:
r.save()
else:
return HttpResponseRedirect("/admin?error=noyb")
response: HttpResponseRedirect = HttpResponseRedirect(forward_url + "?rid=" + str(r.id))
return response
|
f93b8e2ed68daebdf04aa15898e52f41a5df1e49
| 3,639,161
|
def _dense_to_sparse(data):
"""Convert a numpy array to a tf.SparseTensor."""
indices = np.where(data)
return tf.SparseTensor(
np.stack(indices, axis=-1), data[indices], dense_shape=data.shape)
|
b1fe24dd82eff2aa31e40f6b86e75f655e7141c7
| 3,639,162
|
def getflookup(facetid):
"""
find out if a facet with this id has been saved to the facet_files table
"""
found = FacetLookup.objects.all().values_list('graphdb', flat=True).get(id=facetid)
if found:
return True
else:
return False
|
a1c6b0ec7e8ab96eef16574e64ac1948f0fa8419
| 3,639,163
|
def numeric_to_string(year):
"""
Convert numeric year to string
"""
    if year < 0:
        yearstring = "{}BC".format(year * -1)
    else:
        yearstring = "{}AD".format(year)
return yearstring
|
3469e2dd5e05c49b4861782da2dd88bac781c61d
| 3,639,164
|
def _get_num_ve_sve_and_max_num_cells(cell_fracs):
"""
Calculate the num_ve, num_sve and max_num_cells
Parameters
----------
    cell_fracs : structured array
        A sorted, one-dimensional array, each entry containing the following
        fields:
            :idx: int
                The volume element index.
            :cell: int
                The geometry cell number.
            :vol_frac: float
                The volume fraction of the cell within the mesh ve.
            :rel_error: float
                The relative error associated with the volume fraction.
Returns
-------
num_ve : int
Number of the total voxels
num_sve : int
        Number of the total subvoxels, equal to or greater than num_ve
max_num_cells : int
Max number of cells (subvoxels) in a voxel
"""
num_sve = len(cell_fracs)
num_ve = len(set(cell_fracs["idx"]))
max_num_cells = -1
for i in range(num_sve):
max_num_cells = max(max_num_cells, len(cell_fracs[cell_fracs["idx"] == i]))
return num_ve, num_sve, max_num_cells
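
# Worked example: a hand-built cell_fracs array with two voxels, where voxel 0 holds
# two cells and voxel 1 holds one, so the expected result is (2, 3, 2).
import numpy as np

cell_fracs = np.array(
    [(0, 11, 0.7, 0.0), (0, 12, 0.3, 0.0), (1, 11, 1.0, 0.0)],
    dtype=[("idx", int), ("cell", int), ("vol_frac", float), ("rel_error", float)],
)
print(_get_num_ve_sve_and_max_num_cells(cell_fracs))  # (2, 3, 2)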
|
c0d154898bbfeafd66d89a2741dda8c2aa885a9a
| 3,639,165
|
from datetime import datetime
def is_void(at):
"""Returns True if the given object is an ``adatetime`` with all of its
attributes equal to None.
"""
if isinstance(at, datetime):
return False
return all((getattr(at, attr) is None) for attr in adatetime.units)
|
49744c361177060b508d5537a1ace16da6aef37d
| 3,639,166
|
def _get_metric_fn(params):
    """Get the metric fn used by model compile."""
batch_size = params["batch_size"]
def metric_fn(y_true, y_pred):
"""Returns the in_top_k metric."""
softmax_logits = y_pred
logits = tf.slice(softmax_logits, [0, 1], [batch_size, 1])
# The dup mask should be obtained from input data, but we did not yet find
# a good way of getting it with keras, so we set it to zeros to neglect the
# repetition correction
dup_mask = tf.zeros([batch_size, 1])
cross_entropy, metric_fn, in_top_k, ndcg, metric_weights = (
neumf_model.compute_eval_loss_and_metrics_helper(
logits,
softmax_logits,
dup_mask,
params["num_neg"],
params["match_mlperf"],
params["use_xla_for_gpu"]))
in_top_k = tf.cond(
tf.keras.backend.learning_phase(),
lambda: tf.zeros(shape=in_top_k.shape, dtype=in_top_k.dtype),
lambda: in_top_k)
return in_top_k
return metric_fn
|
2793975542241f36850aaaaef4256aa59ea4873f
| 3,639,167
|
def check():
"""Check if all required modules are present.
Returns 0 on success, non-zero on error.
"""
    flag = False
    for package in import_list:
        try:
            exec("import " + package)
        except Exception:
            log.error("Missing module: %s", package)
            flag = True
if flag:
return 1
return 0
|
027ae4346a642740ca4b1ef4ebec5a831688f850
| 3,639,168
|
def flip_nums(text):
""" flips numbers on string to the end (so 2019_est --> est_2019)"""
if not text:
return ''
i = 0
s = text + '_'
    while i < len(text) and text[i].isnumeric():
        s += text[i]
        i += 1
    if i < len(text) and text[i] == '_':
        i += 1
return s[i:]
|
e0534e25e95b72e1d6516111413e32a6dae207ef
| 3,639,169
|
def nnls(A, b, k=None, maxiter=None):
"""
Compute the least-squares solution to the equation ``A @ x = b`` subject to
the nonnegativity constraints ``x[:k] >= 0``.
Parameters
----------
A : array_like, shape (m, n)
Matrix `A` as shown above.
b : array_like, shape (m,)
Right-hand side vector `b` as shown above.
k : int, optional
Number of nonnegativity constraints. The first `k` components of the
solution vector are nonnegative (the default is ``A.shape[1]``).
maxiter : int, optional
Maximum number of inner iterations (the default is ``3 * A.shape[1]``).
Returns
-------
x : numpy.ndarray, shape (n,)
Solution vector ``x`` as shown above.
See Also
--------
bvtcg : Bounded variable truncated conjugate gradient
cpqp : Convex piecewise quadratic programming
lctcg : Linear constrained truncated conjugate gradient
Notes
-----
The method is adapted from the NNLS algorithm [1]_.
References
----------
.. [1] C. L. Lawson and R. J. Hanson. Solving Least Squares Problems.
Classics Appl. Math. Philadelphia, PA, US: SIAM, 1974.
"""
A = np.atleast_2d(A)
if A.dtype.kind in np.typecodes['AllInteger']:
A = np.asarray(A, dtype=float)
A = np.asfortranarray(A)
b = np.atleast_1d(b)
if b.dtype.kind in np.typecodes['AllInteger']:
b = np.asarray(b, dtype=float)
n = A.shape[1]
if k is None:
k = n
if k < 0 or k > n:
raise ValueError('Number of nonnegative constraints is invalid')
if maxiter is None:
maxiter = 3 * n
# Check the sizes of the inputs.
assert_(A.ndim == 2)
assert_(b.ndim == 1)
assert_(A.shape[0] == b.size)
x = _nnls(A, b, k, maxiter) # noqa
return np.array(x, dtype=float)
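
# Usage sketch (assumes the module-level helpers `_nnls` and `assert_` are available):
# solve a tiny overdetermined system with all components constrained to be nonnegative.
import numpy as np

A = np.array([[1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
b = np.array([1.0, 2.0, 1.5])
x = nnls(A, b)
print(x)  # least-squares solution with x >= 0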
|
4d6c7e7d53e570222b752c4bf2013100c15b7297
| 3,639,170
|
def extent2(texture):
""" Returns the extent of the image data (0.0-1.0, 0.0-1.0) inside its texture owner.
Textures have a size power of 2 (512, 1024, ...), but the actual image can be smaller.
For example: a 400x250 image will be loaded in a 512x256 texture.
Its extent is (0.78, 0.98), the remainder of the texture is transparent.
"""
return (texture.tex_coords[3], texture.tex_coords[7])
|
16c6d220ad48201fd133ed11c97452bf0831c0d8
| 3,639,173
|
def calculate_handlen(hand):
"""
Returns the length (number of letters) in the current hand.
hand: dictionary (string-> int)
returns: integer
"""
# Store the total length of the hand
hand_len = 0
# For every letter in the hand
for key in hand.keys():
# Add the number of times that letter appears in the hand
# to the variable storing hand length
hand_len += hand[key]
# Return the number of letters in the current hand
return hand_len
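
# Worked example: a hand with one 'a', one 'q' and two 'l' tiles has length 4.
print(calculate_handlen({"a": 1, "q": 1, "l": 2}))  # 4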
|
297f8af5943bf87bb7999a1212d54430857de12b
| 3,639,174
|
def add_fieldmap(fieldmap: BIDSFile, layout: BIDSLayout) -> dict:
"""
    Locates the fieldmap-related json file and adds it to a dictionary with keys that describe its directionality
Parameters
----------
fieldmap : BIDSFile
Fieldmap's NIfTI
layout : BIDSLayout
BIDSLayout instance for the queried bids directory.
Returns
-------
dict
Dictionary of fieldmap's NIfTI and json with appropriate keys.
"""
entities = fieldmap.get_entities()
entities.pop("fmap")
direction = entities.get("direction")
entities["extension"] = "json"
json = layout.get(**entities)
fieldmap_dict = {f"fmap_{direction}": fieldmap.path}
if json:
fieldmap_dict[f"fmap_{direction}_json"] = json[0].path
return fieldmap_dict
|
227fa27d9ecb2f260700debc6b2837d60018bd61
| 3,639,175
|
def fit_plane_lstsq(XYZ):
"""
Fits a plane to a point cloud.
Where z=a.x+b.y+c; Rearranging: a.x+b.y-z+c=0
@type XYZ: list
@param XYZ: list of points
@rtype: np.array
@return: normalized normal vector of the plane in the form C{(a,b,-1)}
"""
[rows, cols] = XYZ.shape
G = np.ones((rows, 3))
G[:, 0] = XYZ[:, 0] # X
G[:, 1] = XYZ[:, 1] # Y
Z = XYZ[:, 2]
(a, b, c), resid, rank, s = np.linalg.lstsq(G, Z)
normal = (a, b, -1)
nn = np.linalg.norm(normal)
normal = normal / nn
return normal
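
# Worked example: sample points from the plane z = 2x + 3y + 1; the recovered normal
# should be proportional to (2, 3, -1) (numpy assumed to be imported as np).
import numpy as np

rng = np.random.default_rng(0)
xy = rng.uniform(-1.0, 1.0, size=(50, 2))
z = 2 * xy[:, 0] + 3 * xy[:, 1] + 1
XYZ = np.column_stack([xy, z])
print(fit_plane_lstsq(XYZ))  # ~ (2, 3, -1) / ||(2, 3, -1)||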
|
c734cb17462e72c40bb65464c42d298c21e4a922
| 3,639,176
|
def clean_name(name: str) -> str:
"""Clean a string by capitalizing and removing extra spaces.
Args:
name: the name to be cleaned
Returns:
str: the cleaned name
"""
name = " ".join(name.strip().split())
return str(titlecase.titlecase(name))
|
e19354767d38164004c984c76827b2882ef4c4fd
| 3,639,177
|
from typing import Callable, List, TypeVar

T = TypeVar("T")
def pull_list(buf: Buffer, capacity: int, func: Callable[[], T]) -> List[T]:
"""
Pull a list of items.
"""
items = []
with pull_block(buf, capacity) as length:
end = buf.tell() + length
while buf.tell() < end:
items.append(func())
return items
|
ab9833fdab157e05df00d65dee96080c98140bb2
| 3,639,178
|
def ResNet(
stack_fn, preact, use_bias, model_name='resnet', include_top=True, weights='imagenet',
input_tensor=None, input_shape=None, pooling=None, classes=1000,
classifier_activation='softmax', bottomright_maxpool_test=False,
use_group_norm=False, **kwargs):
"""Instantiates the ResNet, ResNetV2, and ResNeXt architecture.
Reference:
- [Deep Residual Learning for Image Recognition](
https://arxiv.org/abs/1512.03385) (CVPR 2015)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
Arguments:
stack_fn: a function that returns output tensor for the
stacked residual blocks.
preact: whether to use pre-activation or not
(True for ResNetV2, False for ResNet and ResNeXt).
use_bias: whether to use biases for convolutional layers or not
(True for ResNet and ResNetV2, False for ResNeXt).
model_name: string, model name.
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
or `(3, 224, 224)` (with `channels_first` data format).
It should have exactly 3 inputs channels.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
**kwargs: For backwards compatibility only.
Returns:
A `keras.Model` instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
ValueError: if `classifier_activation` is not `softmax` or `None` when
using a pretrained top layer.
"""
global layers
if 'layers' in kwargs:
layers = kwargs.pop('layers')
else:
layers = VersionAwareLayers()
if kwargs:
raise ValueError('Unknown argument(s): %s' % (kwargs,))
if not (weights in {'imagenet', None} or file_io.file_exists_v2(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape, default_size=224, min_size=32, data_format=backend.image_data_format(),
require_flatten=include_top, weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(img_input)
x = layers.Conv2D(
64, 7, strides=2, use_bias=use_bias and not use_group_norm, name='conv1_conv')(x)
if use_group_norm:
def norm_layer(name):
return tfa.layers.GroupNormalization(epsilon=batchnorm_epsilon, name=name)
else:
def norm_layer(name):
return layers.BatchNormalization(
axis=bn_axis, epsilon=batchnorm_epsilon, momentum=batchnorm_momentum,
name=name)
if not preact:
x = norm_layer(name='conv1_gn' if use_group_norm else 'conv1_bn')(x)
x = layers.Activation('relu', name='conv1_relu')(x)
padding_layer = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name='pool1_pad')
if bottomright_maxpool_test:
padding_test = layers.ZeroPadding2D(padding=((0, 2), (0, 2)), name='pool1_pad')
padding_layer = TrainTestSwitchLayer(padding_layer, padding_test)
x = padding_layer(x)
x = layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)
x = stack_fn(x)
if preact:
x = norm_layer(name='post_gn' if use_group_norm else 'post_bn')(x)
x = layers.Activation('relu', name='post_relu')(x)
if include_top:
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(classes, activation=classifier_activation, name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D(name='max_pool')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
if use_group_norm:
model_name = model_name + '_groupnorm'
model = training.Model(inputs, x, name=model_name)
# Load weights.
if (weights == 'imagenet') and (model_name in WEIGHTS_HASHES):
if include_top:
file_name = model_name + f'_weights_tf_dim_ordering_tf_kernels.h5'
file_hash = WEIGHTS_HASHES[model_name][0]
else:
file_name = model_name + f'_weights_tf_dim_ordering_tf_kernels_notop.h5'
file_hash = WEIGHTS_HASHES[model_name][1]
weights_path = data_utils.get_file(
file_name, BASE_WEIGHTS_PATH + file_name, cache_subdir='models', file_hash=file_hash)
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
|
810b04481eb6ad5d8b3723b87581b3f2136cc80f
| 3,639,179
|
import yaml
def read_yaml(yaml_path):
"""
Read yaml file from the path
    :param yaml_path: path to the YAML file
    :return: dict merging the keys of all documents in the file
"""
    result = dict()
    with open(yaml_path, "r") as stream:
        for doc in yaml.safe_load_all(stream):
            for k, v in doc.items():
                result[k] = v
    return result
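
# Usage sketch: write a small multi-document YAML file (placeholder name) and read it back.
with open("settings.yaml", "w") as out:
    out.write("host: localhost\n---\nport: 5432\n")
print(read_yaml("settings.yaml"))  # {'host': 'localhost', 'port': 5432}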
|
a3f32d6f5c6cb5c8e94ad9b68a0540aa001f83b2
| 3,639,180
|
def _server_allow_run_on_save() -> bool:
"""Allows users to automatically rerun when app is updated.
Default: true
"""
return True
|
3a895abd8201ce97c8f2f928b841eb86bf6327d1
| 3,639,181
|
from urllib.parse import urlparse
def _strip_schema(url):
"""Returns the url without the s3:// part"""
result = urlparse(url)
return result.netloc + result.path
|
9e7dc96c23d799f202603109cd08b2fe049951a5
| 3,639,182
|
def simple_word_tokenize(text, _split=GROUPING_SPACE_REGEX.split):
"""
Split text into tokens. Don't split by a hyphen.
Preserve punctuation, but not whitespaces.
"""
return [t for t in _split(text) if t and not t.isspace()]
|
5b9e66d2a369340028b4ece2eee083511d0e9746
| 3,639,183
|
def merge_strategy(media_identifier, target_site, sdc_data, strategy):
"""
Check if the file already holds Structured Data, if so resolve what to do.
@param media_identifier: Mid of the file
@param target_site: pywikibot.Site object to which file should be uploaded
@param sdc_data: internally formatted Structured Data in json format
@param strategy: Strategy used for merging uploaded data with pre-existing
data. Allowed values are None, "New", "Blind", "Add" and "Nuke".
@return: dict of pids and caption languages removed from sdc_data due to
conflicts.
@raises: ValueError, SdcException
"""
prior_data = _get_existing_structured_data(media_identifier, target_site)
if not prior_data:
# even unknown strategies should pass if there is no prior data
return
if not strategy:
raise SdcException(
'warning', 'pre-existing sdc-data',
('Found pre-existing SDC data, no new data will be added. '
'Found data: {}'.format(prior_data))
)
strategy = strategy.lower()
if strategy in ('new', 'add'):
pre_pids = prior_data['statements'].keys()
pre_langs = prior_data['labels'].keys()
new_langs = sdc_data.get('caption', dict()).keys()
if strategy == 'add':
pid_clash = set(pre_pids).intersection(sdc_data.keys())
lang_clash = set(pre_langs).intersection(new_langs)
for pid in pid_clash:
sdc_data.pop(pid, None)
for lang in lang_clash:
sdc_data['caption'].pop(lang, None)
if (not any(is_prop_key(key) for key in sdc_data.keys())
and not sdc_data.get('caption')):
                # warn if no data left to upload
raise SdcException(
'warning', 'all conflicting pre-existing sdc-data',
('Found pre-existing SDC data, no new non-conflicting '
'data could be added. Found data: {}'.format(
prior_data))
)
elif pid_clash or lang_clash:
return {'pids': pid_clash, 'langs': lang_clash}
elif (not set(pre_pids).isdisjoint(sdc_data.keys())
or not set(pre_langs).isdisjoint(new_langs)):
raise SdcException(
'warning', 'conflicting pre-existing sdc-data',
('Found pre-existing SDC data, no new data will be added. '
'Found data: {}'.format(prior_data))
)
elif strategy not in STRATEGIES:
raise ValueError(
'The `strategy` parameter must be None, "{0}" or "{1}" '
'but "{2}" was provided'.format(
'", "'.join([s.capitalize() for s in STRATEGIES[:-1]]),
STRATEGIES[-1].capitalize(),
strategy.capitalize()))
# pass if strategy is "Blind" or "Nuke"
|
0e59cc312e00cc7d492bfe725b0a9a297734a5e0
| 3,639,184
|
def convert_translations_to_dict(js_translations):
"""Convert a GNUTranslations object into a dict for jsonifying.
Args:
js_translations: GNUTranslations object to be converted.
Returns:
A dictionary representing the GNUTranslations object.
"""
plural, n_plural = _get_plural_forms(js_translations)
translations_dict = {'plural': plural, 'catalog': {}, 'fallback': None}
if js_translations._fallback is not None:
translations_dict['fallback'] = convert_translations_to_dict(
js_translations._fallback
)
for key, value in js_translations._catalog.items():
if key == '':
continue
if isinstance(key, basestring):
translations_dict['catalog'][key] = value
elif isinstance(key, tuple):
if key[0] not in translations_dict['catalog']:
translations_dict['catalog'][key[0]] = [''] * n_plural
translations_dict['catalog'][key[0]][int(key[1])] = value
return translations_dict
|
8db0fc022002504a943f46b429ca71b6e0e90b06
| 3,639,185
|
import asyncio
def reduce(coro, iterable, initializer=None, limit=1, right=False, loop=None):
"""
Apply function of two arguments cumulatively to the items of sequence,
from left to right, so as to reduce the sequence to a single value.
Reduction will be executed sequentially without concurrency,
so passed values would be in order.
This function is the asynchronous coroutine equivalent to Python standard
`functools.reduce()` function.
This function is a coroutine.
This function can be composed in a pipeline chain with ``|`` operator.
Arguments:
coro (coroutine function): reducer coroutine binary function.
iterable (iterable|asynchronousiterable): an iterable collection
yielding coroutines functions.
initializer (mixed): initial accumulator value used in
the first reduction call.
limit (int): max iteration concurrency limit. Use ``0`` for no limit.
right (bool): reduce iterable from right to left.
loop (asyncio.BaseEventLoop): optional event loop to use.
Raises:
TypeError: if input arguments are not valid.
Returns:
mixed: accumulated final reduced value.
Usage::
async def reducer(acc, num):
return acc + num
await paco.reduce(reducer, [1, 2, 3, 4, 5], initializer=0)
# => 15
"""
assert_corofunction(coro=coro)
assert_iter(iterable=iterable)
# Reduced accumulator value
acc = initializer
    # If iterable is empty, just return the initializer value
if len(iterable) == 0:
return initializer
# Create concurrent executor
pool = ConcurrentExecutor(limit=limit, loop=loop)
# Reducer partial function for deferred coroutine execution
def reducer(element):
@asyncio.coroutine
def wrapper():
nonlocal acc
acc = yield from coro(acc, element)
return wrapper
# Support right reduction
if right:
iterable.reverse()
# Iterate and attach coroutine for defer scheduling
for element in iterable:
pool.add(reducer(element))
# Wait until all coroutines finish
yield from pool.run(ignore_empty=True)
# Returns final reduced value
return acc
|
64b55a082df11fa9d6b7971ecd1508c1e4c9f1c9
| 3,639,186
|
def sigm_temp(base_sim_param, assumptions, t_base_type):
"""Calculate base temperature depending on sigmoid diff and location
Parameters
----------
base_sim_param : dict
Base simulation assumptions
assumptions : dict
Dictionary with assumptions
Return
------
t_base_cy : float
Base temperature of current year
Note
----
Depending on the base temperature in the base and end year
a sigmoid diffusion from the base temperature from the base year
to the end year is calculated
    This allows modelling changes, e.g. in thermal comfort
"""
# Base temperature of end year minus base temp of base year
t_base_diff = assumptions[t_base_type]['end_yr'] - assumptions[t_base_type]['base_yr']
# Sigmoid diffusion
t_base_frac = diffusion_technologies.sigmoid_diffusion(
base_sim_param['base_yr'],
base_sim_param['curr_yr'],
base_sim_param['end_yr'],
assumptions['smart_meter_diff_params']['sig_midpoint'],
assumptions['smart_meter_diff_params']['sig_steeppness']
)
# Temp diff until current year
t_diff_cy = t_base_diff * t_base_frac
# Add temp change to base year temp
t_base_cy = t_diff_cy + assumptions[t_base_type]['base_yr']
return t_base_cy
|
276af880050698a9f15dcd142aac952809807fdb
| 3,639,187
|
import socket
from select import select
try:
    from select import poll, POLLIN
except ImportError:  # poll is not available on all platforms (e.g. Windows)
    poll = None
def is_socket_closed(sock):
"""Check if socket ``sock`` is closed."""
if not sock:
return True
try:
if not poll: # pragma nocover
if not select:
return False
try:
return bool(select([sock], [], [], 0.0)[0])
except socket.error:
return True
# This version is better on platforms that support it.
p = poll()
p.register(sock, POLLIN)
for (fno, ev) in p.poll(0.0):
if fno == sock.fileno():
# Either data is buffered (bad), or the connection is dropped.
return True
except Exception:
return True
|
e89ddec6e7603b5636f6a6d87831d12f0a76e9d9
| 3,639,188
|
def _fit_ovo_binary(estimator, X, y, i, j):
"""Fit a single binary estimator (one-vs-one)."""
cond = np.logical_or(y == i, y == j)
y = y[cond]
    y_binary = np.empty(y.shape, int)
y_binary[y == i] = 0
y_binary[y == j] = 1
ind = np.arange(X.shape[0])
return _fit_binary(estimator, X[ind[cond]], y_binary, classes=[i, j])
|
59325562549656d35b615a3274112357b0c4854c
| 3,639,189
|
def get_implicit_permissions_for_user(user: str, domain=None):
"""
GetImplicitPermissionsForUser gets implicit permissions for a user or role.
Compared to GetPermissionsForUser(), this function retrieves permissions for inherited roles.
For example:
p, admin, data1, read
p, alice, data2, read
g, alice, admin
GetPermissionsForUser("alice") can only get: [["alice", "data2", "read"]].
But GetImplicitPermissionsForUser("alice") will get: [["admin", "data1", "read"], ["alice", "data2", "read"]].
"""
    return enforcer.get_implicit_permissions_for_user(user, domain=domain)
|
08477a3ac772597f66f36b7b04fc7d8a29f2522b
| 3,639,190
|
def Law_f(text):
"""
:param text: The "text" of this Law
"""
return '\\begin{block}{Law}\n' + text + '\n\\end{block}\n'
|
594b279c5971a9d379666179c4d0633fc02a8bd9
| 3,639,191
|
import operator
from collections import OrderedDict
def ordered_dict_intersection(first_dict, second_dict, compat=operator.eq):
"""Return the intersection of two dictionaries as a new OrderedDict.
Items are retained if their keys are found in both dictionaries and the
values are compatible.
Parameters
----------
first_dict, second_dict : dict-like
Mappings to merge.
compat : function, optional
Binary operator to determine if two values are compatible. By default,
checks for equality.
Returns
-------
intersection : OrderedDict
Intersection of the contents.
"""
new_dict = OrderedDict(first_dict)
remove_incompatible_items(new_dict, second_dict, compat)
return new_dict
|
cfef1a1d5c3cc9fc5b792a68bae0fe8279b752da
| 3,639,192
|
import scipy
def get_cl2cf_matrices(theta_bin_edges, lmin, lmax):
"""
Returns the set of matrices to go from one entire power spectrum to one binned correlation function.
Args:
theta_bin_edges (1D numpy array): Angular bin edges in radians.
lmin (int): Minimum l.
lmax (int): Maximum l.
Returns:
(2D numpy array, \
2D numpy array, \
2D numpy array): Tuple of matrices to each go from one entire power spectrum to one binned \
correlation function for different spins: (0-0, 2-2, 0-2). The spin-2-2 matrix is only for \
xi+, not xi-.
"""
# Calculate Legendre functions and their derivatives up to lmax
# pl and dpl indexed as [theta_idx, l]
cos_thetas = np.cos(theta_bin_edges)
pl_dpl = np.array([scipy.special.lpn(lmax + 1, cos_theta) for cos_theta in cos_thetas])
pl = pl_dpl[:, 0, :]
dpl = pl_dpl[:, 1, :]
# Calculate various offset combinations of Pl and dPl, and some other useful things
assert lmin >= 2
plplus1 = pl[:, (lmin + 1):] # first is l=lmin+1, last is lmax+1
plminus1 = pl[:, (lmin - 1):lmax] # first is l=lmin-1, last is lmax-1
xpl = cos_thetas[:, np.newaxis] * pl[:, lmin:(lmax + 1)]
xdpl = cos_thetas[:, np.newaxis] * dpl[:, lmin:(lmax + 1)]
dplminus1 = dpl[:, (lmin - 1):lmax]
xdplminus1 = cos_thetas[:, np.newaxis] * dplminus1
ell = np.arange(lmin, lmax + 1)
two_ell_plus1 = 2 * ell + 1
cos_theta_diff = np.diff(cos_thetas)
# Calculate bin-averaged Pl, Pl^2 and Gl+/- following Fang et al. eqs 5.6-5.8
# (Also Friedrich et al. DES Y3 covariance paper, which uses a different sign convention but this cancels out.)
# All of these vectorised equations have been validated against much slower loop implementations
# Pl
pl_bin_top_prediff = plplus1 - plminus1
pl_bin_top = np.diff(pl_bin_top_prediff, axis=0)
pl_bin_bottom = np.outer(cos_theta_diff, two_ell_plus1)
pl_bin = pl_bin_top / pl_bin_bottom
# Pl^2
plminus1_coeff = ell + 2 / two_ell_plus1
plminus1_term = plminus1_coeff[np.newaxis, :] * plminus1
xpl_coeff = 2 - ell
xpl_term = xpl_coeff[np.newaxis, :] * xpl
plplus1_coeff = 2 / two_ell_plus1
plplus1_term = plplus1_coeff[np.newaxis, :] * plplus1
pl2_bin_top_prediff = plminus1_term + xpl_term - plplus1_term
pl2_bin_top = np.diff(pl2_bin_top_prediff, axis=0)
pl2_bin_bottom = cos_theta_diff[:, np.newaxis]
pl2_bin = pl2_bin_top / pl2_bin_bottom
# Gl2+ + Gl2-
plminus1_coeff = - ell * (ell - 1) / 2 * (ell + 2 / two_ell_plus1) - (ell + 2)
plminus1_term = plminus1_coeff[np.newaxis, :] * plminus1
xpl_coeff = - ell * (ell - 1) * (2 - ell) / 2
xpl_term = xpl_coeff[np.newaxis, :] * xpl
plplus1_coeff = ell * (ell - 1) / two_ell_plus1
plplus1_term = plplus1_coeff[np.newaxis, :] * plplus1
dpl_coeff = 4 - ell
dpl_term = dpl_coeff * dpl[:, lmin:(lmax + 1)]
xdplminus1_coeff = ell + 2
xdplminus1_term = xdplminus1_coeff[np.newaxis, :] * xdplminus1
xdpl_coeff = 2 * (ell - 1)
xdpl_term = xdpl_coeff[np.newaxis, :] * xdpl
pl_coeff = - 2 * (ell - 1)
pl_term = pl_coeff[np.newaxis, :] * pl[:, lmin:(lmax + 1)]
dplminus1_coeff = - 2 * (ell + 2)
dplminus1_term = dplminus1_coeff[np.newaxis, :] * dplminus1
gplus_bin_top_prediff = (plminus1_term + xpl_term + plplus1_term + dpl_term + xdplminus1_term + xdpl_term + pl_term
+ dplminus1_term)
gplus_bin_top = np.diff(gplus_bin_top_prediff, axis=0)
gplus_bin_bottom = cos_theta_diff[:, np.newaxis]
gplus_bin = gplus_bin_top / gplus_bin_bottom
# Apply relevant prefactors to obtain bin-averaged Wigner d symbols
ell_ellplus1 = (ell * (ell + 1))[np.newaxis, :]
d00_bin = pl_bin
d22plus_bin = 2 / ell_ellplus1 ** 2 * gplus_bin
d02_bin = 1 / ell_ellplus1 * pl2_bin
# Apply final Wigner prefactor to obtain Cl->CF matrices
prefac = (two_ell_plus1 / (4 * np.pi))[np.newaxis, :]
cl2cf_00 = prefac * d00_bin
cl2cf_22plus = prefac * d22plus_bin
cl2cf_02 = prefac * d02_bin
return cl2cf_00, cl2cf_22plus, cl2cf_02
|
0231218c8501409e3660ed6c446b0c163229ab8a
| 3,639,193
|
from pandas import DataFrame, concat
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
"""
Frame a time series as a supervised learning dataset.
Arguments:
data: Sequence of observations as a list or NumPy array.
n_in: Number of lag observations as input (X).
n_out: Number of observations as output (y).
dropnan: Boolean whether or not to drop rows with NaN values.
Returns:
Pandas DataFrame of series framed for supervised learning.
"""
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg.reset_index(drop=True)
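
# Usage sketch (assumes DataFrame and concat come from pandas): frame a univariate
# series of five values with one lag observation as input.
values = [[10], [20], [30], [40], [50]]
framed = series_to_supervised(values, n_in=1, n_out=1)
print(framed)  # columns: var1(t-1), var1(t); four rows after dropping NaNs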
|
1756380140dd74045880cc4501623c8b48ce5773
| 3,639,194
|
import torch
def valid_from_done(done):
"""Returns a float mask which is zero for all time-steps after a
`done=True` is signaled. This function operates on the leading dimension
of `done`, assumed to correspond to time [T,...], other dimensions are
preserved."""
done = done.type(torch.float)
valid = torch.ones_like(done)
valid[1:] = 1 - torch.clamp(torch.cumsum(done[:-1], dim=0), max=1)
return valid
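
# Worked example: once done=1 is seen at t=2, every later step is masked out
# (the step where done occurs is itself still valid).
import torch

done = torch.tensor([0.0, 0.0, 1.0, 0.0, 0.0])
print(valid_from_done(done))  # tensor([1., 1., 1., 0., 0.])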
|
0ca2bd0f9e23605091b2f8d1bc15e67e1632b82b
| 3,639,195
|
import logging
def get_transfer_options(transfer_kind='upload', transfer_method=None):
"""Returns hostnames that the current host can upload or download to.
transfer_kind: 'upload' or 'download'
transfer_method: is specified and not None, return only hosts with which
we can work using this method (e.g. scp)
"""
try:
transfer_options = get_config(get_hostname())[
'%s_options' % transfer_kind]
except LookupError:
logging.info("Host %s has no known transfer options.",
get_hostname())
return []
if transfer_method is not None:
        transfer_options = [to for to in transfer_options
                            if get_config(to['host'])['method'] == transfer_method]
return transfer_options
|
f5aea7498bf98d3be3fe9e97eda4e6eaa9181cea
| 3,639,196
|
def calc_utility_np(game, iter):
"""Calc utility of current position
Parameters
----------
game : camel up game
Camel up game class
iter : int
Iterations to run the monte carlo simulations
Returns
-------
np.array
Numpy structured array with expected utilities
"""
coins = coins_to_numpy(game)
if str(game.camel_dict) + str(game.tiles_dict) in CACHE.keys():
turn_prob_first, turn_prob_second, turn_prob_other, exp_tile_points = CACHE[
str(game.camel_dict) + str(game.tiles_dict)
][0]
game_prob_first, game_prob_last = CACHE[
str(game.camel_dict) + str(game.tiles_dict)
][1]
else:
turn_prob_first, turn_prob_second, turn_prob_other, exp_tile_points = turn_prob_numpy(
game, iter
)
game_prob_first, game_prob_last = game_prob_numpy(game, iter)
game_prob_first["prob"] = np.where(
game_prob_first["prob"] < 0.30, 0, game_prob_first["prob"]
)
game_prob_last["prob"] = np.where(
game_prob_last["prob"] < 0.30, 0, game_prob_last["prob"]
)
CACHE[str(game.camel_dict) + str(game.tiles_dict)] = [
(turn_prob_first, turn_prob_second, turn_prob_other, exp_tile_points),
(game_prob_first, game_prob_last),
]
winner_bets, loser_bets = winner_loser_bets_to_numpy(game)
bet_tiles = bet_tiles_to_numpy(game)
util.rename_np(turn_prob_first, ["counts", "prob"], "first")
util.rename_np(turn_prob_second, ["counts", "prob"], "second")
util.rename_np(turn_prob_other, ["counts", "prob"], "other")
bets = util.numpy_left_join(bet_tiles, turn_prob_first, "camel")
bets = util.numpy_left_join(bets, turn_prob_second, "camel")
bets = util.numpy_left_join(bets, turn_prob_other, "camel")
multiply_array = (
(bets["value"] * bets["prob_first"])
+ (bets["bets"] * bets["prob_second"])
- (bets["bets"] * bets["prob_other"])
)
bets = util.add_col_np(bets, "exp_value", multiply_array)
bets_groupby = util.numpy_group_by_sum(bets, "player", "exp_value")
final = util.numpy_left_join(coins, exp_tile_points, "player")
final = util.numpy_left_join(final, bets_groupby, "player")
game_first = util.numpy_left_join(winner_bets, game_prob_first, "camel")
game_last = util.numpy_left_join(loser_bets, game_prob_last, "camel")
game_winner_other = deepcopy(game_first)
game_winner_other["prob"] = 1 - game_first["prob"]
game_loser_other = deepcopy(game_last)
game_loser_other["prob"] = 1 - game_last["prob"]
game_first = util.add_col_np(
game_first, "points", config.BET_SCALING[0 : game_first.shape[0]]
)
game_last = util.add_col_np(
game_last, "points", config.BET_SCALING[0 : game_last.shape[0]]
)
game_winner_other = util.add_col_np(
game_winner_other, "points", [1] * game_winner_other.shape[0]
)
game_loser_other = util.add_col_np(
game_loser_other, "points", [1] * game_loser_other.shape[0]
)
final = util.numpy_left_join(
final, calc_exp_value_np(game_first, "exp_value_first"), "player"
)
final = util.numpy_left_join(
final, calc_exp_value_np(game_last, "exp_value_last"), "player"
)
final = util.numpy_left_join(
final, calc_exp_value_np(game_winner_other, "exp_value_winner_other"), "player"
)
final = util.numpy_left_join(
final, calc_exp_value_np(game_loser_other, "exp_value_loser_other"), "player"
)
multiply_array = (
final["coins"]
+ final["exp_points"]
+ final["exp_value"]
+ final["exp_value_first"]
+ final["exp_value_last"]
- final["exp_value_winner_other"]
- final["exp_value_loser_other"]
)
final = util.add_col_np(final, "utility", multiply_array)
return final
|
c69740652ea18d753c9a2a894f1ba36ab1eecff8
| 3,639,197
|
def add_masses(line, mass_light, mass_heavy):
"""
Add m/z information in the output lines
"""
new_line = "{} {} {}\n".format(round_masses(mass_light), round_masses(mass_heavy), line)
return new_line
|
d8e92acf43d17e9a00de1e985e6cecadec0fa4b4
| 3,639,198
|
def load_r_ind_sent_bars():
"""
Loads the random index-barcodes of the actual networks
"""
bars = []
for text in texts:
bars.append(np.load('Textbooks/{}/r_ind_sent_bars.npy'.format(text)))
return bars
|
331b217976bc5a03a4e3a20331f06ba33a7aaad1
| 3,639,199
|
import pickle
def load_pickle(indices, image_data):
    """
0: Empty
1: Active
2: Inactive
"""
size = 13
# image_data = "./data/images.pkl"
with open(image_data, "rb") as f:
images = pickle.load(f)
x = []
y = []
n = []
cds = []
for idx in indices:
D_dict = images[idx]
img = D_dict['image']
label = D_dict['label']
row, col = label.shape
length, width = img.shape
img = np.expand_dims(img, axis=-1)
img_r, img_c = 40, 40
for g_r in range(1, row-1):
img_c = 40
for g_c in range(1, col-1):
# Check whether it's empty
if label[g_r][g_c] == 0.0:
pass
else:
l = img_c - size
u = img_r - size
r = img_c + size + 1
d = img_r + size + 1
pt = img[u:d, l:r]
nb = get_neibs_cds(img, l, u)
lb = label[g_r][g_c]
x.append(pt)
y.append(lb)
n.append(nb)
cds.append((img_r, img_c))
img_c += 27
img_r += 27
x = np.array(x)
y = np.array(y)
n = np.array(n)
return x, y, n, cds
|
dff3eeb151c8f32511c8d62d8bc9fa313bc36019
| 3,639,200
|
def summarize_vref_locs(locs:TList[BaseObjLocation]) -> pd.DataFrame:
"""
Return a table with cols (partition, num vrefs)
"""
vrefs_by_partition = group_like(objs=locs, labels=[loc.partition for loc in locs])
partition_sort = sorted(vrefs_by_partition)
return pd.DataFrame({
'Partition': partition_sort,
'Number of vrefs': [len(vrefs_by_partition[k]) for k in partition_sort]
})
|
3894404874004e70ab0cc243af4f645f5cf84582
| 3,639,201
|
def rescale_list_to_range(original, limits):
"""
Linearly rescale values in original list to limits (minimum and maximum).
:example:
>>> rescale_list_to_range([1, 2, 3], (0, 10))
[0.0, 5.0, 10.0]
>>> rescale_list_to_range([1, 2, 3], (-10, 0))
[-10.0, -5.0, 0.0]
>>> rescale_list_to_range([1, 2, 3], (0j, 10j))
[0j, 5j, 10j]
:param original: Original list or list-like to be rescaled.
:type original: list
:param limits: Tuple of two floats, min and max, to constrain the new list
:type limits: tuple
:return: Original list rescaled to fit between min and max
:rtype: list
"""
new_min, new_max = limits[0:2]
old_min, old_max = min(original), max(original)
    return [(new_max + new_min) / 2 for _ in original] if old_min == old_max \
        else [new_max * (v - old_min) / (old_max - old_min) +
              new_min * (old_max - v) / (old_max - old_min) for v in original]
|
bdd38bb24b597648e4ca9045ed133dfe93ad4bd8
| 3,639,202
|
from typing import Optional
from typing import Union
from typing import Mapping
def build_list_request(
filters: Optional[dict[str, str]] = None
) -> Union[IssueListInvalidRequest, IssueListValidRequest]:
"""Create request from filters."""
accepted_filters = ["obj__eq", "state__eq", "title__contains"]
invalid_req = IssueListInvalidRequest()
if filters is not None:
if not isinstance(filters, Mapping):
invalid_req.add_error("filters", "Is not iterable")
return invalid_req
for key, value in filters.items():
if key not in accepted_filters:
invalid_req.add_error("filters", f"Key {key} cannot be used.")
if (key == "obj__eq" and value not in ["pull request", "issue", "all"]) or (
key == "state__eq" and value not in ["all", "open", "closed"]
):
                invalid_req.add_error(
                    "filters", f"Value {value} for key '{key}' cannot be used."
                )
if invalid_req.has_errors():
return invalid_req
return IssueListValidRequest(filters=filters)
|
b0fc85921f11ef28071eba8be4ab1a7a4837b56c
| 3,639,203
|
def get_ratings(labeled_df):
"""Returns list of possible ratings."""
return labeled_df.RATING.unique()
|
2b88b1703ad5b5b0a074ed7bc4591f0e88d97f92
| 3,639,204
|
from typing import Dict
def split_edge_cost(
edge_cost: EdgeFunction, to_split: LookupToSplit
) -> Dict[Edge, float]:
"""Assign half the cost of the original edge to each of the split edges.
Args:
edge_cost: Lookup from edges to cost.
to_split: Lookup from original edges to pairs of split edges
(see [lookup_to_split][tspwplib.converter.lookup_to_split]).
Returns:
Lookup from split edges to cost.
Notes:
The cost is cast to a float.
"""
split_cost = {}
for edge, cost in edge_cost.items():
first_split, second_split = to_split[edge]
half_cost = float(cost) / 2.0
split_cost[first_split] = half_cost
split_cost[second_split] = half_cost
return split_cost
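
# Usage sketch, assuming edges are plain (u, v) tuples and to_split maps each original
# edge to its two halves (mirroring lookup_to_split):
edge_cost = {(0, 1): 10, (1, 2): 4}
to_split = {(0, 1): ((0, 3), (3, 1)), (1, 2): ((1, 4), (4, 2))}
print(split_edge_cost(edge_cost, to_split))
# {(0, 3): 5.0, (3, 1): 5.0, (1, 4): 2.0, (4, 2): 2.0}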
|
8e307f6dfd19d65ec1979fa0eafef05737413b3d
| 3,639,205
|
def get_ants_brain(filepath, metadata, channel=0):
"""Load .nii brain file as ANTs image."""
nib_brain = np.asanyarray(nib.load(filepath).dataobj).astype('uint32')
spacing = [float(metadata.get('micronsPerPixel_XAxis', 0)),
float(metadata.get('micronsPerPixel_YAxis', 0)),
float(metadata.get('micronsPerPixel_ZAxis', 0)),
float(metadata.get('sample_period', 0))]
spacing = [spacing[x] for x in range(4) if metadata['image_dims'][x] > 1]
if len(nib_brain.shape) > 4: # multiple channels
# trim to single channel
return ants.from_numpy(np.squeeze(nib_brain[..., channel]), spacing=spacing)
else:
# return ants.from_numpy(np.squeeze(nib_brain[..., :300]), spacing=spacing) # TESTING
return ants.from_numpy(np.squeeze(nib_brain), spacing=spacing)
|
5011d1f609d818c1769900542bc07b8194a4a10f
| 3,639,206
|
def numpy_max(x):
"""
Returns the maximum of an array.
Deals with text as well.
"""
return numpy_min_max(x, lambda x: x.max(), minmax=True)
|
0b32936cde2e0f6cbebf62016c30e4265aba8b57
| 3,639,207
|
import copy
def get_train_val_test_splits(X, y, max_points, seed, confusion, seed_batch,
split=(2./3, 1./6, 1./6)):
"""Return training, validation, and test splits for X and y.
Args:
X: features
y: targets
max_points: # of points to use when creating splits.
seed: seed for shuffling.
confusion: labeling noise to introduce. 0.1 means randomize 10% of labels.
seed_batch: # of initial datapoints to ensure sufficient class membership.
split: percent splits for train, val, and test.
Returns:
indices: shuffled indices to recreate splits given original input data X.
y_noise: y with noise injected, needed to reproduce results outside of
run_experiments using original data.
"""
np.random.seed(seed)
X_copy = copy.copy(X)
y_copy = copy.copy(y)
# Introduce labeling noise
y_noise = flip_label(y_copy, confusion)
indices = np.arange(len(y))
if max_points is None:
max_points = len(y_noise)
else:
max_points = min(len(y_noise), max_points)
train_split = int(max_points * split[0])
val_split = train_split + int(max_points * split[1])
assert seed_batch <= train_split
# Do this to make sure that the initial batch has examples from all classes
min_shuffle = 3
n_shuffle = 0
y_tmp = y_noise
# Need at least 4 obs of each class for 2 fold CV to work in grid search step
while (any(get_class_counts(y_tmp, y_tmp[0:seed_batch]) < 4)
or n_shuffle < min_shuffle):
np.random.shuffle(indices)
y_tmp = y_noise[indices]
n_shuffle += 1
X_train = X_copy[indices[0:train_split]]
X_val = X_copy[indices[train_split:val_split]]
X_test = X_copy[indices[val_split:max_points]]
y_train = y_noise[indices[0:train_split]]
y_val = y_noise[indices[train_split:val_split]]
y_test = y_noise[indices[val_split:max_points]]
# Make sure that we have enough observations of each class for 2-fold cv
assert all(get_class_counts(y_noise, y_train[0:seed_batch]) >= 4)
# Make sure that returned shuffled indices are correct
assert all(y_noise[indices[0:max_points]] ==
np.concatenate((y_train, y_val, y_test), axis=0))
return (indices[0:max_points], X_train, y_train,
X_val, y_val, X_test, y_test, y_noise)
|
3f76dade9dd012666f29742b3ec3749d9bcfafe2
| 3,639,208
|
def require_apikey(key):
"""
Decorator for view functions and API requests. Requires
that the user pass in the API key for the application.
"""
def _wrapped_func(view_func):
def _decorated_func(*args, **kwargs):
passed_key = request.args.get('key', None)
if passed_key == key:
return view_func(*args, **kwargs)
else:
abort(401)
return _decorated_func
return _wrapped_func
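
# Usage sketch, assuming `request` and `abort` come from Flask; the key and route
# below are illustrative only.
from flask import Flask, request, abort

app = Flask(__name__)

@app.route("/status")
@require_apikey("s3cret")  # hypothetical API key
def status():
    return {"ok": True}
# GET /status?key=s3cret -> 200, any other key -> 401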
|
9db9be28c18cd84172dce27d27be9bfcc6f7376e
| 3,639,209
|
from math import cos,pi
from numpy import zeros
def gauss_legendre(ordergl,tol=10e-14):
"""
Returns nodal abscissas {x} and weights {A} of
Gauss-Legendre m-point quadrature.
"""
m = ordergl + 1
def legendre(t,m):
p0 = 1.0; p1 = t
for k in range(1,m):
p = ((2.0*k + 1.0)*t*p1 - k*p0)/(1.0 + k )
p0 = p1; p1 = p
dp = m*(p0 - t*p1)/(1.0 - t**2)
return p1,dp
A = zeros(m)
x = zeros(m)
nRoots = (m + 1)// 2 # Number of non-neg. roots
for i in range(nRoots):
t = cos(pi*(i + 0.75)/(m + 0.5)) # Approx. root
for j in range(30):
p,dp = legendre(t,m) # Newton-Raphson
dt = -p/dp; t = t + dt # method
if abs(dt) < tol:
x[i] = t; x[m-i-1] = -t
A[i] = 2.0/(1.0 - t**2)/(dp**2) # Eq.(6.25)
A[m-i-1] = A[i]
break
return x,A
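
# Worked example: integrate x**2 over [-1, 1] with the returned nodes and weights;
# the exact value is 2/3 (numpy assumed available).
import numpy as np

x, A = gauss_legendre(3)
print(np.sum(A * x**2))  # ~0.6666666666666666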
|
5353373ee59cd559817a737271b4ff89cc031709
| 3,639,210
|
def simple_message(msg, parent=None, title=None):
"""
create a simple message dialog with string msg. Optionally set
the parent widget and dialog title
"""
dialog = gtk.MessageDialog(
parent = None,
type = gtk.MESSAGE_INFO,
buttons = gtk.BUTTONS_OK,
message_format = msg)
if parent is not None:
dialog.set_transient_for(parent)
if title is not None:
dialog.set_title(title)
dialog.show()
dialog.run()
dialog.destroy()
return None
|
c6b021a4345f51f58fdf530441596001843b0506
| 3,639,211
|
def accept(value):
"""Accept header class and method decorator."""
def accept_decorator(t):
set_decor(t, 'header', CaseInsensitiveDict({'Accept': value}))
return t
return accept_decorator
|
f7b392c2b9ab3024e96856cbcda9752a9076ea73
| 3,639,212
|
from pathlib import Path
def screenshot(widget, path=None, dir=None):
"""Save a screenshot of a Qt widget to a PNG file.
By default, the screenshots are saved in `~/.phy/screenshots/`.
Parameters
----------
widget : Qt widget
Any widget to capture (including OpenGL widgets).
    path : str or Path
        Path to the PNG file.
    dir : str or Path
        Directory used to build the default screenshot path when `path` is not given.
"""
path = path or screenshot_default_path(widget, dir=dir)
path = Path(path).resolve()
if isinstance(widget, QOpenGLWindow):
# Special call for OpenGL widgets.
widget.grabFramebuffer().save(str(path))
else:
# Generic call for regular Qt widgets.
widget.grab().save(str(path))
logger.info("Saved screenshot to %s.", path)
return path
|
dbb221f25f1b2dbe4b439afda225c452692b24fb
| 3,639,213
|
def xyz_to_rtp(x, y, z):
"""
Convert 1-D Cartesian (x, y, z) coords. to 3-D spherical coords.
(r, theta, phi).
The z-coord. is assumed to be anti-parallel to the r-coord. when
theta = 0.
"""
# First establish 3-D versions of x, y, z
xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')
# Calculate 3-D spherical coordinate vectors.
rr = np.sqrt(xx**2 + yy**2 + zz**2)
tt = np.arccos(zz / rr)
pp = np.arccos(xx / np.sqrt(xx**2 + yy**2))
return rr, tt, pp
|
db8fbcb50cde2c529fe94e546b0caaea79327df6
| 3,639,214
|
import re
def irccat_targets(bot, targets):
"""
Go through our potential targets and place them in an array so we can
easily loop through them when sending messages.
"""
result = []
for s in targets.split(','):
if re.search('^@', s):
result.append(re.sub('^@', '', s))
elif re.search('^#', s) and s in bot.config.core.channels:
result.append(s)
        elif re.search(r'^#\*$', s):
for c in bot.config.core.channels:
result.append(c)
return result
|
b7dce597fc301930aae665c338a9e9ada5f2be7e
| 3,639,215
|
def _watchos_stub_partial_impl(
*,
ctx,
actions,
binary_artifact,
label_name,
watch_application):
"""Implementation for the watchOS stub processing partial."""
bundle_files = []
providers = []
if binary_artifact:
# Create intermediate file with proper name for the binary.
intermediate_file = intermediates.file(
actions,
label_name,
"WK",
)
actions.symlink(
target_file = binary_artifact,
output = intermediate_file,
)
bundle_files.append(
(processor.location.bundle, "_WatchKitStub", depset([intermediate_file])),
)
providers.append(_AppleWatchosStubInfo(binary = intermediate_file))
if watch_application:
binary_artifact = watch_application[_AppleWatchosStubInfo].binary
bundle_files.append(
(processor.location.archive, "WatchKitSupport2", depset([binary_artifact])),
)
return struct(
bundle_files = bundle_files,
providers = providers,
)
|
dd4342893eb933572262a3b3bd242112c1737b3b
| 3,639,216
|
def catMullRomFit(p, nPoints=100):
"""
    Return a smoothed path from a list of QPointF objects p, interpolating points if needed.
    This function takes a set of points and fits a Catmull-Rom spline to the data. It then
    interpolates the set of points and outputs a smoothed path with the desired number of points
    on it.
p : the path to be smoothed
nPoints : the desired number of points in the smoothed path
"""
N = len(p)
    # no re-interpolation required when the requested count matches the input
if N == nPoints:
return p
interp = []
dj = 1.0 / nPoints
for j in range(0, nPoints):
di = j * dj * (N - 1)
i = int(di)
x = di - i
xx = x * x
xxx = x * x * x
c0 = 2.0 * xxx - 3.0 * xx + 1.0
c1 = xxx - 2.0 * xx + x
c2 = -2.0 * xxx + 3.0 * xx
c3 = xxx - xx
p0 = p[i]
p1 = p0
p2 = p0
p3 = p0
if i + 1 < N:
p1 = p[i + 1]
if i - 1 > -1:
p2 = p[i - 1]
if i + 2 < N:
p3 = p[i + 2]
m0 = toVector(p1 - p2) * 0.5
m1 = toVector(p3 - p0) * 0.5
px = (c0 * toVector(p0)) + (c1 * m0) + (c2 * toVector(p1)) + (c3 * m1)
interp.append(toPoint(px))
# pop back the last one
interp.pop()
# make sure the last point in the original polygon is still the last one
interp.append(p[-1])
return interp
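# Usage sketch (assumes Qt's QPointF and the toVector/toPoint helpers referenced above):
#   from PyQt5.QtCore import QPointF
#   rough = [QPointF(0, 0), QPointF(1, 2), QPointF(3, 1), QPointF(4, 4)]
#   smooth = catMullRomFit(rough, nPoints=50)   # 50 points along the fitted spline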
|
fb63e67b2bf9fd78e04436cd7f12d214bb6904c7
| 3,639,218
|
import numpy as np
from scipy.interpolate import interp1d
def pdf_from_ppf(quantiles, ppfs, edges):
"""
Reconstruct pdf from ppf and evaluate at desired points.
Parameters
----------
quantiles: numpy.ndarray, shape=(L)
L quantiles for which the ppf_values are known
ppfs: numpy.ndarray, shape=(1,...,L)
Corresponding ppf-values for all quantiles
edges: numpy.ndarray, shape=(M+1)
Binning of the desired binned pdf
Returns
-------
pdf_values: numpy.ndarray, shape=(1,...,M)
Recomputed, binned pdf
"""
# recalculate pdf values through numerical differentiation
pdf_interpolant = np.nan_to_num(np.diff(quantiles) / np.diff(ppfs, axis=-1))
# Unconventional solution to make this usable with np.apply_along_axis for readability
# The ppf bin-mids are computed since the pdf-values are derived through derivation
# from the ppf-values
xyconcat = np.concatenate(
(ppfs[..., :-1] + np.diff(ppfs) / 2, pdf_interpolant), axis=-1
)
def interpolate_ppf(xy):
ppf = xy[:len(xy) // 2]
pdf = xy[len(xy) // 2:]
interpolate = interp1d(ppf, pdf, bounds_error=False, fill_value=(0, 0))
result = np.nan_to_num(interpolate(edges[:-1]))
return np.diff(edges) * result
# Interpolate pdf samples and evaluate at bin edges, weight with the bin_width to estimate
# correct bin height via the midpoint rule formulation of the trapezoidal rule
pdf_values = np.apply_along_axis(interpolate_ppf, -1, xyconcat)
return pdf_values
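# Usage sketch (hypothetical inputs; scipy.stats is assumed only for a reference distribution):
#   from scipy.stats import norm
#   quantiles = np.linspace(0.001, 0.999, 200)
#   ppfs = norm.ppf(quantiles)          # shape (200,)
#   edges = np.linspace(-4, 4, 41)      # 40 bins
#   binned_pdf = pdf_from_ppf(quantiles, ppfs, edges)   # shape (40,)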
|
52c3d19ee915d1deeb99f39ce036deca59c536b3
| 3,639,219
|
import types
import re
def get_arg_text(ob):
"""Get a string describing the arguments for the given object"""
arg_text = ""
if ob is not None:
arg_offset = 0
if type(ob) in (types.ClassType, types.TypeType):
# Look for the highest __init__ in the class chain.
fob = _find_constructor(ob)
if fob is None:
fob = lambda: None
else:
arg_offset = 1
elif type(ob)==types.MethodType:
# bit of a hack for methods - turn it into a function
# but we drop the "self" param.
fob = ob.im_func
arg_offset = 1
else:
fob = ob
# Try to build one for Python defined functions
if type(fob) in [types.FunctionType, types.LambdaType]:
argcount = fob.func_code.co_argcount
real_args = fob.func_code.co_varnames[arg_offset:argcount]
defaults = fob.func_defaults or []
defaults = list(map(lambda name: "=%s" % repr(name), defaults))
defaults = [""] * (len(real_args) - len(defaults)) + defaults
items = map(lambda arg, dflt: arg + dflt, real_args, defaults)
if fob.func_code.co_flags & 0x4:
items.append("...")
if fob.func_code.co_flags & 0x8:
items.append("***")
arg_text = ", ".join(items)
arg_text = "(%s)" % re.sub("\.\d+", "<tuple>", arg_text)
# See if we can use the docstring
doc = getattr(ob, "__doc__", "")
if doc:
doc = doc.lstrip()
pos = doc.find("\n")
if pos < 0 or pos > 70:
pos = 70
if arg_text:
arg_text += "\n"
arg_text += doc[:pos]
return arg_text
|
5dc6d262dfe7e10a5ba93fd26c49a0d6bae3bb37
| 3,639,220
|
import random
import numpy as np
import pandas as pd
import statsmodels.api as sm
def create_ses_weights(d, ses_col, covs, p_high_ses, use_propensity_scores):
"""
Used for training preferentially on high or low SES people. If use_propensity_scores is True, uses propensity score matching on covs.
Note: this samples from individual images, not from individual people. I think this is okay as long as we're clear about what's being done. If p_high_ses = 0 or 1, both sampling methods are equivalent. One reason to sample images rather than people is that if you use propensity score weighting, covs may change for people over time.
"""
assert p_high_ses >= 0 and p_high_ses <= 1
high_ses_idxs = (d[ses_col] == True).values
n_high_ses = high_ses_idxs.sum()
n_low_ses = len(d) - n_high_ses
assert pd.isnull(d[ses_col]).sum() == 0
n_to_sample = min(n_high_ses, n_low_ses) # want to make sure train set size doesn't change as we change p_high_ses from 0 to 1 so can't have a train set size larger than either n_high_ses or n_low_ses
n_high_ses_to_sample = int(p_high_ses * n_to_sample)
n_low_ses_to_sample = n_to_sample - n_high_ses_to_sample
all_idxs = np.arange(len(d))
high_ses_samples = np.array(random.sample(list(all_idxs[high_ses_idxs]), n_high_ses_to_sample))
low_ses_samples = np.array(random.sample(list(all_idxs[~high_ses_idxs]), n_low_ses_to_sample))
print("%i high SES samples and %i low SES samples drawn with p_high_ses=%2.3f" %
(len(high_ses_samples), len(low_ses_samples), p_high_ses))
# create weights.
weights = np.zeros(len(d))
if len(high_ses_samples) > 0:
weights[high_ses_samples] = 1.
if len(low_ses_samples) > 0:
weights[low_ses_samples] = 1.
if not use_propensity_scores:
assert covs is None
weights = weights / weights.sum()
return weights
else:
assert covs is not None
# fit probability model
propensity_model = sm.Logit.from_formula('%s ~ %s' % (ses_col, '+'.join(covs)), data=d).fit()
print("Fit propensity model")
print(propensity_model.summary())
# compute inverse propensity weights.
# "A subject's weight is equal to the inverse of the probability of receiving the treatment that the subject actually received"
# The treatment here is whether they are high SES,
# and we are matching them on the other covariates.
high_ses_propensity_scores = propensity_model.predict(d).values
high_ses_weights = 1 / high_ses_propensity_scores
low_ses_weights = 1 / (1 - high_ses_propensity_scores)
propensity_weights = np.zeros(len(d))
propensity_weights[high_ses_idxs] = high_ses_weights[high_ses_idxs]
propensity_weights[~high_ses_idxs] = low_ses_weights[~high_ses_idxs]
assert np.isnan(propensity_weights).sum() == 0
        # multiply indicator vector by propensity weights.
weights = weights * propensity_weights
# normalize weights so that high and low SES sum to the right things.
print(n_high_ses_to_sample, n_low_ses_to_sample)
if n_high_ses_to_sample > 0:
weights[high_ses_idxs] = n_high_ses_to_sample * weights[high_ses_idxs] / weights[high_ses_idxs].sum()
if n_low_ses_to_sample > 0:
weights[~high_ses_idxs] = n_low_ses_to_sample * weights[~high_ses_idxs] / weights[~high_ses_idxs].sum()
assert np.isnan(weights).sum() == 0
# normalize whole vector, just to keep things clean
weights = weights / weights.sum()
return weights
|
de5b401ef1419d61664c565f5572d3dd80c6fdfb
| 3,639,221
|
def decoder_g(zxs):
"""Define decoder."""
with tf.variable_scope('decoder', reuse=tf.AUTO_REUSE):
hidden_layer = zxs
for i, n_hidden_units in enumerate(FLAGS.n_hidden_units_g):
hidden_layer = tf.layers.dense(
hidden_layer,
n_hidden_units,
activation=tf.nn.relu,
name='decoder_{}'.format(i),
reuse=tf.AUTO_REUSE,
kernel_initializer='normal')
i = len(FLAGS.n_hidden_units_g)
y_hat = tf.layers.dense(
hidden_layer,
FLAGS.dim_y,
name='decoder_{}'.format(i),
reuse=tf.AUTO_REUSE,
kernel_initializer='normal')
return y_hat
|
6974624dccecae7bbb5f650f0ebe0c819df4aa67
| 3,639,223
|
def make_evinfo_str(json_str):
"""
    [Method overview]
    Format event information into a string for DB registration
"""
evinfo_str = ''
for v in json_str[EventsRequestCommon.KEY_EVENTINFO]:
if evinfo_str:
evinfo_str += ','
if not isinstance(v, list):
evinfo_str += '"%s"' % (v)
else:
temp_val = '['
for i, val in enumerate(v):
if i > 0:
temp_val += ','
temp_val += '"%s"' % (val)
temp_val += ']'
evinfo_str += '%s' % (temp_val)
return evinfo_str
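# Usage sketch (hypothetical payload; EventsRequestCommon.KEY_EVENTINFO is assumed to name the event-info key):
#   payload = {EventsRequestCommon.KEY_EVENTINFO: ['host01', 'CRITICAL', ['tag1', 'tag2']]}
#   make_evinfo_str(payload)   # -> '"host01","CRITICAL",["tag1","tag2"]'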
|
6717652f1adf227b03864f8b4b4268524eb7cbc4
| 3,639,224
|
import pandas as pd
def parse_cisa_data(parse_file: str) -> object:
"""Parse the CISA Known Exploited Vulnerabilities file and create a new dataframe."""
inform("Parsing results")
# Now parse CSV using pandas, GUID is CVE-ID
new_dataframe = pd.read_csv(parse_file, parse_dates=['dueDate', 'dateAdded'])
# extend dataframe
new_dataframe['AssetsVulnerableCount'] = int(0)
pd.to_numeric(new_dataframe['AssetsVulnerableCount'])
# force these fields to be dtype objects
new_dataframe['AssetsVulnerable'] = pd.NaT
    new_dataframe['AssetsVulnerable'] = new_dataframe['AssetsVulnerable'].astype('object')
return new_dataframe
|
7bc95a4d60b869395f20d8619f80b116156de4ad
| 3,639,225
|
from flask import render_template
def camera():
"""Video streaming home page."""
return render_template('index.html')
|
75c501daa3d9a8b0090a0e9174b29a0b848057be
| 3,639,226
|
from tqdm import tqdm
def fit_alternative(model, dataloader, optimizer, train_data, labelled=True):
"""
fit method using alternative loss, executes one epoch
:param model: VAE model to train
    :param dataloader: input dataloader to fetch batches
:param optimizer: which optimizer to utilize
:param train_data: useful for plotting completion bar
:param labelled: to know if the data is composed of (data, target) or only data
:return: train loss
"""
model.train() # set in train mode
running_loss, running_kld_loss, running_rec_loss = 0.0, 0.0, 0.0 # set up losses to accumulate over
for i, data in tqdm(enumerate(dataloader), total=int(len(train_data) / dataloader.batch_size)):
data = data[0] if labelled else data # get the train batch
data = data.view(data.size(0), -1) # unroll
optimizer.zero_grad() # set gradient to zero
mu_rec, mu_latent, logvar_latent = model(data) # feedforward
loss = elbo_loss_alternative(mu_rec, model.log_var_rec, mu_latent, logvar_latent, data) # get loss value
# update losses
running_kld_loss += loss[0].item()
running_rec_loss += loss[1].item()
running_loss += loss[2].item()
loss[2].backward() # set up gradient with total loss
optimizer.step() # backprop
# set up return variable for all three losses
train_loss = [running_kld_loss / len(dataloader.dataset),
running_rec_loss / len(dataloader.dataset),
running_loss / len(dataloader.dataset)]
return train_loss
|
3889d2d72ce71095d3016427c87795ef65aa9fa4
| 3,639,228
|
from absl.testing import flagsaver
def FlagOverrider(**flag_kwargs):
"""A Helpful decorator which can switch the flag values temporarily."""
return flagsaver.flagsaver(**flag_kwargs)
|
39a39b1884c246ae45d8166c2eae9bb68dea2c70
| 3,639,229
|
def cli(ctx, path, max_depth=1):
"""List files available from a remote repository for a local path as a tree
Output:
None
"""
return ctx.gi.file.tree(path, max_depth=max_depth)
|
4be4fdffce7862332aa27a40ee684aae31fd67b5
| 3,639,230
|
import cv2
import numpy as np
def warp_p(binary_img):
"""
Warps binary_image using hard coded source and destination
vertices. Returns warped binary image, warp matrix and
inverse matrix.
"""
src = np.float32([[580, 450],
[180, 720],
[1120, 720],
[700, 450]])
dst = np.float32([[350, 0],
[350, 720],
[900, 720],
[900, 0]])
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
binary_warped = cv2.warpPerspective(binary_img, M, (binary_img.shape[1], binary_img.shape[0]), flags=cv2.INTER_LINEAR)
return binary_warped, M, Minv
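# Usage sketch (the hard-coded src/dst vertices assume a 1280x720 road-facing frame):
#   binary_img = np.zeros((720, 1280), dtype=np.uint8)   # e.g. a thresholded lane image
#   binary_warped, M, Minv = warp_p(binary_img)
#   unwarped = cv2.warpPerspective(binary_warped, Minv, (1280, 720))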
|
ea0ca98138ff9fbf52201186270c3d2561f57ec2
| 3,639,231
|
def _get_xml_sps(document):
"""
Download XML file and instantiate a `SPS_Package`
Parameters
----------
document : opac_schema.v1.models.Article
Returns
-------
dsm.data.sps_package.SPS_Package
"""
# download XML file
content = reqs.requests_get_content(document.xml)
xml_sps = SPS_Package(content)
# change assets uri
xml_sps.remote_to_local(xml_sps.package_name)
return xml_sps
|
908ceb96ca2b524899435f269e60ddd9b7db3f0c
| 3,639,232
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(ax, y_true, y_pred, classes,
normalize=False,
title=None,
cmap=plt.cm.Blues):
"""
From scikit-learn example:
https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'
# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred)
# Only use the labels that appear in the data
# classes = classes[unique_labels(y_true, y_pred)]
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# print("Normalized confusion matrix")
# else:
# print('Confusion matrix, without normalization')
# print(cm)
# fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes, yticklabels=classes,
title=title,
ylabel='True label',
xlabel='Predicted label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
# fig.tight_layout()
return ax
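# Usage sketch (hypothetical labels; assumes matplotlib and scikit-learn as imported above):
#   fig, ax = plt.subplots()
#   plot_confusion_matrix(ax, y_true=[0, 1, 1, 0], y_pred=[0, 1, 0, 0],
#                         classes=['neg', 'pos'], normalize=True)
#   plt.show()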
|
ba88d9f96f9b9da92987fa3df4d38270162fc903
| 3,639,233
|
def _in_docker():
""" Returns: True if running in a Docker container, else False """
with open('/proc/1/cgroup', 'rt') as ifh:
if 'docker' in ifh.read():
print('in docker, skipping benchmark')
return True
return False
|
4a0fbd26c5d52c5fe282b82bc4fe14986f8aef4f
| 3,639,234
|
def asPosition(flags):
""" Translate a directional flag from an actions into a tuple indicating
the targeted tile. If no directional flag is found in the inputs,
returns (0, 0).
"""
if flags & NORTH:
return 0, 1
elif flags & SOUTH:
return 0, -1
elif flags & EAST:
return 1, 0
elif flags & WEAST:
return -1, 0
return 0, 0
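# Usage sketch (NORTH/SOUTH/EAST/WEAST are assumed to be bit-flag constants defined alongside this helper):
#   dx, dy = asPosition(action_flags | NORTH)   # -> (0, 1) when the NORTH bit is set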
|
9e1b2957b1cd8b71033b644684046e71e85f5105
| 3,639,235
|
from nibabel import load
import numpy as np
def pickvol(filenames, fileidx, which):
"""Retrieve index of named volume
Parameters
----------
filenames: list of 4D file names
fileidx: which 4D file to look at
which: 'first' or 'middle'
Returns
-------
idx: index of first or middle volume
"""
if which.lower() == 'first':
idx = 0
elif which.lower() == 'middle':
        idx = int(np.ceil(load(filenames[fileidx]).shape[3] / 2))
else:
raise Exception('unknown value for volume selection : %s' % which)
return idx
|
7090ab35959289c221b6baab0ba1719f0c518ef4
| 3,639,236
|
def merge(d, **kwargs):
"""Recursively merges given kwargs int to a
dict - only if the values are not None.
"""
for key, value in kwargs.items():
if isinstance(value, dict):
d[key] = merge(d.get(key, {}), **value)
elif value is not None:
d[key] = value
return d
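# Quick example (pure Python): nested dicts merge recursively and None values are skipped.
#   base = {"db": {"host": "localhost", "port": 5432}}
#   merge(base, db={"port": 5433, "user": None}, debug=True)
#   # -> {'db': {'host': 'localhost', 'port': 5433}, 'debug': True}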
|
168cc66cce0a04b086a17089ebcadc16fbb4c1d0
| 3,639,237
|
def init_config_flow(hass):
"""Init a configuration flow."""
flow = config_flow.VelbusConfigFlow()
flow.hass = hass
return flow
|
6eccc23ceca6b08268701486ed2e79c47c220e13
| 3,639,238
|
from typing import Dict
import datetime
from typing import FrozenSet
def read_service_ids_by_date(path: str) -> Dict[datetime.date, FrozenSet[str]]:
"""Find all service identifiers by date"""
feed = load_raw_feed(path)
return _service_ids_by_date(feed)
|
60e39ccb517f00243db97835b223e894c9f64540
| 3,639,239
|
def get_all_services(org_id: str) -> tuple:
"""
**public_services_api**
returns a service governed by organization_id and service_id
:param org_id:
:return:
"""
return services_view.return_services(organization_id=org_id)
|
d779e7312d363ad507c994c38ba844912bf49e9c
| 3,639,240
|
import tensorflow as tf
def get_initializer(initializer_range=0.02):
"""Creates a `tf.initializers.truncated_normal` with the given range.
Args:
initializer_range: float, initializer range for stddev.
Returns:
TruncatedNormal initializer with stddev = `initializer_range`.
"""
return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)
|
fa6aca01bd96c6cb97af5e68f4221d285e482612
| 3,639,241
|
import math
def findh_s0(h_max, h_min, q):
"""
    Finds the tension force numerically (using the bisection method);
    upper and lower bounds for the bisection must be provided
    :param h_max: Upper bound for the tension force search
    :param h_min: Lower bound for the tension force search
    :param q: total load on the cable [N/m]
    :return: h - tension force, i - number of iterations required
"""
i = 1
h = (h_min + h_max) / 2
print("Wstępne H = " + str(h))
f_m = calculatefm(h, q)
while (math.fabs(f_m - f_0_m) >= 1 * 10 ** -8):
if f_m < f_0_m:
h_max = h
else:
h_min = h
# print("iteracja #" + str(i) + " h_max = " + str(h_max) + " h_min = "
# + str(h_min) + " nowe H: " + str(h) + " f_m = " + str(f_m)
# + " docelowe: " + str(f_0_m))
h = (h_min + h_max) / 2
f_m = calculatefm(h, q)
i += 1
return h, i
|
28926742c6d786ffa47a084a318f54fafb3da98c
| 3,639,242
|
import numpy as np
def velocity_dependent_covariance(vel):
"""
This function computes the noise in the velocity channel.
The noise generated is gaussian centered around 0, with sd = a + b*v;
where a = 0.01; b = 0.05 (Vul, Frank, Tenenbaum, Alvarez 2009)
:param vel:
:return: covariance
"""
cov = []
for v in vel:
        # sd = a + b * |v| for each individual velocity sample
        ans = 0.01 + 0.05 * np.linalg.norm(v)
cov.append(ans)
cov = np.array(cov)
return cov
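# Usage sketch (only numpy assumed): two samples with speeds 1 and 2 give sd values 0.06 and 0.11.
#   vel = np.array([[1.0, 0.0], [0.0, 2.0]])
#   velocity_dependent_covariance(vel)   # -> array([0.06, 0.11])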
|
4a1bb6c8f6c5956585bd6f5a09f4d80ee397bbe5
| 3,639,243
|
import numpy as np
def msd_Correlation(allX):
"""Autocorrelation part of MSD."""
M = allX.shape[0]
# numpy with MKL (i.e. intelpython distribution), the fft wont be
# accelerated unless axis along 0 or -1
# perform FT along n_frame axis
# (n_frams, n_particles, n_dim) -> (n_frames_Ft, n_particles, n_dim)
allFX = np.fft.rfft(allX, axis=0, n=M*2)
# sum over n_dim axis
corr = np.sum(abs(allFX)**2, axis=(1, -1)) # (n_frames_ft,)
    # IFT over n_frame_ft axis (axis=0), whole operation equals to
# fx = fft(_.T[0]), fy =... for _ in
# allX.swapaxes(0,1) -> (n_particles, n_frames, n_dim)
# then sum fx, fy, fz...fndim
# rfft for real inputs, higher eff
return np.fft.irfft(corr, n=2 * M)[:M].real/np.arange(M, 0, -1)
    # (n_frames,), the n_particles dimension is summed out
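# Usage sketch (only numpy assumed): random-walk positions with shape (n_frames, n_particles, n_dim)
#   allX = np.cumsum(np.random.randn(1000, 10, 3), axis=0)
#   acf_part = msd_Correlation(allX)   # shape (1000,), the FFT-based autocorrelation term of the MSD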
|
c212e216d32814f70ab861d066c8000cf7e8e238
| 3,639,245
|
import math
def convert_table_value(fuel_usage_value):
"""
The graph is a little skewed, so this prepares the data for that.
0 = 0
1 = 25%
2 = 50%
3 = 100%
4 = 200%
5 = 400%
6 = 800%
7 = 1600% (not shown)
Intermediate values scale between those values. (5.5 is 600%)
"""
if fuel_usage_value < 25:
return 0.04 * fuel_usage_value
else:
return math.log((fuel_usage_value / 12.5), 2)
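# Worked examples matching the table in the docstring:
#   convert_table_value(25)    # -> 1.0  (25% row)
#   convert_table_value(100)   # -> 3.0  (100% row)
#   convert_table_value(400)   # -> 5.0  (400% row)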
|
15e4deedb4809eddd830f7d586b63075b71568ef
| 3,639,246
|
import os
import TestWin
def FindMSBuildInstallation(msvs_version = 'auto'):
"""Returns path to MSBuild for msvs_version or latest available.
Looks in the registry to find install location of MSBuild.
MSBuild before v4.0 will not build c++ projects, so only use newer versions.
"""
registry = TestWin.Registry()
msvs_to_msbuild = {
'2013': r'12.0',
'2012': r'4.0', # Really v4.0.30319 which comes with .NET 4.5.
'2010': r'4.0'}
msbuild_basekey = r'HKLM\SOFTWARE\Microsoft\MSBuild\ToolsVersions'
if not registry.KeyExists(msbuild_basekey):
print 'Error: could not find MSBuild base registry entry'
return None
msbuild_version = None
if msvs_version in msvs_to_msbuild:
msbuild_test_version = msvs_to_msbuild[msvs_version]
if registry.KeyExists(msbuild_basekey + '\\' + msbuild_test_version):
msbuild_version = msbuild_test_version
else:
print ('Warning: Environment variable GYP_MSVS_VERSION specifies "%s" '
'but corresponding MSBuild "%s" was not found.' %
             (msvs_version, msbuild_test_version))
if not msbuild_version:
for msvs_version in sorted(msvs_to_msbuild, reverse=True):
msbuild_test_version = msvs_to_msbuild[msvs_version]
if registry.KeyExists(msbuild_basekey + '\\' + msbuild_test_version):
msbuild_version = msbuild_test_version
break
if not msbuild_version:
print 'Error: could not find MSBuild registry entry'
return None
msbuild_path = registry.GetValue(msbuild_basekey + '\\' + msbuild_version,
'MSBuildToolsPath')
if not msbuild_path:
print 'Error: could not get MSBuild registry entry value'
return None
return os.path.join(msbuild_path, 'MSBuild.exe')
|
daf5151c08e52b71110075b3dd59071a3a6f124f
| 3,639,247
|
def create_toc_xhtml(metadata: WorkMetadata, spine: list[Matter]) -> str:
"""
Load the default `toc.xhtml` file, and generate the required terms for the creative work. Return xhtml as a string.
Parameters
----------
metadata: WorkMetadata
All the terms for updating the work, not all compulsory
spine: list of Matter
Spine and guide list of Matter, with `dedication` at 0, if present
Returns
-------
str: xhtml response for `toc.xhtml` as a str.
"""
with open(DATA_PATH / "xhtml" / DEFAULT_TOC_XHTML, "r+", encoding="utf-8") as toc_file:
toc_xml = toc_file.read()
# Table of Contents
toc_xhtml = ""
chapter = 1
for matter in spine:
if matter.content == FrontMatter.dedication:
toc_xhtml += F'\t\t\t\t<li>\n\t\t\t\t\t<a href="text/dedication.xhtml">{matter.title}</a>\n\t\t\t\t</li>\n'
if matter.partition == MatterPartition.body:
toc_xhtml += F'\t\t\t\t<li>\n\t\t\t\t\t<a href="text/chapter-{chapter}.xhtml">{matter.title}</a>\n\t\t\t\t</li>\n'
chapter += 1
toc_xml = toc_xml.replace('\t\t\t\t<li>\n\t\t\t\t\t<a href="text/chapter-1.xhtml"></a>\n\t\t\t\t</li>\n',
toc_xhtml)
# Landmark Title
toc_xml = toc_xml.replace('<a href="text/chapter-1.xhtml" epub:type="bodymatter z3998:fiction">WORK_TITLE</a>',
F'<a href="text/chapter-1.xhtml" epub:type="bodymatter z3998:fiction">{metadata.title}</a>')
return toc_xml
|
9971d408f39056b6d2078e5157f2c39dbce8c202
| 3,639,248
|
import numpy as np
def convertSLToNumzero(sl, min_sl=1e-3):
"""
Converts a (neg or pos) significance level to
a count of significant zeroes.
Parameters
----------
sl: float
Returns
-------
float
"""
if np.isnan(sl):
return 0
if sl < 0:
sl = min(sl, -min_sl)
num_zero = np.log10(-sl)
elif sl > 0:
sl = max(sl, min_sl)
num_zero = -np.log10(sl)
else:
raise RuntimeError("Cannot have significance level of 0.")
return num_zero
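# Usage sketch (only numpy assumed):
#   convertSLToNumzero(1e-3)    # -> 3.0
#   convertSLToNumzero(-1e-2)   # -> -2.0 (negative significance levels give negative counts)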
|
c8cbea09904a7480e36529ffc7a62e6cdddc7a47
| 3,639,249
|
import numpy as np
def calibrate_time_domain(power_spectrum, data_pkt):
"""
Return a list of the calibrated time domain data
:param list power_spectrum: spectral data of the time domain data
:param data_pkt: a RTSA VRT data packet
:type data_pkt: pyrf.vrt.DataPacket
:returns: a list containing the calibrated time domain data
"""
i_data, q_data, stream_id, spec_inv = _decode_data_pkts(data_pkt)
# Time domain data calibration
if stream_id in (VRT_IFDATA_I14, VRT_IFDATA_I24):
        td_data = i_data - np.mean(i_data)
complex_coefficient = 1
if stream_id == VRT_IFDATA_I14Q14:
td_data = i_data + 1j * q_data
td_data = td_data - np.mean(td_data)
complex_coefficient = 2
P_FD_Ln = 10**(power_spectrum/10)
P_FD_av = np.mean(P_FD_Ln)
v_volt = td_data * np.sqrt(1e-3) * np.sqrt(P_FD_av/np.var(td_data)) * 50 * np.sqrt(complex_coefficient*len(td_data)/128.0)
return v_volt
|
a4bfa279ac4ada5ffe6d7bd6e8cf64e59ae0bf61
| 3,639,250
|
import tensorflow as tf
def func(x):
"""
:param x: [b, 2]
    :return: [b] tensor with sin(x[..., 0]) + sin(x[..., 1])
"""
z = tf.math.sin(x[...,0]) + tf.math.sin(x[...,1])
return z
|
daf4e05c6a8c1f735842a0ef6fa115b14e85ef40
| 3,639,251
|
from typing import Tuple
from typing import Dict
from typing import Any
from typing import List
def parse_handler_input(handler_input: HandlerInput,
) -> Tuple[UserMessage, Dict[str, Any]]:
"""Parses the ASK-SDK HandlerInput into Slowbro UserMessage.
Returns the UserMessage object and serialized SessionAttributes.
"""
request_envelope = handler_input.request_envelope
text: str
asr_hypos: List[AsrHypothesisUtterance] = []
if is_request_type("LaunchRequest")(handler_input):
# This is a launch request.
text = ''
elif is_request_type("IntentRequest")(handler_input):
slots = request_envelope.request.intent.slots
slot_text = slots.get('Text', None)
if slot_text is not None:
text = slot_text.value
else:
text = ''
if hasattr(request_envelope.request, 'speechRecognition'):
hypotheses = request_envelope.request.speechRecognition.get(
'hypotheses', [])
asr_hypos.extend([
AsrHypothesisUtterance([
AsrHypothesisToken(token['value'], token['confidence'],
token['startOffsetInMilliseconds'],
token['endOffsetInMilliseconds'])
for token in hypo['tokens']
], hypo['confidence']) for hypo in hypotheses
])
elif text:
# NOTE: create a fake ASR hypo using the text field.
asr_hypos.extend([
AsrHypothesisUtterance([
AsrHypothesisToken(token, -1, -1, -1)
for token in text.split(' ')
], -1)
])
if not text:
# Try to recover the text using asr_hypos.
# Otherwise, raise an exception.
if asr_hypos:
                text = str(asr_hypos[0])
else:
raise Exception('Unable to find "text" from handler input:',
handler_input)
else:
raise Exception('Unable to parse handler input:', handler_input)
serializer = DefaultSerializer()
user_message = UserMessage(payload=serializer.serialize(request_envelope),
channel='alexaprize',
request_id=request_envelope.request.request_id,
session_id=request_envelope.session.session_id,
user_id=request_envelope.session.user.user_id,
text=text,
asr_hypos=asr_hypos)
attributes_manager = handler_input.attributes_manager
ser_session_attributes = attributes_manager.session_attributes
return (user_message, ser_session_attributes)
|
5be16af3f460de41af9e33cacc4ce94c447ceb45
| 3,639,252
|
def _validate_show_for_invoking_user_only(show_for_invoking_user_only):
"""
Validates the given `show_for_invoking_user_only` value.
Parameters
----------
show_for_invoking_user_only : `None` or `bool`
The `show_for_invoking_user_only` value to validate.
Returns
-------
show_for_invoking_user_only : `bool`
The validated `show_for_invoking_user_only` value.
Raises
------
TypeError
        If `show_for_invoking_user_only` was given as neither `None` nor a `bool` instance.
"""
if show_for_invoking_user_only is None:
show_for_invoking_user_only = False
else:
show_for_invoking_user_only = preconvert_bool(
show_for_invoking_user_only, 'show_for_invoking_user_only'
)
return show_for_invoking_user_only
|
a1f9612927dfc1423d027f242d759c982b11a8b8
| 3,639,253
|