content
stringlengths 35
762k
| sha1
stringlengths 40
40
| id
int64 0
3.66M
|
|---|---|---|
import time
def solveSudoku(fileName = "", showResults = False, showTime = False, matrix = None):
    """
    Solves a Sudoku by prompting for it, by reading a text file containing the sudoku,
    or by directly taking the matrix as a variable, and either prints the solution or
    returns it. Can also report the execution time.
    (At most one of the arguments 'fileName' or 'matrix' may be given; giving both
    raises ValueError. Giving neither prompts for the sudoku interactively.)
    args:
        -fileName - Name of the text file in which the sudoku is present (optional)
        -showResults - Prints the solution(s) if set true. Else returns the solution (optional)
        -showTime - Calculates and shows the execution time only if set true (optional)
        -matrix - 9x9 sudoku matrix (optional)
    returns: If 'showResults' is false, returns the 9x9 solved sudoku list (with a
             timing string appended when 'showTime' is true). Else prints the solution(s).
    raises: ValueError if both 'fileName' and 'matrix' are given.
    """
    # Use None as the default instead of a mutable [] so repeated calls can never
    # share (and accidentally mutate) the same default list.
    if matrix is None:
        matrix = []
    # Validate the argument combination first, before doing any work.
    if fileName != "" and matrix != []:
        raise ValueError("Please give any of the arguments, 'fileName' or 'matrix' (Both are given)")
    if fileName == "" and matrix == []:
        rows = prompt_sudoku()
    elif fileName != "":
        rows = get_sudoku(fileName)
    else:
        rows = matrix
    st = time.perf_counter()
    vert = vertical(rows)
    blocks = blockify(rows)
    # enumerate() supplies the true row index; the original rows.index(i) returned
    # the index of the FIRST equal row, which is wrong when two rows are identical.
    all_combo = [insert_combos(row, vert, blocks, idx) for idx, row in enumerate(rows)]
    a = all_combo
    # Depth-first search over candidate rows, pruning on column and block conflicts.
    for r1 in a[0]:
        for r2 in a[1]:
            if vertically_has_duplicates(r1,r2): continue
            for r3 in a[2]:
                if vertically_has_duplicates(r1,r2,r3) or blocks_has_duplicates([r1,r2,r3]): continue
                for r4 in a[3]:
                    if vertically_has_duplicates(r1,r2,r3,r4): continue
                    for r5 in a[4]:
                        if vertically_has_duplicates(r1,r2,r3,r4,r5): continue
                        for r6 in a[5]:
                            if vertically_has_duplicates(r1,r2,r3,r4,r5,r6) or blocks_has_duplicates([r4,r5,r6]): continue
                            for r7 in a[6]:
                                if vertically_has_duplicates(r1,r2,r3,r4,r5,r6,r7): continue
                                for r8 in a[7]:
                                    if vertically_has_duplicates(r1,r2,r3,r4,r5,r6,r7,r8): continue
                                    for r9 in a[8]:
                                        try_sol = [r1,r2,r3,r4,r5,r6,r7,r8,r9]
                                        if vertically_has_duplicates(r1,r2,r3,r4,r5,r6,r7,r8,r9) or blocks_has_duplicates([r7,r8,r9]): continue
                                        time_taken = 'Time Taken: '+str(round(time.perf_counter()-st, 4))+'s'
                                        if showResults:
                                            # Print this solution and keep searching for more.
                                            for row in try_sol:
                                                print(row)
                                            if showTime: print(time_taken)
                                        else:
                                            # Return the first solution found.
                                            if showTime: try_sol.append(time_taken)
                                            return try_sol
|
a54cd39111638b9a02845781e7b731ca32d11089
| 3,638,798
|
def vertexval(val, size):
    """Convert a GTP vertex string (e.g. 'a1', 'j9', 'pass') to (row, col).

    Returns None for 'pass'. Raises GTPError for malformed or off-board
    vertices.
    """
    val = val.lower()
    if val == 'pass':
        return None
    letter, digits = str(val[0]), val[1:]
    number = int(digits, 10)
    if not ('a' <= letter <= 'z'):
        raise GTPError('invalid vertex letter: {!r}'.format(val))
    if number < 1:
        raise GTPError('invalid vertex number: {!r}'.format(val))
    row = size - number
    # GTP coordinates skip the letter 'i'.
    base = ord('a') if letter < 'i' else ord('b')
    col = ord(letter) - base
    if not (0 <= row < size and 0 <= col < size):
        raise GTPError('off board')
    return row, col
|
610441915c1d46baf9157a849140d1db7bf6f3d5
| 3,638,799
|
def v0abs(x_ratio, distance_source, lr_angle, br_angle):
    """Return the norm of the velocity reference v_0.

    Combines the y and z velocity components, each corrected by the
    observer velocity scaled by (1 - x_ratio).
    """
    weight = 1. - x_ratio
    vy = vly(x_ratio, distance_source, lr_angle, br_angle) - weight * voy(lr_angle)
    vz = vlz(x_ratio, distance_source, lr_angle, br_angle) - weight * voz(lr_angle, br_angle)
    return np.sqrt(vy**2 + vz**2)
|
d2f55f1b92912d457a50cd7f89680b2452a6d2d4
| 3,638,800
|
def copy(stream, credentials, direction, hdfsFile=None, hdfsFileAttrName=None, localFile=None, name=None):
    """Copy a Hadoop Distributed File to local and copy a local file to the HDFS.
    Args:
        stream(Stream): Input stream whose tuples drive the copy operation.
        credentials(dict|str|file): The credentials of the IBM cloud Analytics Engine service in *JSON* (dict) or JSON string (str) or the path to the *configuration file* (``hdfs-site.xml`` or ``core-site.xml``). If the *configuration file* is specified, then this file will be copied to the 'etc' directory of the application bundle.
        direction(str): This mandatory parameter specifies the direction of copy. The parameter can be set with the following values. **'copyFromLocalFile'** Copy a file from local disk to the HDFS file system. **'copyToLocalFile'** Copy a file from HDFS file system to the local disk.
        hdfsFile(str): This parameter specifies the name of HDFS file or directory. If the name starts with a slash, it is considered an absolute path of HDFS file that you want to use. If it does not start with a slash, it is considered a relative path, relative to the /user/userid/hdfsFile .
            NOTE(review): this argument is accepted but never forwarded to the
            operator below (only hdfsFileAttrName is passed) — confirm whether
            that is intentional or a dropped parameter.
        hdfsFileAttrName(str): Name of the input-stream attribute holding the HDFS file name.
        localFile(str): This parameter specifies the name of local file to be copied. If the name starts with a slash, it is considered an absolute path of local file that you want to copy. If it does not start with a slash, it is considered a relative path, relative to your project data directory.
        name(str): Source name in the Streams context, defaults to a generated name.
    Returns:
        Output Stream containing the result message and the elapsed time with schema :py:const:`~streamsx.hdfs.FileCopySchema`.
    """
    # Map the user-facing direction string onto the operator's enum value.
    Direction=_convert_copy_direction_string_to_enum(direction)
    # Normalize the supplied credentials into the individual connection pieces.
    credentials, hdfsUri, hdfsUser, hdfsPassword, configPath = _setCredentials(credentials, stream.topology)
    _op = _HDFS2FileCopy(stream, configPath=configPath, credentials=credentials, hdfsUri=hdfsUri, hdfsUser=hdfsUser, hdfsPassword=hdfsPassword, direction=Direction, hdfsFileAttrName=hdfsFileAttrName, localFile=localFile , schema=FileCopySchema, name=name)
    return _op.outputs[0]
|
843d36d1a37591761d2791f7d78c5c3273e3abf8
| 3,638,802
|
from typing import List
def normalize(value: str) -> str:
    """Remove every '-' and upper-case the character that followed each one."""
    out = []
    upper_next = False
    for ch in value:
        if ch == '-':
            upper_next = True
            continue
        out.append(ch.upper() if upper_next else ch)
        upper_next = False
    return ''.join(out)
|
52c1c8b5e950347cf63ed15d1efde47046b07873
| 3,638,803
|
def save_data(userId, columns, tableName, searchTerm="", objType="", sortBy=None,
tableId=None, isDefaultOnDashboard=False, maxRows=0,
dashboard=None, clone=False, row=0, grid_col=0, sizex=0,
sizey=0):
"""
Saves the customized table in the dashboard. Called by save_search and
save_new_dashboard via ajax in views.py.
"""
try:
if searchTerm:
searchTerm = HTMLParser.HTMLParser().unescape(searchTerm)
#if user is editing a table
if tableId :
newSavedSearch = SavedSearch.objects(id=tableId).first()
if not newSavedSearch:
raise Exception("Cannot find Table")
elif clone:
clonedSavedSearch = cloneSavedSearch(newSavedSearch, dashboard.id)
else:
newSavedSearch = SavedSearch()
cols = []
for col in columns:
if "field" not in col or "caption" not in col:
continue
cols.append(col)
if not cols:
raise("There are no columns to save")
newSavedSearch.tableColumns = cols
newSavedSearch.name = tableName
oldDashId = None
if dashboard and newSavedSearch.dashboard != dashboard.id:
newSavedSearch.dashboard= dashboard.id
#if it is not a deault dashboard table, it must have a searchterm and objtype
if searchTerm:
newSavedSearch.searchTerm = searchTerm
if objType:
newSavedSearch.objType = objType
#this is to identify the default tables on every user dashboard
newSavedSearch.isDefaultOnDashboard = isDefaultOnDashboard
if sortBy:
newSavedSearch.sortBy = sortBy
if sizex:
newSavedSearch.sizex = sizex
elif not newSavedSearch.sizex:
newSavedSearch.sizex = 50
if sizey:
newSavedSearch.sizey = sizey
elif maxRows and maxRows != newSavedSearch.maxRows:
newSavedSearch.sizey = int(maxRows)+1
elif not newSavedSearch.sizey:
newSavedSearch.sizey = 7
if row:
newSavedSearch.row = row
elif not newSavedSearch.row:
newSavedSearch.row = 1
if grid_col:
newSavedSearch.col = grid_col
elif not newSavedSearch.col:
newSavedSearch.col = 1
if maxRows:
newSavedSearch.maxRows = maxRows;
newSavedSearch.save()
#if the old dashboard is empty, delete it
if oldDashId:
deleteDashboardIfEmpty(oldDashId)
except Exception as e:
print "ERROR: "
print e
return {'success': False,
'message': "An unexpected error occurred while saving table. Please refresh and try again"}
return {'success': True,'message': tableName+" Saved Successfully!"}
|
da6491135246ad785f95333eb30d4316971f51fe
| 3,638,804
|
def heatmap(plot,
            client_color=False,
            low=(255, 200, 200), high=(255, 0, 0),
            spread=0, transform="cbrt", **kwargs):
    """
    Produce a heatmap from a set of shapes.
    A heatmap is a scale of how often a single thing occurs.
    This is a convenience function that encodes a common configuration,
    and parameters to support the most common variations.
    plot -- Plot to convert into a heatmap
    client_color -- If True, use an identity shader and reserve a value
                    instead of interpolating colors server-side
                    (presumably so the client performs the coloring —
                    TODO confirm). Default is False.
    low -- Low color of the heatmap. Default is a light red
    high -- High color of the heatmap. Default is full saturation red.
    spread -- How far (if any) should values be spread. Default is 0.
    transform -- Apply a transformation before building a color ramp?
                 Understood values are 'cbrt', 'log', 'none' and None.
                 The default is 'cbrt', for cuberoot, an approximation of
                 perceptual correction on monochromatic scales.
    kwargs -- Further arguments passed on to replot for greater control.
    """
    # Normalize the transform name; None means "no transform".
    transform = transform.lower() if transform is not None else None
    if client_color:
        shader = Id()
        kwargs['reserve_val'] = kwargs.get('reserve_val', 0)
    else:
        shader = InterpolateColor(low=low, high=high)
    # Prepend the perceptual transform (if any) ahead of the color shader.
    if transform == "cbrt":
        shader = Cuberoot() + shader
    elif transform == "log":
        shader = Log() + shader
    elif transform == "none" or transform is None:
        pass
    else:
        raise ValueError("Unrecognized transform '{0}'".format(transform))
    if spread > 0:
        shader = Spread(factor=spread, shape="circle") + shader
    kwargs['points'] = kwargs.get('points', True)
    # Count occurrences per cell; every shape contributes the constant 1.
    return replot(plot,
                  agg=Count(),
                  info=Const(val=1),
                  shader=shader,
                  **kwargs)
|
3e4b319b414cc7b751d44c0ef61db1e810d638a3
| 3,638,805
|
def add_comment_to_task(task_id, comment, status=None):
    """
    Add a comment to the given task.

    :param task_id: id of the task to comment on
    :param comment: comment text
    :param status: optional task-status name; when omitted the task's
        current status is used
    :return: the created comment, or None when the task or status
        cannot be resolved
    """
    task = get_task(task_id, as_dict=True)
    if not task:
        return
    if status:
        resolved_status = gazu.task.get_task_status_by_name(status)
    else:
        resolved_status = gazu.task.get_task_status(task)
    if not resolved_status:
        return
    return gazu.task.add_comment(task, resolved_status, comment)
|
2cd8d109c2420c10d5924178dbc491dfd5956ccf
| 3,638,806
|
def score(h, r, t):
    """
    Score each head embedding against the relation/tail interaction.

    :param h: (batch_size, dim)
    :param r: (dim, )
    :param t: (dim, )
    :return: (batch_size, ) array of scores h . (r * t)
    """
    interaction = r * t
    return np.dot(h, interaction.T)
|
afacedde04ea12a310301f097e2200bab2ef7adc
| 3,638,807
|
def migrate_file(file_name, file_content):
    """Run the V4 migration over a single file's content and return the result."""
    migrator = V4Migrator(file_name, file_content)
    return migrator.migrate()
|
7828fa20f49594aae59cbce3a03af42f14eac28d
| 3,638,808
|
def _GetOrganizedAnalysisResultBySuspectedCL(analysis_result):
  """Group tests if they have the same suspected CLs.

  Args:
    analysis_result: dict with a 'failures' list; each failure carries
      step_name, suspected_cls, optional tests/flaky/supported/last_pass.

  Returns:
    defaultdict(list) mapping step_name -> list of grouped result dicts
    (first_failure, last_pass, supported, tests, suspected_cls, flaky).
  """
  organized_results = defaultdict(list)
  if not analysis_result:
    return organized_results
  for step_failure in analysis_result.get('failures', []):
    step_name = step_failure['step_name']
    supported = step_failure.get('supported', True)
    # Maps a suspected CL revision -> its index in this step's suspected_cls.
    step_revisions_index = {}
    organized_suspected_cls = organized_results[step_name]
    is_flaky = step_failure.get('flaky', False)
    if not step_failure.get('tests'):
      # Non swarming, just group the whole step together.
      shared_result = {
          'first_failure': step_failure['first_failure'],
          'last_pass': step_failure.get('last_pass'),
          'supported': supported,
          'tests': [],
          'suspected_cls': step_failure['suspected_cls'],
          'flaky': is_flaky,
      }
      organized_suspected_cls.append(shared_result)
      continue
    # Swarming tests.
    for index, cl in enumerate(step_failure['suspected_cls']):
      step_revisions_index[cl['revision']] = index
    # Groups tests by suspected CLs' revision.
    # Keys are the indices of each test in the test list.
    # Format is as below:
    # {
    #     1: {
    #         'tests': ['test1', 'test2'],
    #         'revisions': ['rev1'],
    #         'suspected_cls': [
    #             # suspected cl info for rev1 at step level.
    #         ]
    #     },
    #     3: {
    #         'tests': ['test3'],
    #         'revisions': ['rev3', 'rev2'],
    #         'suspected_cls': [
    #             # suspected cl info for rev2, rev3 at step level.
    #         ]
    #     }
    # }
    tests_group = defaultdict(list)
    for index, test in enumerate(step_failure['tests']):
      # Get all revisions for this test and check if there is
      # any other test has the same culprit(represented by revision) set and
      # are flaky or reliable at the same time.
      test_name = test['test_name']
      is_flaky = test.get('flaky', False)
      found_group = False
      revisions = set()
      for cl in test['suspected_cls']:
        revisions.add(cl['revision'])
      for group in tests_group.values():
        # Found tests that have the same culprit(represented by revision),
        # add current test to group.
        if revisions == set(group['revisions']) and is_flaky == group['flaky']:
          group['tests'].append(test_name)
          found_group = True
          break
      if not found_group:
        # First test with that revision set, add a new group.
        group_suspected_cls = []
        for revision in revisions:
          group_suspected_cls.append(
              step_failure['suspected_cls'][step_revisions_index[revision]])
        # Keyed by the index of the first test in the group, so the group can
        # later borrow that test's first_failure/last_pass metadata.
        tests_group[index] = {
            'tests': [test_name],
            'revisions': list(revisions),
            'suspected_cls': group_suspected_cls,
            'flaky': is_flaky,
        }
    for index, group in tests_group.iteritems():
      # Reorganize heuristic results by culprits.
      test_result = step_failure['tests'][index]
      shared_result = {
          'first_failure': test_result['first_failure'],
          'last_pass': test_result.get('last_pass'),
          'supported': supported,
          'tests': group['tests'],
          'suspected_cls': group['suspected_cls'],
          'flaky': group['flaky'],
      }
      organized_suspected_cls.append(shared_result)
  return organized_results
|
b9b28a530402d0745f4ec6adb715c3ac0bb9bf4c
| 3,638,809
|
def volume_division(in_volume1, in_volume2):
    """Divide a volume by another one.

    Args:
        in_volume1 (nibabel volume): data will be a [m,n,o] array
        in_volume2 (nibabel volume): data will be a [m,n,o] array
    Returns:
        out_volume (nibabel volume): data will be a [m,n,o] array,
        division of in_volume1[array] by in_volume2[array], where
        both in_volume1[array] and in_volume2[array] have been
        reoriented to a canonical orientation ('RAS')
    """
    # Reorient both inputs to canonical RAS before comparing voxels.
    ras1 = nib.as_closest_canonical(in_volume1)
    ras2 = nib.as_closest_canonical(in_volume2)
    numerator = ras1.get_data()
    out_affine = ras1.affine.copy()
    denominator = ras2.get_data()
    if numerator.shape != denominator.shape:
        raise ValueError('the input volumes must have the same size')
    # NaNs are treated as zero on both sides.
    numerator[np.isnan(numerator)] = 0
    denominator[np.isnan(denominator)] = 0
    # Remember where the denominator is zero, substitute 1 there to make the
    # division safe, then zero those voxels in the result.
    zx, zy, zz = np.where(denominator == 0)
    denominator[denominator == 0] = 1
    quotient = numerator / denominator
    quotient[zx, zy, zz] = 0
    return nib.Nifti1Image(quotient, out_affine.copy())
|
271ac81f4166324ca9d20eb6f12cf8efb5c7a4ae
| 3,638,810
|
def sparse_eye(num_rows,
               num_columns=None,
               dtype=dtypes.float32,
               name=None):
  """Creates a two-dimensional sparse tensor with ones along the diagonal.
  Args:
    num_rows: Non-negative integer or `int32` scalar `tensor` giving the number
      of rows in the resulting matrix.
    num_columns: Optional non-negative integer or `int32` scalar `tensor` giving
      the number of columns in the resulting matrix. Defaults to `num_rows`.
    dtype: The type of element in the resulting `Tensor`.
    name: A name for this `Op`. Defaults to "eye".
  Returns:
    A `SparseTensor` of shape [num_rows, num_columns] with ones along the
    diagonal.
  """
  with ops.name_scope(name, default_name="eye", values=[num_rows, num_columns]):
    num_rows = _make_int64_tensor(num_rows, "num_rows")
    if num_columns is None:
      num_columns = num_rows
    else:
      num_columns = _make_int64_tensor(num_columns, "num_columns")
    # The diagonal runs over min(rows, cols) positions (i, i).
    diag_len = math_ops.minimum(num_rows, num_columns)
    diag_idx = math_ops.range(diag_len, dtype=dtypes.int64)
    indices = array_ops.stack([diag_idx, diag_idx], axis=1)
    values = array_ops.ones(diag_len, dtype=dtype)
    return sparse_tensor.SparseTensor(
        indices=indices, values=values, dense_shape=[num_rows, num_columns])
|
1b6f47af28e14dfb0cc0dfc3e4d8ced84b8b6c59
| 3,638,811
|
def count_num_sents_cluster(sents_vectors, sections_sents, n_clusters):
    """
    Cluster sentences and count the number of times that sentences from each
    section appear in each cluster.
    Ex: 4 sents from introduction and 3 sentences from conclusion in cluster x.

    Returns:
        A (num_sents, 4) array — for each sentence, the per-section counts of
        the cluster it was assigned to — and the matching column names.
    """
    labels, centroids = cluster_sents(sents_vectors, n_clusters)
    sections = ['abstract', 'introduction', 'conclusion', 'text']
    # Count, for every cluster, how many sentences of each section it holds.
    n_sents_by_cluster = []
    for c in range(n_clusters):
        n_sents = {sec: 0.0 for sec in sections}
        # Indices of the sentences assigned to cluster c.
        for i in np.where(labels == c)[0]:
            if sections_sents[i] != 'highlights':
                n_sents[sections_sents[i]] += 1
        n_sents_by_cluster.append(n_sents)
    # Bug fix: the original emitted dict.values(), whose ordering is not tied
    # to the column names declared below; emit values explicitly in the
    # declared column order so labels and data always agree.
    column_sections = ['introduction', 'text', 'abstract', 'conclusion']
    sents_cluster_values = [
        [n_sents_by_cluster[lbl][sec] for sec in column_sections]
        for lbl in labels
    ]
    columns = ['n_sents_intro', 'n_sents_text', 'n_sents_abst',
               'n_sents_conclu']
    return np.array(sents_cluster_values), columns
|
8ee94a7bc0bb10bfc2af3a03c46717691d534fef
| 3,638,812
|
import hashlib
def check_password_pwned(password, fast=False):
    """Check if a password is in the pwned-passwords list.
    :param password: The plaintext password
    :param fast: Whether the check should finish quickly, even if that may
                 indicate not being able to check the password. This should
                 be used during interactive requests
    :return: A bool indicating whether the password has been pwned or not,
             or `None` in case checking it was not possible.
    """
    # k-anonymity scheme: only the first 5 hex chars of the SHA-1 leave here.
    digest = hashlib.sha1(password.encode()).hexdigest().upper()
    prefix, suffix = digest[:5], digest[5:]
    hashes = _get_pwned_hashes(prefix, 1 if fast else 3)
    if hashes is None:
        return None
    return suffix in hashes
|
c4b7679930708750e8925e65fb6f73bf8283027b
| 3,638,814
|
def is_plugin_loaded(plugin_name):
    """
    Check whether the given Maya plugin is currently loaded.
    :param plugin_name: str
    :return: bool
    """
    loaded = maya.cmds.pluginInfo(plugin_name, query=True, loaded=True)
    return loaded
|
8c57793c620b209f6741c111361ce1c10ca5e7b5
| 3,638,815
|
def drydown_service(lat: float = Query(...), lon: float = Query(...)):
    """Return the drydown result for the requested point.

    Args:
        lat: Latitude of the query point (required query parameter).
        lon: Longitude of the query point (required query parameter).

    Note: `handler` takes (lon, lat) — longitude first.
    """
    return handler(lon, lat)
|
7996eaaf6f530815b20b66b5a8acf1f68c47d81c
| 3,638,816
|
def service_vuln_iptable(hostfilter=None):
    """Returns a dict of services. Contains a list of IPs with (vuln, sev).

    Shape (per the original author's sketch):
        '0/info': { ip1: [ (vulnid, severity, cvss_score), ... ],
                    ip2: [ (vulnid, severity, cvss_score), ... ] }
    """
    service_dict = {}
    # go through each t_service_vulns identifier that is unique
    query = (db.t_service_vulns.id > 0) & (db.t_service_vulns.f_services_id == db.t_services.id)
    query = create_hostfilter_query(hostfilter, query, 't_services')
    for service in db(query).select(db.t_service_vulns.f_services_id, groupby=db.t_service_vulns.f_services_id):
        # find all the records with the service_id
        q = (db.t_service_vulns.f_services_id == service.f_services_id)
        q &= (db.t_service_vulns.f_vulndata_id == db.t_vulndata.id)
        ip_dict = {}
        # go through each
        for row in db(q).select(cache=(cache.ram,120)):
            svc_rec = db.t_services(row.t_service_vulns.f_services_id)
            port_txt = "%s/%s" % (svc_rec.f_number, svc_rec.f_proto)
            host_rec = get_host_record(svc_rec.f_hosts_id)
            ip_info = ip_dict.setdefault(host_rec.f_ipaddr, [])
            # de-duplicate by vuln id before appending
            if row.t_vulndata.f_vulnid not in map(lambda x: x[0], ip_info):
                ip_info.append((row.t_vulndata.f_vulnid, row.t_vulndata.f_severity, row.t_vulndata.f_cvss_score))
            ip_dict[host_rec.f_ipaddr] = ip_info
        # NOTE(review): port_txt is assigned only inside the row loop above, so
        # this block uses the LAST row's port and raises NameError when the
        # inner select returns no rows — confirm whether all rows of one
        # service share the same port, otherwise this mis-buckets results.
        for k, v in ip_dict.iteritems():
            service_dict.setdefault(port_txt, dict())
            service_dict[port_txt][k] = v
    return service_dict
|
a03383596551d9cb789459531c5e8d808c27b502
| 3,638,817
|
def validate_tier_name(name):
    """
    Property: Tier.Name

    Validate that `name` is one of the allowed tier names and return it;
    raise ValueError otherwise.
    """
    allowed = [WebServer, Worker]
    if name in allowed:
        return name
    raise ValueError("Tier name needs to be one of %r" % allowed)
|
6bba596f75dbdebe03bf133c4f1847ab0c5b6c49
| 3,638,818
|
def version_get():  # noqa: E501
    """Version
    Version # noqa: E501
    :rtype: InlineResponse2006
    """
    # Look up the single active version record; bail out with a 500 if absent.
    record = CoreApiVersion.query.filter_by(active=True).first()
    if not record:
        return cors_500(details='Unable to retrieve version information')
    version = Version()
    version.version = record.version
    version.reference = record.reference
    response = InlineResponse2006()
    response.data = [version]
    return cors_200(response_body=response)
|
691bc9776d9f3c6cc1597d4d60fbc740c71ca27d
| 3,638,819
|
def language_register(df):
    """
    Add 'training language', 'test language', 'training register' and
    'test register' columns to a dataframe.

    This assumes that:
        - the dataframe contains a 'training set' and 'test set' column
        - the sets mentioned in these columns are properly documented in
          the scone_phobia.metadata.corpora module. The only exception is
          if a set is called 'None'. In that case, the new columns will
          be set to 'None' as well.
    """
    def lookup(fn, values):
        # 'None' set names propagate unchanged instead of being looked up.
        return ['None' if name == 'None' else fn(name) for name in values]

    df['training language'] = lookup(corpora.language, df['training set'])
    df['test language'] = lookup(corpora.language, df['test set'])
    df['training register'] = lookup(corpora.register, df['training set'])
    df['test register'] = lookup(corpora.register, df['test set'])
    return df
|
e741eda59136a16d50ccf19350558e07177e6afa
| 3,638,820
|
def init(request):
    """
    Wraps an incoming WSGI request in a Context object and initializes
    several important attributes.

    The setup order matters: the session must exist before the user can be
    attached, and i18n is set up twice — once pre-auth (default language)
    and once post-auth (the authenticated user's preference).
    """
    set_umask() # do it once per request because maybe some server
                # software sets own umask
    # A Context passed back in is unwrapped to its underlying WSGI request;
    # anything else gets wrapped fresh.
    if isinstance(request, Context):
        context, request = request, request.request
    else:
        context = AllContext(request)
    context.clock.start('total')
    context.clock.start('init')
    context.lang = setup_i18n_preauth(context)
    context.session = context.cfg.session_service.get_session(context)
    context.user = setup_user(context, context.session)
    # Re-resolve the language now that the user (and their prefs) is known.
    context.lang = setup_i18n_postauth(context)
    def finish():
        pass
    context.finish = finish
    context.reset()
    context.clock.stop('init')
    return context
|
e8e455611aceea6ac5b745e9b618b1375b82108e
| 3,638,822
|
def fetch_arm_sketch(X, ks, tensor_proj=True, **kwargs_rg):
    """
    Compute an arm sketch (randomly projected mode-n unfolding) for every
    mode of a tensor.

    :param X: the tensor of dimension N
    :param ks: array of size N with the sketch sizes per mode
    :param tensor_proj: True: use tensor random projection,
        otherwise, use normal one
    :param kwargs_rg: extra keyword arguments passed to the generator
    :return: list of two, first element is list of arm sketches
        and the second one is list of random matrices with size I_n x k_n
    """
    # Both branches of the original differed only in which generator they
    # called; select it once up front instead of duplicating the body.
    generator = (tensor_random_matrix_generator if tensor_proj
                 else random_matrix_generator)
    arm_sketches = []
    omegas = []
    for i, n in enumerate(X.shape):
        # Random matrix acts on all modes except mode i.
        shape = list(X.shape)
        del shape[i]
        omega = generator(shape, ks[i], **kwargs_rg)
        arm_sketches.append(tl.unfold(X, mode=i) @ omega)
        omegas.append(omega)
    return arm_sketches, omegas
|
893da47798a3c167111072fe627a78e15d1996dd
| 3,638,823
|
def fitness(member):
    """Computes the fitness of a species member.
    http://bit.ly/ui-lab5-dobrota-graf

    The fitness is a piecewise-linear function over [0, 1024); out-of-range
    members score -1.
    """
    if member < 0 or member >= 1024:
        return -1
    # Each check only needs the upper bound: earlier branches already
    # excluded everything below it.
    if member < 30:
        return 60.0
    if member < 90:
        return member + 30.0
    if member < 120:
        return 120.0
    if member < 210:
        return -0.83333 * member + 220
    if member < 270:
        return 1.75 * member - 322.5
    if member < 300:
        return 150.0
    if member < 360:
        return 2.0 * member - 450
    if member < 510:
        return -1.8 * member + 918
    if member < 630:
        return 1.5 * member - 765
    if member < 720:
        return -1.33333 * member + 1020
    if member < 750:
        return 60.0
    if member < 870:
        return 1.5 * member - 1065
    if member < 960:
        return -2.66667 * member + 2560
    return 0
|
81e8bd12da458f0c4f2629e3781c2512c436c577
| 3,638,824
|
def style_loss(content_feats, style_grams, style_weights):
    """
    Computes the style loss at a set of layers.
    Inputs:
    - content_feats: list of the features at every layer of the current image.
    - style_grams: List of the same length as content_feats, where style_grams[i]
      is a Tensor giving the Gram matrix of the source style image computed at
      the i-th style layer.
    - style_weights: List of the same length as style_grams, where style_weights[i]
      is a scalar giving the weight for the style loss at the i-th style layer.
    Returns:
    - style_loss: A Tensor containing the scalar style loss.
    """
    style_loss = tf.constant(0.0)
    # NOTE(review): tf.range here drives an eager Python loop over the list
    # indices; a plain range(len(content_feats)) looks equivalent — confirm
    # before changing, as indexing a list with a tensor relies on eager mode.
    for i in tf.range(len(content_feats)):
        # Gram matrix of the current image's features at layer i.
        layer_var = gram_matrix(content_feats[i])
        loss_i = tf.reduce_mean((layer_var - style_grams[i])**2) * style_weights[i]
        style_loss = tf.add(style_loss, loss_i)
    return style_loss
|
ee8b5007ba2adc88d9965408591d1018027cf7f9
| 3,638,825
|
def get_annotations(joints_2d, joints_3d, scale_factor=1.2):
    """Get annotations, including centers, scales, joints_2d and joints_3d.
    Args:
        joints_2d: 2D joint coordinates in shape [N, K, 2], where N is the
            frame number, K is the joint number.
        joints_3d: 3D joint coordinates in shape [N, K, 3], where N is the
            frame number, K is the joint number.
        scale_factor: Scale factor of bounding box. Default: 1.2.
    Returns:
        centers (ndarray): [N, 2]
        scales (ndarray): [N,]
        joints_2d (ndarray): [N, K, 3]
        joints_3d (ndarray): [N, K, 4]
    """
    xs = joints_2d[:, :, 0]
    ys = joints_2d[:, :, 1]
    # A joint is visible when it lies inside the training image bounds.
    inside = ((xs >= 0) & (xs < train_img_size[0]) &
              (ys >= 0) & (ys < train_img_size[1]))
    visibility = np.asarray(inside, dtype=np.float32)[:, :, None]
    joints_2d = np.concatenate([joints_2d, visibility], axis=-1)
    joints_3d = np.concatenate([joints_3d, visibility], axis=-1)
    # Tight bounding box (x_min, y_min, x_max, y_max) per frame.
    bboxes = np.stack([
        np.min(joints_2d[:, :, 0], axis=1),
        np.min(joints_2d[:, :, 1], axis=1),
        np.max(joints_2d[:, :, 0], axis=1),
        np.max(joints_2d[:, :, 1], axis=1)
    ], axis=1)
    centers = (bboxes[:, :2] + bboxes[:, 2:]) / 2
    # Scale is the longer box side in units of 200px, padded by scale_factor.
    scales = scale_factor * np.max(bboxes[:, 2:] - bboxes[:, :2], axis=1) / 200
    return centers, scales, joints_2d, joints_3d
|
27d7614ce32a3eb6b91db7edcb5abe757d011605
| 3,638,826
|
def Intensity(flag, Fin):
    """
    I=Intensity(flag,Fin)
    :ref:`Calculates the intensity of the field. <Intensity>`
    :math:`I(x,y)=F_{in}(x,y).F_{in}(x,y)^*`
    Args::
        flag: 0= no normalization, 1=normalized to 1, 2=normalized to 255 (for bitmaps)
        Fin: input field
    Returns::
        I: intensity distribution (N x N square array of doubles)
    """
    intensity = _np.abs(Fin.field)**2
    if flag <= 0:
        return intensity
    peak = intensity.max()
    if peak == 0.0:
        raise ValueError('Cannot normalize because of 0 beam power.')
    intensity = intensity / peak
    if flag == 2:
        intensity = intensity * 255
    return intensity
|
54642c7e5b68e7c87137534a91d990f90214a4b6
| 3,638,827
|
def index(request):
    """
    Display the index for user related actions.

    The ExportForm is bound to request.POST on POST requests; otherwise
    (request.POST is empty/falsy) it is rendered unbound.
    """
    export_form = ExportForm(request.POST or None)
    return render(request, "users/index.html", {"export_form": export_form})
|
273e3d101034ef7cfd5b8795be42b48de0aea740
| 3,638,828
|
from typing import Dict
from typing import Any
import re
def _get_javascript_and_find_feature_flag(client: HttpSession, script_uri: str, headers: Dict[str, Any] = None) -> Any:
    """
    Read through minified javascript for feature flags.

    Streams the (large) script in chunks and searches for the
    RAW_DEFAULT_FEATURE_FLAGS assignment, returning the raw flag string
    (hex, decimal, or "0b..." binary literal) or None if not found.
    """
    flag_str = None
    # Since this is a large request, read incrementally
    with client.get(
        script_uri,
        headers=headers,
        stream=True,
        name="Login.Feature_Toggles.GetJS",
        catch_response=True
    ) as res:
        test_response_for_error(res, script_uri)
        res.encoding = "utf-8"
        # Keep the previous chunk so a match spanning a chunk boundary
        # is still found in prev_chunk + chunk.
        prev_chunk = ""
        """ Sample regexes for the feature flag are:
        var RAW_DEFAULT_FEATURE_FLAGS=0xdc9fffceebc;
        var RAW_DEFAULT_FEATURE_FLAGS=5802956083228348;
        var RAW_DEFAULT_FEATURE_FLAGS=jsbi__WEBPACK_IMPORTED_MODULE_10__["default"].BigInt("0b110100100111011100000111111111111111001110111010111100");
        """
        for chunk in res.iter_content(8192, decode_unicode=True):
            if flag_str:
                # Not reading the whole stream will throw errors, so continue reading once found
                continue
            script_regexes = [
                r'RAW_DEFAULT_FEATURE_FLAGS=(0x\w+|\d+);',
                r'RAW_DEFAULT_FEATURE_FLAGS=jsbi__WEBPACK_IMPORTED_MODULE_\d+__\["default"\].BigInt\("(0b[01]+)"\);',
            ]
            for script_regex in script_regexes:
                js_match = re.search(script_regex, prev_chunk + chunk)
                if js_match:
                    flag_str = js_match.groups()[0]
            prev_chunk = chunk
    return flag_str
|
632eeba5483dffd5ef9648c41e78cf3b135f0d3d
| 3,638,829
|
def next_power_of_two(v: int) -> int:
    """Return x such that x == 2**i and x >= v (the smallest such power).

    For v <= 1 the result is 1 (2**0), the smallest power of two.

    Note: the original bit-smearing implementation stopped at a 16-bit
    shift, which is wrong for values above 2**32 — Python ints are
    arbitrary precision, so use bit_length() instead.
    """
    if v <= 1:
        return 1
    # (v - 1).bit_length() is the exponent of the smallest power of two >= v.
    return 1 << (v - 1).bit_length()
|
9c62840e2dc2cd44666328c32c48e5867523ba6c
| 3,638,830
|
def triangle(times: np.ndarray, amp: complex, freq: float, phase: float = 0) -> np.ndarray:
    """Continuous triangle wave.

    Built from a sawtooth: 1 - 2*|saw| folds the ramp into a triangle.

    Args:
        times: Times to output wave for.
        amp: Pulse amplitude. Wave range is [-amp, amp].
        freq: Pulse frequency. units of 1/dt.
        phase: Pulse phase.
    """
    shifted_phase = (phase - np.pi/2) / 2
    saw = sawtooth(times, 1, freq, phase=shifted_phase)
    unit_wave = (-2*np.abs(saw) + 1).astype(np.complex_)
    return amp * unit_wave
|
83933dbed893026b1a53490fb32587d918c15a1f
| 3,638,831
|
def add_token(token_sequence: str, tokens: str) -> str:
    """Adds the tokens from 'tokens' that are not already contained in
    `token_sequence` to the end of `token_sequence`::
    >>> add_token('', 'italic')
    'italic'
    >>> add_token('bold italic', 'large')
    'bold italic large'
    >>> add_token('bold italic', 'bold')
    'bold italic'
    >>> add_token('red thin', 'stroked red')
    'red thin stroked'
    >>> add_token('bold', 'old')
    'bold old'
    """
    # Bug fix: the original used substring search (str.find), so e.g. 'old'
    # was wrongly considered present in 'bold'. Compare whole tokens instead.
    present = token_sequence.split(' ')
    for tk in tokens.split(' '):
        if tk and tk not in present:
            token_sequence += ' ' + tk
            present.append(tk)
    return token_sequence.lstrip()
|
2506dd00b55e9d842dc90c40578e0c21d942e73e
| 3,638,832
|
def ket2dm(psi):
    """
    Convert a ket (1-D state vector) into a density matrix.

    Parameters
    ----------
    psi : ndarray
        State vector of shape (d,).

    Returns
    -------
    ndarray
        The (d, d) outer product |psi><psi|.
    """
    bra = psi.conj()
    return np.einsum("i, j -> ij", psi, bra)
|
96773dfe3db251a72d5da41be670c830cd3ff764
| 3,638,833
|
def _make_feature_stats_proto(
    stats_values,
    feature_name):
  """Creates the FeatureNameStatistics proto for one feature.
  Args:
    stats_values: A Dict[str,float] mapping each custom statistic's name to
      its numeric value for this feature, e.g.
      {'Mutual Information': 0.5, 'Correlation': 0.1}.
    feature_name: The name of the feature.
  Returns:
    A FeatureNameStatistic proto containing the custom statistics for a
    feature.
  """
  proto = statistics_pb2.FeatureNameStatistics()
  proto.name = feature_name
  # Sort alphabetically by statistic name to have deterministic ordering.
  for stat_name in sorted(stats_values):
    proto.custom_stats.add(name=stat_name, num=stats_values[stat_name])
  return proto
|
e4babf49fad6a086860a6024aa0ffffc46e034a0
| 3,638,834
|
import torch
def build_lr_scheduler(
    cfg, optimizer: torch.optim.Optimizer
) -> torch.optim.lr_scheduler._LRScheduler:
    """
    Build a LR scheduler from config.

    Supported cfg.NAME values: "WarmupMultiStepLR", "WarmupCosineLR",
    "OneCycleLR". Raises ValueError for anything else.
    """
    scheduler_name = cfg.NAME
    if scheduler_name == "WarmupMultiStepLR":
        return WarmupMultiStepLR(
            optimizer,
            cfg.STEPS,
            cfg.GAMMA,
            warmup_factor=cfg.WARMUP_FACTOR,
            warmup_iters=cfg.WARMUP_ITERS,
            warmup_method=cfg.WARMUP_METHOD,
        )
    if scheduler_name == "WarmupCosineLR":
        return WarmupCosineLR(
            optimizer,
            cfg.MAX_ITER,
            warmup_factor=cfg.WARMUP_FACTOR,
            warmup_iters=cfg.WARMUP_ITERS,
            warmup_method=cfg.WARMUP_METHOD,
        )
    if scheduler_name == "OneCycleLR":
        return OneCycleLR(
            optimizer,
            cfg.MAX_LR,
            total_steps=cfg.MAX_ITER,
            pct_start=cfg.PCT_START,
            base_momentum=cfg.BASE_MOM,
            max_momentum=cfg.MAX_MOM,
            div_factor=cfg.DIV_FACTOR
        )
    raise ValueError("Unknown LR scheduler: {}".format(scheduler_name))
|
3f4ec6a4ab2601642225118b9a7e9a60d7af6548
| 3,638,835
|
def cconv(x, y, P):
    """ Periodic (circular) convolution with period P of two signals x and y
    """
    xw = _wrap(x, P)
    hw = _wrap(y, P)
    # out[k] = sum_n xw_reversed[(n + k + 1) mod P] * hw[n]
    rev = xw[::-1]
    out = np.empty(P)
    for k in range(P):
        out[k] = np.dot(np.roll(rev, k + 1), hw)
    return out
|
4bc0abba6af31ea98e292a82aab704c76748cceb
| 3,638,836
|
def keypoint_loss_targets(uvd, keys_uvd, mparams):
  """Computes the supervised keypoint loss between computed and gt keypoints.

  Args:
    uvd: [batch, order, num_targs, 4, num_kp] Predicted set of keypoint uv's
      (pixels).
    keys_uvd: [batch, order, num_targs, 4, num_kp] The ground-truth set of uvdw
      coords.
    mparams: model parameters.

  Returns:
    Keypoint projection loss of size [batch, order].
  """
  print('uvd shape in klt [batch, order, num_targs, 4, num_kp]:', uvd.shape)
  print('keys_uvd shape in klt [batch, order, num_targs, 4, num_kp]:',
        keys_uvd.shape)
  # Bring predictions and ground truth into the same normalized space.
  pred_norm = nets.to_norm(uvd, mparams)
  gt_norm = nets.to_norm(keys_uvd, mparams)
  # Squared uv distance, summed over the keypoint and uv axes.
  sq_dist = tf.square(pred_norm[..., :2, :] - gt_norm[..., :2, :])
  sq_dist = tf.reduce_sum(sq_dist, axis=[-1, -2])  # [batch, order, num_targs]
  print('wd shape in klt [batch, order, num_targs]:', sq_dist.shape)
  # Average over targets to get one loss value per (batch, order).
  return tf.reduce_mean(sq_dist, axis=[-1])
|
0bd44661695e1024a56b441110ca4e9c3dae3634
| 3,638,839
|
import re
def get_initial_epoch(log_path):
    """Return the epoch at which to resume training, parsed from a log file.

    Scans the CSV-style log for lines whose first comma-separated field is an
    epoch number, and returns (last recorded epoch + 1) so a rerun continues
    where the previous run stopped.  Returns 0 when the log does not exist or
    contains no epoch lines.

    (BUG FIX: the old code seeded its tracker with -1 and copied it out
    unconditionally, so a log with no epoch lines produced -1 instead of 0.)
    """
    initial_epoch = 0
    if tf.gfile.Exists(log_path):
        with open(log_path) as log_file:
            for line in log_file:
                line = line.strip()
                if not line:
                    continue
                first_field = re.split(r',', line)[0]
                if first_field.isdigit():
                    # Resume at the epoch after the last one recorded.
                    initial_epoch = 1 + int(first_field)
    return initial_epoch
|
d0e2adcccbfa59e1f685f698255c750d5a85ddaf
| 3,638,840
|
from datetime import datetime
def categorise_town_flood_risk(stations, dt, degree, risklevel=3, plot=False):
    """Rank towns by flood risk from the recent rate of change of water level.

    For every station the water level over the last ``dt`` days is fetched and
    fitted with a polynomial of order ``degree``; the derivative of that fit at
    the most recent date is the station's risk index, and each town is scored
    by the highest index among its stations.  The indices across all stations
    are modelled as a normal distribution to derive the category thresholds:
    severe (> mean + 2*std), high (> mean + std), moderate (> mean), low
    otherwise.  This is only a relative warning — it flags stations rising
    faster than the national norm, not absolute flood risk.

    ``risklevel`` (0=low .. 3=severe) sets the minimum category included in
    the returned list; ``plot`` additionally plots the fitted water level for
    the first station of each returned town.

    Returns a list of ``(town_name, label)`` tuples with label in
    {'severe', 'high', 'moderate', 'low'}.
    """
    towns = {}
    for station in stations: #iterate through list of station objects and create dictionary of stations in a town
        if station.town in towns.keys():
            towns[station.town].append(station)
        else:
            towns[station.town] = [station]
    townsandrisks = [] #create empty list for towns and their risk rating
    total_risks = [] #create empty list for all risk-indices to calculate spread of risk and thresholds for flooding
    risk_by_town = [] #create empty list for towns and their highes risk-index
    for town in towns.keys():
        riskindicator = []
        stations = towns.get(town)
        for station in stations:
            # NOTE(review): with `from datetime import datetime` in scope,
            # `datetime.timedelta` looks like an AttributeError — confirm which
            # `datetime` name this module actually binds.
            dates, levels = fetch_measure_levels(station.measure_id, datetime.timedelta(dt))
            if len(dates) == 0: #use polyfit function to extract coefficients and d0 from the stations in question
                continue
            poly, d0 = polyfit(dates, levels, degree)
            coeffs = poly.c
            diffcoeffs = np.zeros(len(coeffs))
            for i in range(len(coeffs)):
                diffcoeffs[i] = coeffs[i]*(len(coeffs)-i-1) #create list with differentiated coefficients (original coefficient multiplied by the grade of the polynomial (determined by i))
            diff_value = 0 #calculate the instantaneus rate of change at the last date in the list of dates
            x = matplotlib.dates.date2num(dates)
            for i in range(len(diffcoeffs)):
                diff_value += diffcoeffs[i]*(x[-1]+d0)**i
            riskindicator.append(diff_value)
            total_risks.append(diff_value)
        if len(riskindicator) == 0:
            continue
        risk_by_town.append((town, max(riskindicator)))
    mean = np.mean(total_risks) #as we are trying to find relative flood risks and want to determine the towns with the highest flood risk in the country, we can assume that the risk-index is distributed normally
    st_dev = np.std(total_risks) #using the mean and standard deviation we can set the boundaries for severe, high, moderate and low flood risk
    tol1 = mean + 2 * st_dev
    tol2 = mean + st_dev
    tol3 = mean
    for town in risk_by_town:
        if town[1] > tol1: #append and sort a list of towns and their risk level as tuples. 0 == low, 1 == moderate, 2 == high, 3 == severe
            townsandrisks.append((town[0], 3)) #Exact tolerances to be determined once 2F completed!
        elif town[1] <= tol1 and town[1] > tol2:
            townsandrisks.append((town[0], 2))
        elif town[1] <= tol2 and town[1] > tol3:
            townsandrisks.append((town[0], 1))
        elif town[1] <= tol3:
            townsandrisks.append((town[0], 0))
    sorted_by_key(townsandrisks, 1, reverse=True)
    greatestrisks = []
    # Map numeric categories to labels, keeping only categories >= risklevel.
    for town in townsandrisks:
        if town[1] >= risklevel:
            if town[1] == 3:
                greatestrisks.append((town[0], 'severe'))
            elif town[1] == 2:
                greatestrisks.append((town[0], 'high'))
            elif town[1] == 1:
                greatestrisks.append((town[0], 'moderate'))
            elif town[1] == 0:
                greatestrisks.append((town[0], 'low')) #return list of towns with severe flood risk
    # Optionally plot the fitted level curve for each town's first station.
    for town in greatestrisks:
        station = towns[town[0]][0]
        dates, levels = fetch_measure_levels(station.measure_id, datetime.timedelta(dt))
        if plot == True:
            plot_water_level_with_fit(station, dates, levels, degree)
        else:
            continue
    return greatestrisks
|
526550890d8201c4a7b90f9ba37fa3f383118293
| 3,638,841
|
def create_clients(num_clients, client_data, input_str='input', label_str='label', client_str='client-',
                   distribute=False):
    """
    Create K clients from a dictionary of per-client data.

    (Docstring fixed: the old version documented a nonexistent ``config``
    parameter and omitted ``client_str`` / ``distribute``.)

    :param num_clients: the number of clients to create
    :param client_data: dict of client data, indexed as
        ``client_data[f'{client_str}{id}'][input_str or label_str]``
    :param input_str: key under which each client's inputs are stored
    :param label_str: key under which each client's labels are stored
    :param client_str: prefix of the per-client keys in ``client_data``
    :param distribute: forwarded to the ``Client`` constructor
    :return: List[Client] for client ids 1..num_clients
    """
    clients = []
    # Client ids are 1-based: 'client-1' .. f'client-{num_clients}'.
    for i in range(num_clients):
        client_id = i + 1
        data = client_data[f'{client_str}{client_id}']
        clients.append(Client(client_id, data[input_str], data[label_str], distribute))
    return clients
|
77a26cd53f701eaadd4d6577b80dc894022002d7
| 3,638,842
|
import typing
def l2_loss(
        h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray],
        grad_h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray],
        theta: np.ndarray,
        x, y):
    """l2_loss: standard l2 (sum of squared residuals) loss.

    Defined as sum((h(theta, x) - y)^2); the usual objective for linear
    regression.  ``grad_h`` is accepted for interface compatibility but is
    not used by the loss itself.

    :param h: hypothesis function that models our data (x) using theta
    :param grad_h: gradient of the hypothesis function (unused here)
    :param theta: parameters of the hypothesis function
    :param x: matrix of samples, shape (samples, features)
    :param y: expected targets, shape (samples,)
    :return: the l2 loss value as a float
    """
    residuals = h(theta, x) - y
    return np.sum(residuals ** 2)
|
f958476912a1866bd55653d3f896ddb0fe93a614
| 3,638,843
|
def BestLogLikelihood(aln, alphabet=None, exclude_chars = None,
                allowed_chars='ACGT', motif_length=None, return_length=False):
    """returns the best log-likelihood according to Goldman 1993.

    Arguments:
        - alphabet: a sequence alphabet object; when given it supplies both
          the moltype and the motif length.
        - motif_length: 1 for nucleotide, 2 for dinucleotide, etc ..
        - exclude_chars: a series of characters used to exclude motifs
        - allowed_chars: only motifs that contain a subset of these are
          allowed
        - return_length: whether to also return the number of alignment
          columns used
    """
    assert alphabet or motif_length, "Must provide either an alphabet or a motif_length"
    if alphabet:
        # Re-load through the alphabet so character compliance is enforced.
        motif_length = alphabet.getMotifLen()
        aln = LoadSeqs(data=aln.todict(), moltype=alphabet.MolType)
    else:
        aln = LoadSeqs(data=aln.todict())
    columns = aligned_columns_to_rows(aln, motif_length, exclude_chars,
                                      allowed_chars)
    log_likelihood = get_G93_lnL_from_array(columns)
    if return_length:
        return log_likelihood, len(columns)
    return log_likelihood
|
a3b5338b3925281941a55ca28250d262a69dce1c
| 3,638,844
|
def process_h5_file(h5_file):
    """Extract the artist familiarity from one Million Song Dataset h5 file.

    Each file stores exactly one song, so the value lives in the first (and
    only) row of the metadata/songs table.  See
    https://github.com/tbertinmahieux/MSongsDB/blob/master/PythonSrc/hdf5_getters.py
    and http://millionsongdataset.com/pages/field-list/ for the file layout.
    """
    songs_table = h5_file['metadata']['songs']
    first_row = songs_table[:1]
    return first_row['artist_familiarity'][0]
|
de664a3e1ea88c8c8cd06b210e4c6b756a4d02a6
| 3,638,845
|
import logging
import copy
from perses.utils.openeye import iupac_to_oemol
import perses.rjmc.geometry as geometry
import perses.rjmc.topology_proposal as topology_proposal
from perses.tests.utils import compute_potential_components
def run_geometry_engine(index=0):
    """
    Run the geometry engine a few times to make sure that it actually runs
    without exceptions. Convert n-pentane to 2-methylpentane

    Builds benzene and biphenyl, maps their atoms, proposes new coordinates
    for the transformed molecule via FFAllAngleGeometryEngine, writes the
    proposal to a PDB file and prints potential energies before/after plus a
    valence-only energy check.

    NOTE(review): relies on module-level names not visible in this block
    (`beta`, `align_molecules`, `oemol_to_openmm_system`, `openmm`, `app`,
    `unit`) — confirm they are provided by this module's imports.

    :param index: integer suffix used in the output PDB filename.
    :return: float, final potential energy with units stripped.
    """
    logging.basicConfig(level=logging.INFO)
    molecule_name_1 = 'benzene'
    molecule_name_2 = 'biphenyl'
    #molecule_name_1 = 'imatinib'
    #molecule_name_2 = 'erlotinib'
    molecule1 = iupac_to_oemol(molecule_name_1)
    molecule2 = iupac_to_oemol(molecule_name_2)
    new_to_old_atom_mapping = align_molecules(molecule1, molecule2)
    sys1, pos1, top1 = oemol_to_openmm_system(molecule1)
    sys2, pos2, top2 = oemol_to_openmm_system(molecule2)
    sm_top_proposal = topology_proposal.TopologyProposal(new_topology=top2, new_system=sys2, old_topology=top1, old_system=sys1,
                                                      old_chemical_state_key='',new_chemical_state_key='', logp_proposal=0.0, new_to_old_atom_map=new_to_old_atom_mapping, metadata={'test':0.0})
    sm_top_proposal._beta = beta
    geometry_engine = geometry.FFAllAngleGeometryEngine(metadata={})
    # Turn on PDB file writing.
    geometry_engine.write_proposal_pdb = True
    geometry_engine.pdb_filename_prefix = 't13geometry-proposal'
    test_pdb_file = open("%s_to_%s_%d.pdb" % (molecule_name_1, molecule_name_2, index), 'w')
    def remove_nonbonded_force(system):
        """Remove NonbondedForce from specified system."""
        force_indices_to_remove = list()
        for [force_index, force] in enumerate(system.getForces()):
            if force.__class__.__name__ == 'NonbondedForce':
                force_indices_to_remove.append(force_index)
        # Remove from the back so earlier indices stay valid.
        for force_index in force_indices_to_remove[::-1]:
            system.removeForce(force_index)
    valence_system = copy.deepcopy(sys2)
    remove_nonbonded_force(valence_system)
    integrator = openmm.VerletIntegrator(1*unit.femtoseconds)
    integrator_1 = openmm.VerletIntegrator(1*unit.femtoseconds)
    # Briefly equilibrate the old molecule to get fresh starting positions.
    ctx_1 = openmm.Context(sys1, integrator_1)
    ctx_1.setPositions(pos1)
    ctx_1.setVelocitiesToTemperature(300*unit.kelvin)
    integrator_1.step(1000)
    pos1_new = ctx_1.getState(getPositions=True).getPositions(asNumpy=True)
    context = openmm.Context(sys2, integrator)
    context.setPositions(pos2)
    state = context.getState(getEnergy=True)
    print("Energy before proposal is: %s" % str(state.getPotentialEnergy()))
    openmm.LocalEnergyMinimizer.minimize(context)
    # Forward proposal and its reverse log-probability for the RJMC move.
    new_positions, logp_proposal = geometry_engine.propose(sm_top_proposal, pos1_new, beta)
    logp_reverse = geometry_engine.logp_reverse(sm_top_proposal, new_positions, pos1, beta)
    print(logp_reverse)
    app.PDBFile.writeFile(top2, new_positions, file=test_pdb_file)
    test_pdb_file.close()
    context.setPositions(new_positions)
    state2 = context.getState(getEnergy=True)
    print("Energy after proposal is: %s" %str(state2.getPotentialEnergy()))
    print(compute_potential_components(context))
    valence_integrator = openmm.VerletIntegrator(1*unit.femtoseconds)
    platform = openmm.Platform.getPlatformByName("Reference")
    valence_ctx = openmm.Context(valence_system, valence_integrator, platform)
    valence_ctx.setPositions(new_positions)
    vstate = valence_ctx.getState(getEnergy=True)
    print("Valence energy after proposal is %s " % str(vstate.getPotentialEnergy()))
    final_potential = state2.getPotentialEnergy()
    # Strip units so callers get a plain float.
    return final_potential / final_potential.unit
|
a2918f3073f3b0ec34ad3519693d25b433852b04
| 3,638,846
|
def add_changes_metrics(df, connection):
    """Join jira_issues data with the FTS3 commit metrics.

    For each issue key, looks up its commits and attaches num_commits,
    lines_added, lines_removed and files_changed columns to a copy of df.
    """
    enriched = df.copy()
    metrics = df["key"].apply(get_commits_from_issue, args=(connection,))
    # get_commits_from_issue returns a tuple; positions 1..4 hold the metrics.
    columns = ["num_commits", "lines_added", "lines_removed", "files_changed"]
    for position, column in enumerate(columns, start=1):
        enriched[column] = [entry[position] for entry in metrics]
    return enriched
|
67f61a4400f3ce67bc9c877017609e7460c82aaf
| 3,638,847
|
def binary_str(num):
    """ Return a binary string representation of the positive integer 'num'
    :type num: int
    :return: binary digits of num as a string ('' for non-positive input)
    Examples:
    >>> binary_str(2)
    '10'
    >>> binary_str(5)
    '101'
    """
    # bin() covers the positive case; non-positive inputs keep the original
    # behaviour of producing an empty string.
    return bin(num)[2:] if num > 0 else ''
|
dde400323fccb9370c67197f555d9c41c40084a6
| 3,638,848
|
def sample_target_pos(batch_size,TARGET_MAX_X, TARGET_MIN_X, TARGET_MAX_Y, TARGET_MIN_Y):
    """
    Sample target_position or robot_position by respecting to their limits.

    Draws ``batch_size`` (x, y) points uniformly inside the given rectangle
    and returns them as a FloatTensor of shape (batch_size, 2).
    """
    span_x = TARGET_MAX_X - TARGET_MIN_X
    span_y = TARGET_MAX_Y - TARGET_MIN_Y
    xs = np.random.random_sample(batch_size) * span_x + TARGET_MIN_X
    ys = np.random.random_sample(batch_size) * span_y + TARGET_MIN_Y
    # Stack the coordinates column-wise: row i is the i-th sampled point.
    return th.FloatTensor(np.stack((xs, ys), axis=1))
|
5e0536662976b3965b3325b0214e6d41678e6f3f
| 3,638,849
|
def clf_perceptron(vector_col:str,
                   df_train:pd.DataFrame,
                   model:Perceptron,
                   ) -> list:
    """Classify the vectors in ``df_train[vector_col]`` with a fitted perceptron.

    Arguments:
        vector_col (str): name of the column holding the vectors to classify
        df_train (pd.DataFrame): dataframe with the data to classify
        model (sklearn.linear_model.Perceptron): fitted (multi-layer) perceptron

    Returns:
        list of "0" and "1" predictions
    """
    vectors = df_train[vector_col].to_list()
    return list(model.predict(vectors))
|
e692f4b887b5b0235ef196250fb3dc4415ec510a
| 3,638,850
|
def compute_phot_error(flux_variance, bg_phot, bg_method, ap_area, epadu=1.0):
    """Computes flux errors using the DAOPHOT-style error model.

    Parameters
    ----------
    flux_variance : array
        flux values
    bg_phot : array
        background brightness values (needs 'aperture_std' and
        'aperture_area' entries)
    bg_method : string
        background method (unused by the arithmetic itself)
    ap_area : array
        area of the aperture in square pixels
    epadu : float
        (optional) gain in electrons per adu; only use if image units
        aren't e-.  Default 1.0.

    Returns
    -------
    flux_error : array
        array of flux errors
    """
    # Background term: per-pixel variance scaled by aperture area, inflated
    # by the aperture-to-background-area ratio.
    aperture_ratio = 1. + ap_area / bg_phot['aperture_area']
    bg_variance_terms = ap_area * bg_phot['aperture_std'] ** 2. * aperture_ratio
    total_variance = flux_variance / epadu + bg_variance_terms
    return total_variance ** .5
|
4470277ebc41cce0e2c8c41c2f03e3466473d749
| 3,638,851
|
def reflect_table(conn, table_name, schema='public'):
    """Reflect basic table attributes (columns and primary key) into a Table."""
    columns = [
        Column(**column_data)
        for column_data in get_column_metadata(conn, table_name, schema=schema)
    ]
    pk = PrimaryKey(list(get_primary_keys(conn, table_name, schema=schema)))
    return Table(table_name, columns, pk, schema=schema)
|
fb41243328a7b3da27dbcc355fdf36ef20915f94
| 3,638,852
|
def get_token(token_file):
    """
    Read a token from token_file (the entire file contents, stripped).

    Raises ReleaseException when the file is empty or whitespace-only.
    """
    with open(token_file, "r") as fin:
        token = fin.read().strip()
    if not token:
        raise ReleaseException("No valid token found in {}".format(token_file))
    return token
|
764d3ca320953cd5ccfeccd60a9cbeb2efb504e1
| 3,638,853
|
def get_description(soup):
    """Extract the textual description of a vacancy.

    Tries the plain (non-branded) description block first, then the branded
    one; whichever is present wins.
    """
    non_branded = soup.find('div', {'data-qa':'vacancy-description'})
    branded = soup.find('div', {'class':'vacancy-section HH-VacancyBrandedDescription-DANGEROUS-HTML'})
    return (non_branded or branded).get_text()
|
82a254774be3eb55762d9964928429d168572a2a
| 3,638,854
|
from pathlib import Path
def read_fits(fn: Path, ifrm: int, twoframe: bool) -> np.ndarray:
    """
    ifits not ifrm for fits!

    Reads one frame (or a pair of consecutive frames) from the primary HDU.
    """
    if fits is None:
        raise ImportError('Need Astropy for FITS')
    # memmap = False required thru at least Astropy 1.3.2 due to BZERO used...
    with fits.open(fn, mode='readonly', memmap=False) as hdul:
        primary = hdul[0]
        if twoframe:
            return primary[ifrm:ifrm+2, :, :]
        return primary[ifrm+1, :, :]
|
f307cb2dc17c518b26591a3a8adf165eca000d66
| 3,638,855
|
def normalize(arrList):
    """
    Normalize arrList column-wise: divide each column by its standard
    deviation when that deviation is nonzero, and leave zero-deviation
    columns unchanged.
    Args:
        arrList (a list of lists of numbers)
    Returns:
        list, list: the per-column standard deviations, and the normalized
            arrayList (columns with zero standard deviation pass through
            unchanged).
    >>> normalize([[1, 2, 3], [6, 7, 8]]) # doctest:+ELLIPSIS
    ([2.5, 2.5, 2.5],...
    >>> normalize([[1, 2, 3], [1, 7, 3]]) # doctest:+ELLIPSIS
    ([0.0, 2.5, 0.0],...
    >>> normalize([[]])
    ([], [[]])
    """
    _, centered = mean_center(arrList)
    # Column-wise population std dev: sqrt of the mean squared deviation.
    stdevs = [dev ** 0.5 for dev in columnwise_means(multiply(centered, centered))]
    # Zero-deviation columns get scale 1 so they pass through unchanged.
    scales = [1 / (s if s else 1) for s in stdevs]
    return stdevs, scalarMultCols(scales, arrList)
|
dfde56e87ab916f2777257782bc3f797e8bdfe74
| 3,638,856
|
import requests
from bs4 import BeautifulSoup
import re
def get_reddit_backup_urls(mode):
    """
    Parse reddit backups on pushshift.io.

    :param mode: 'Q' for questions, 'A' for answers
    :return: dict of (year, month): backup_url
    """
    mode = {'Q': 'submissions', 'A': 'comments'}[mode]
    page = requests.get(REDDIT_URL + mode)
    soup = BeautifulSoup(page.content, 'lxml')
    urls = {}
    for tag in soup.find_all(attrs={'class': 'file'}):
        links = tag.find_all(lambda x: x.has_attr('href'))
        if not links:
            continue
        href = links[0]['href']
        # Backup files are named by year-month, e.g. ".../2016-05..."
        dates = re.findall(r"20[0-9]{2}-[0-9]{2}", href)
        if dates:
            year, month = dates[0].split('-')
            urls[(int(year), int(month))] = REDDIT_URL + mode + href[1:]
    return urls
|
e66ee167d1b10d0e67f84c60d8f4ae589eaeec38
| 3,638,857
|
def collect_elf_segments(elf, file_type, segment_els, section_prefix, namespace, image, machine, pools):
    """
    Process all of the segment elements in a program/kernel, etc.

    For each ELF segment an attribute record (virtual/physical address,
    alignment, flags) is built, any per-segment overrides from the matching
    XML segment element are applied, and the segment is registered with the
    image.  Segments with identical flags are batched into allocation groups
    so they may share pages.

    NOTE: legacy Python 2 code (`raise E, msg`, `dict.has_key`).

    Returns the list of segments actually added to the image.
    """
    elf_seg_names = elf_segment_names(elf)
    shash = segments_hash(segment_els)
    # Test that every segment element references a segment in the ELF
    # file.
    elf_seg_names_txt = elf_seg_names.values()
    for seg_name in shash.keys():
        if seg_name not in elf_seg_names_txt:
            raise MergeError, \
                  '%s: Cannot find segment "%s" in the ELF file. ' \
                  'Valid values are %s' % (namespace.abs_name('.'), seg_name, elf_seg_names_txt)
    collected_segments = []
    i = 0
    last_segment = None # The last segment processed.
    group = []
    for segment in elf.segments:
        attrs = image.new_attrs(namespace, for_segment = True)
        attrs.virt_addr = segment.vaddr
        attrs.attach = segment.flags
        attrs.elf_flags = segment.flags
        # The kernel is *very* picky about alignment, so use the
        # segment's alignment rules by default.
        if file_type == image.KERNEL:
            attrs.align = segment.align
        # XXX: The kernel image is generated very weirdly on x86. These
        # hacks are connected to some other PC99 hacks in tools/build.py.
        if elf.machine == EM_386:
            attrs.align = machine.min_page_size()
        attrs.phys_addr = segment.paddr
        if elf_seg_names.has_key(i):
            seg_name = elf_seg_names[i]
            attrs.name = seg_name
            # XML overrides win over the values taken from the ELF headers.
            if shash.has_key(seg_name):
                segment_el = shash[seg_name]
                attrs.phys_addr = getattr(segment_el, 'phys_addr', attrs.phys_addr)
                attrs.physpool = getattr(segment_el, 'physpool', attrs.physpool)
                attrs.align = getattr(segment_el, 'align', attrs.align)
                attrs.pager = getattr(segment_el, 'pager', attrs.pager)
                attrs.direct = getattr(segment_el, 'direct', attrs.direct)
                attrs.protected = getattr(segment_el, 'protected', attrs.protected)
                if hasattr(segment_el, 'attach'):
                    attrs.attach = attach_to_elf_flags(segment_el.attach)
                if hasattr(segment_el, 'pager'):
                    attrs.pager = make_pager_attr(segment_el.pager)
                if hasattr(segment_el, 'cache_policy'):
                    attrs.cache_policy = machine.get_cache_policy(segment_el.cache_policy)
        else:
            attrs.name = str(i)
        s = image.add_segment(segment_index = i,
                              segment = segment,
                              section_prefix = section_prefix,
                              file_type = file_type,
                              attrs = attrs,
                              machine = machine,
                              pools = pools)
        if s is not None:
            collected_segments.append(s)
            # It is possible for different segments to occupy the same
            # page of memory (yes! really!). To accommodate this
            # place segments that have the same flags into the same
            # static allocation groups, where sub-page allocation is
            # allowed.
            #
            # Do not set the maximum distance the segments can be
            # apart. The distance support was originally designed
            # with this code in mind, but with testing it's been shown
            # that it is difficult to get the distance value right.
            if last_segment is not None and \
               last_segment.flags != segment.flags:
                image.add_group(None, group)
                group = []
            group.append(s)
            last_segment = segment
        i = i + 1
    image.add_group(None, group)
    return collected_segments
|
9c82b7dfb6d6d9bcdfeae6e11683826b6b4b2a18
| 3,638,858
|
def getStrVector(tarstr, cdict, clen=None):
    """Vectorize a string as character-frequency counts.

    Each slot of the returned vector holds the occurrence count of one
    character from the character set; ``cdict`` maps each character to its
    slot index.  (Docstring translated from Chinese; ``len(cdict.keys())``
    replaced with the idiomatic ``len(cdict)``.)

    :param tarstr: string to vectorize
    :param cdict: dict mapping character -> index in the output vector
    :param clen: vector length; defaults to the size of the character set
    :return: list of per-character counts
    """
    if not clen:
        clen = len(cdict)
    vec = [0] * clen
    for ch in tarstr:
        vec[cdict[ch]] += 1
    return vec
|
d23b54288d6a9cff2f3999c54f53a7f8e2d5d35f
| 3,638,859
|
def process_request(registry: ServiceRegistry, url: str) -> str:
    """ Given URL (customer name), make a Request to handle interaction """
    # Each request is processed inside its own child container.
    container = registry.create_container()
    # Make the url available for injection under the Url key.
    container.register_singleton(url, Url)
    # The View pulls its dependencies from the container; calling it renders
    # the greeting response.
    view = container.get(View)
    return view()
|
48773840f325838e97f7a8c2b7c6dfa056a4f8e6
| 3,638,860
|
def percent_color(percentage: float) -> str:
    """ Generate a proper color for a percentage for printing. """
    # Green above 70, yellow above 30, red otherwise.
    if percentage > 70:
        color = 'green'
    elif percentage > 30:
        color = 'yellow'
    else:
        color = 'red'
    return colored(percentage, color)
|
bff809b98a98492cd67f5f3b1274bbd9f2a42203
| 3,638,861
|
def _generate_image_and_label_batch(image, image_raw, label, min_queue_examples,
                                    batch_size, shuffle):
  """Construct a queued batch of images and labels.

  Example:
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
                             min_fraction_of_examples_in_queue)

  Args:
    image: 3-D Tensor of [height, width, 3] of type.float32.
    image_raw: 3-D Tensor of the unprocessed image.
    label: 1-D Tensor of type.int32
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides of batches of examples.
    batch_size: Number of images per batch.
    shuffle: boolean indicating whether to use a shuffling queue.

  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    images_raw: Raw images. 4D tensor of [batch_size, ...] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  num_preprocess_threads = 8
  if shuffle:
    # BUG FIX: this branch previously bound `labels`, leaving `label_batch`
    # undefined and raising NameError at the return below when shuffle=True.
    images, images_raw, label_batch = tf.train.shuffle_batch(
        [image, image_raw, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size,
        min_after_dequeue=min_queue_examples)
  else:
    images, images_raw, label_batch = tf.train.batch(
        [image, image_raw, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size)
  # Display the training images in the visualizer.
  tf.summary.image('images', images)
  return images, images_raw, label_batch
|
1abd2059b0c752b3c884c2525870581afc07fdab
| 3,638,862
|
def elgamal_add(*ciphertexts: ElGamalCiphertext) -> ElGamalCiphertext:
    """
    Homomorphically accumulates one or more ElGamal ciphertexts by pairwise multiplication. The exponents
    of vote counters will add.
    """
    assert len(ciphertexts) != 0, "Must have one or more ciphertexts for elgamal_add"
    # Multiply pads together and data together; the plaintext exponents add.
    combined_pad = mult_p(*(c.pad for c in ciphertexts))
    combined_data = mult_p(*(c.data for c in ciphertexts))
    return ElGamalCiphertext(combined_pad, combined_data)
|
eb06c4a893aada24c9acb82c5542a8e891802b23
| 3,638,864
|
def find_min_cost_thresholds(roc_curves, base_rates, proportions, cost_matrix):
    """Compute thresholds by attribute values that minimize cost.

    :param roc_curves: Receiver operating characteristic (ROC)
        by attribute.
    :type roc_curves: dict
    :param base_rates: Base rate by attribute.
    :type base_rates: dict
    :param proportions: Proportion of each attribute value.
    :type proportions: dict
    :param cost_matrix: Cost matrix by [[tn, fp], [fn, tp]].
    :type cost_matrix: sequence
    :return: Thresholds, FPR and TPR by attribute and cost value.
    :rtype: tuple
    """
    cutoffs = {}
    fpr_tpr = {}
    total_cost = 0
    thresholds = _extract_threshold(roc_curves)
    for group, roc in roc_curves.items():
        fprs, tprs = roc[0], roc[1]
        # Negate the benefit so argmin picks the most advantageous threshold.
        costs = [
            -_cost_function(fprs[i], tprs[i], base_rates[group], cost_matrix)
            for i in range(len(thresholds))
        ]
        best = np.argmin(costs)
        cutoffs[group] = thresholds[best]
        fpr_tpr[group] = (fprs[best], tprs[best])
        # Each group's cost is weighted by its share of the population.
        total_cost += costs[best] * proportions[group]
    return cutoffs, fpr_tpr, total_cost
|
fc17b2f8ef404a199798b522461c508cb57fb3bd
| 3,638,865
|
def get_content_type_encoding(curi):
    """
    Determine the content encoding based on the `Content-Type` Header.
    `curi` is the :class:`CrawlUri`.
    """
    content_type = "text/plain"
    charset = ""
    headers = curi.rep_header
    if headers and "Content-Type" in headers:
        (content_type, charset) = extract_content_type_encoding(
            headers["Content-Type"])
    body = curi.content_body
    if charset == "" and body and len(body) >= 512:
        # No charset in the HTTP header; look for an HTML meta tag instead.
        head = body[:512].lower()
        meta_start = head.find("content-type")
        if meta_start != -1:
            # there is a html header
            meta_start = head.find("content=\"", meta_start)
            meta_end = head.find("\"", meta_start + 9)
            return extract_content_type_encoding(
                head[meta_start + 9:meta_end])
    return (content_type, charset)
|
9cf683eb8eceb9d27f374b3a135f2aac2d2c4b8e
| 3,638,866
|
def truncate_errors(install_stdout, install_errors, language_detection_errors,
                    compile_errors, max_error_len=10*1024):
    """
    Combine lists of error lines into a single list bounded by max_error_len.

    When the combined length fits, everything is returned verbatim (install
    output wrapped in INSTALL_ERROR_* markers).  Otherwise each section is
    truncated by keeping its head and tail around an elision marker.
    """
    install_stdout = install_stdout or []
    install_errors = install_errors or []
    language_detection_errors = language_detection_errors or []
    compile_errors = compile_errors or []
    all_errors = install_stdout + install_errors + language_detection_errors + compile_errors
    result = []
    # Fast path: everything fits, no truncation needed.
    if sum(len(line) for line in all_errors) <= max_error_len:
        if install_stdout or install_errors:
            result.append(INSTALL_ERROR_START)
            result.extend(install_stdout)
            result.append(INSTALL_ERROR_MID)
            result.extend(install_errors)
            result.append(INSTALL_ERROR_END)
        result.extend(language_detection_errors)
        result.extend(compile_errors)
        return result
    def bound_errors(source, bound):
        """Keep ~1/3 of `bound` from the head and the rest from the tail."""
        total_length = sum(len(line) for line in source)
        if total_length <= bound:
            return total_length, source
        length = 0
        current = 0
        result = []
        # Take 1/3 from start of errors
        while current < len(source) and (
                length == 0 or
                length + len(source[current]) < bound // 3):
            result.append(source[current])
            length += len(source[current])
            current += 1
        if current < len(source):
            result.append("...(output truncated)...")
        end_errors = []
        end = current
        current = -1
        # BUG FIX: the closing parenthesis previously sat right after the `or`
        # expression, so `< bound` compared that boolean/length value instead
        # of guarding the length check (now mirrors the head loop above).
        while current >= -(len(source) - end) and (
                len(end_errors) == 0 or
                length + len(source[current]) < bound):
            end_errors.append(source[current])
            length += len(source[current])
            current -= 1
        result.extend(reversed(end_errors))
        return length, result
    remaining_length = max_error_len
    if install_stdout or install_errors:
        result.append(INSTALL_ERROR_START)
        used, lines = bound_errors(install_stdout, 0.2 * max_error_len)
        remaining_length -= used
        result.extend(lines)
        result.append(INSTALL_ERROR_MID)
        used, lines = bound_errors(install_errors,
                                   max(0.3 * max_error_len,
                                       0.5 * max_error_len - used))
        remaining_length -= used
        result.extend(lines)
        result.append(INSTALL_ERROR_END)
    _, lines = bound_errors(language_detection_errors + compile_errors, remaining_length)
    result.extend(lines)
    return result
|
b807cea3c5cb7c0fc73c464d4d0205fd60f80504
| 3,638,867
|
from typing import List
def is_balanced(expression: str) -> bool:
    """
    Checks if a string is balanced.

    A string is balanced when every opening bracket is closed by the matching
    bracket type in the correct (LIFO) order.  Non-bracket characters are
    ignored; an empty expression is balanced.

    (BUG FIX: the previous version never required the stack to be empty at
    the end — e.g. "(()" was reported balanced — and compared closers against
    the top opener without popping on mismatch, via an undefined `peek`.)

    :param expression: is the expression to evaluate.
    :raise AttributeError: if the expression is None.
    :return: a boolean value determining if the string is balanced.
    """
    if expression is None:
        raise AttributeError("Expression cannot be None.")
    bracket_map = {
        "(": ")",
        "[": "]",
        "{": "}",
        "<": ">"
    }
    closers = set(bracket_map.values())
    stack: List[str] = []
    for letter in expression:
        if letter in bracket_map:
            stack.append(letter)
        elif letter in closers:
            # A closer must match the most recently opened bracket.
            if not stack or bracket_map[stack.pop()] != letter:
                return False
    # Balanced only if every opener was closed.
    return not stack
|
124e5e61aec81b16c2f7457da4ba5d43219ccae1
| 3,638,868
|
from typing import Optional
from typing import Tuple
from typing import Sequence
def get_committee_assignment(state: BeaconState,
                             epoch: Epoch,
                             validator_index: ValidatorIndex
                             ) -> Optional[Tuple[Sequence[ValidatorIndex], CommitteeIndex, Slot]]:
    """
    Return the committee assignment in the ``epoch`` for ``validator_index``.
    ``assignment`` returned is a tuple of the following form:
        * ``assignment[0]`` is the list of validators in the committee
        * ``assignment[1]`` is the index to which the committee is assigned
        * ``assignment[2]`` is the slot at which the committee is assigned
    Return None if no assignment.
    """
    # Assignments can only be queried up to one epoch ahead.
    assert epoch <= get_current_epoch(state) + 1
    start_slot = compute_start_slot_at_epoch(epoch)
    for raw_slot in range(start_slot, start_slot + SLOTS_PER_EPOCH):
        slot = Slot(raw_slot)
        for raw_index in range(get_committee_count_at_slot(state, slot)):
            index = CommitteeIndex(raw_index)
            committee = get_beacon_committee(state, slot, index)
            if validator_index in committee:
                return committee, index, slot
    return None
|
ce9f0ab9e4b643a0ab9c87cf8b46fca5f9c0966a
| 3,638,869
|
def is_custfmt0(*args):
    """
    is_custfmt0(F) -> bool
    Does the first operand use a custom data representation?
    @param F (C++: flags_t)
    """
    # Auto-generated pass-through to the native IDA `_ida_bytes` binding.
    return _ida_bytes.is_custfmt0(*args)
|
c836ded70576a4e7d2b6d21dff5c649e22b6229e
| 3,638,870
|
from typing import Set
def get_nfa_by_graph(
    graph: nx.MultiDiGraph, start_nodes: Set[int] = None, final_nodes: Set[int] = None
) -> NondeterministicFiniteAutomaton:
    """
    Creates a Nondeterministic Finite Automaton for a specified graph.
    If neither start_nodes nor final_nodes are specified, all nodes are
    considered both start and final.
    Parameters
    ----------
    graph: nx.MultiDiGraph
        Graph for creating NFA
    start_nodes: Set[int]
        Set of start nodes
    final_nodes: Set[int]
        Set of final nodes
    Returns
    -------
    EpsilonNFA
        Epsilon Nondeterministic Finite Automaton which equivalent to graph
    Raises
    ------
    ValueError
        If node does not present in the graph
    """
    nfa = NondeterministicFiniteAutomaton()
    # add the necessary transitions to automaton
    for node_from, node_to in graph.edges():
        edge_data = graph.get_edge_data(node_from, node_to)[0]["label"]
        nfa.add_transition(node_from, edge_data, node_to)
    # BUG FIX: the original tested `(start_nodes and final_nodes) is None`,
    # which is also True when only ONE of the two is None (short-circuit
    # `and` yields None), silently discarding the set that WAS provided.
    if start_nodes is None and final_nodes is None:
        if not nfa.states:
            # Graph contributed no states via transitions; use raw nodes.
            for node in graph.nodes:
                nfa.add_start_state(State(node))
                nfa.add_final_state(State(node))
        else:
            for state in nfa.states:
                nfa.add_start_state(state)
                nfa.add_final_state(state)
        return nfa
    if start_nodes:
        for start_node in start_nodes:
            state = State(start_node)
            if state not in nfa.states:
                raise ValueError(f"\nNode {start_node} does not present in the graph")
            nfa.add_start_state(state)
    if final_nodes:
        for final_node in final_nodes:
            state = State(final_node)
            if state not in nfa.states:
                raise ValueError(f"\nNode {final_node} does not present in the graph")
            nfa.add_final_state(state)
    return nfa
|
446d75dce1e9f305c4fd2ac39ba0f0efec67fa22
| 3,638,872
|
import math
def print_key_val(init, value, pre_indent=0, end=','):
    """Print the key and value and insert it into the code list.
    :param init: string to initialize value e.g.
                 "'key': " or "url = "
    :param value: value to print in the dictionary
    :param pre_indent: optional param to set the level of indentation,
        defaults to 0
    :param end: optional param to set the end, defaults to comma
    :return: list of generated source-code lines
    """
    indent = INDENT * pre_indent
    # Column where the quoted value starts: indent plus the initializer text.
    start = indent + len(init)
    # Characters available per line: PRINTLINE minus the start column,
    # minus 2 single quotes, 1 space, and 1 backslash.
    left = PRINTLINE - start - 4
    code = []
    code.append("{i}{s}'{v}'".format(i=" " * indent, s=init, v=value[:left]))
    if len(value) > left:
        code[-1] += " \\"
        # Number of wrapped lines: value length divided by chars per line.
        lines = int(math.ceil(len(value) / float(left)))
        # BUG FIX: the original used Python 2's ``xrange``, which does not
        # exist on Python 3; ``range`` behaves identically here.
        for i in range(1, lines):
            delim = " \\"
            if i == lines - 1:
                delim = end
            code.append("{i}'{v}'{d}".format(i=" " * start,
                                             v=value[i * left:(i+1) * left],
                                             d=delim))
    else:
        code[-1] += end
    return code
|
3def14ce63cdfb1ef797172efcac10dcfacb02e7
| 3,638,873
|
def not_found_error(e):
    """HTTP 404 view: render the themed 404 page with a 404 status code."""
    # Ignore unused arguments (the framework passes the original exception)
    # pylint: disable=W0613
    return render_template("errors/404.html"), 404
|
c60941ee8e9ff1959b7a5968376501fb50def62c
| 3,638,874
|
def rel_error(model, X, Y, meanY=0):
    """Compute the per-sample relative L2 error of a model's predictions.

    model : trained model exposing ``predict``; used to produce predictions
    X : input data
    Y : reference output data (possibly mean-centered)
    meanY : mean of the untreated Y, added back for the normalization term
    Returns an array with one relative-error value per sample (first axis).
    """
    residual = model.predict(X) - Y
    # Reduce over every axis except the first (sample) axis.
    reduce_axes = tuple(range(1, np.size(residual.shape)))
    numerator = np.sum(residual ** 2, axis=reduce_axes)
    denominator = np.sum((Y + meanY) ** 2, axis=reduce_axes)
    return np.sqrt(numerator / denominator)
|
51c49f13f59b22a37dadd28d7e0aefdca4598ac7
| 3,638,875
|
def read_data(vocab, path):
    """Reads a bAbI dataset.
    Args:
        vocab (collections.defaultdict): A dictionary storing word IDs.
        path (str): Path to bAbI data file.
    Returns:
        list of Query of Sentence: Parsed lines, grouped by story.
    """
    stories = []
    current = []
    with open(path) as fh:
        for raw in fh:
            sentence_id, content = raw.strip().split(' ', 1)
            # A sentence id of '1' marks the beginning of a new story.
            if sentence_id == '1' and current:
                stories.append(current)
                current = []
            current.append(parse_line(vocab, content))
    if current:
        stories.append(current)
    return stories
|
ada97a2c94ec832af45aa91531f7f1e137522cb1
| 3,638,876
|
from typing import Type
from typing import Optional
import time
def get_dataset(cfg: DatasetConfig,
                shard_id: int,
                num_shards: int,
                feature_converter_cls: Type[seqio.FeatureConverter],
                num_epochs: Optional[int] = None,
                continue_from_last_checkpoint: bool = False) -> tf.data.Dataset:
  """Returns a dataset from SeqIO based on a `DatasetConfig`."""
  if continue_from_last_checkpoint:
    raise ValueError(
        '`continue_from_last_checkpoint` must be set to False as this is not '
        'supported by this dataset fn.')
  del continue_from_last_checkpoint

  if cfg.module:
    import_module(cfg.module)

  if cfg.batch_size % num_shards:
    raise ValueError(
        f'Batch size ({cfg.batch_size}) must be divisible by number of '
        f'shards ({num_shards}).')

  shard_info = seqio.ShardInfo(index=shard_id, num_shards=num_shards)

  # Without an explicit seed, derive one from the clock and broadcast it so
  # every host ends up with the same value.
  seed = (multihost_utils.broadcast_one_to_all(np.int32(time.time()))
          if cfg.seed is None else cfg.seed)

  return get_dataset_inner(cfg, shard_info, feature_converter_cls, seed,
                           num_epochs)
|
66569296980136b1eba8f577ad876e30fb66f50b
| 3,638,877
|
def contour_distances_2d(image1, image2, dx=1):
    """
    Calculate contour distances between binary masks.
    The region of interest must be encoded by 1
    Args:
        image1: 2D binary mask 1
        image2: 2D binary mask 2
        dx: physical size of a pixel (e.g. 1.8 (mm) for UKBB)
    Returns:
        (mean_contour_dist, hausdorff_dist): symmetric mean contour distance
        and directed Hausdorff distance, both scaled by ``dx``
        (the original docstring wrongly described a single return value)
    """
    # convert to contiguous arrays of dtype uint8 as required by cv2
    image1 = np.ascontiguousarray(image1, dtype=np.uint8)
    image2 = np.ascontiguousarray(image2, dtype=np.uint8)

    def _contour_points(mask):
        # Stack the points of every external contour into one (N, 2) array.
        contours, _ = cv2.findContours(mask.astype('uint8'), cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_NONE)
        return np.vstack([np.array(c)[:, 0, :] for c in contours])

    contour1_pts = _contour_points(image1)
    contour2_pts = _contour_points(image2)

    # Pairwise Euclidean distance matrix. Vectorised with broadcasting:
    # the original used an O(N*M) Python double loop over np.linalg.norm.
    diff = contour1_pts[:, None, :].astype(float) - contour2_pts[None, :, :]
    dist_matrix = np.linalg.norm(diff, axis=2)

    # symmetrical mean contour distance
    mean_contour_dist = 0.5 * (np.mean(np.min(dist_matrix, axis=0)) +
                               np.mean(np.min(dist_matrix, axis=1)))
    # calculate Hausdorff distance using the accelerated method
    # (doesn't really save computation since pair-wise distance matrix has to be computed for MCD anyways)
    hausdorff_dist = directed_hausdorff(contour1_pts, contour2_pts)[0]
    return mean_contour_dist * dx, hausdorff_dist * dx
|
14ebed56942c6dbbd674e08af1743e443c9b1267
| 3,638,878
|
def weighted_sequence_identity(a, b, weights, gaps='y'):
    """Compute the sequence identity between two sequences, weighting each position.

    Sequence identity is ambiguous because it depends on how gaps are treated;
    the *gaps* argument selects the convention. For details and examples, see
    `this page <https://pyaln.readthedocs.io/en/latest/tutorial.html#sequence-identity>`_

    Parameters
    ----------
    a : str
        first sequence, with gaps encoded as "-"
    b : str
        second sequence, with gaps encoded as "-"
    weights : list of float
        per-position weights, same length as the input sequences (gaps
        included). The score is divided by the sum of weights at the
        positions actually considered.
    gaps : str
        gap-handling mode:
        - 'y' : gaps count as mismatches; gap-to-gap positions are ignored.
        - 'n' : positions that are a gap in either sequence are ignored.
        - 't' : terminal gaps in either sequence are ignored; otherwise as 'y'.
        - 'a' : gaps are ordinary characters; gap-to-gap matches score as identities.

    Returns
    -------
    float
        weighted sequence identity between the two sequences

    Examples
    --------
    >>> weighted_sequence_identity('ATGCA',
    ...                            'ATGCC', weights=[1, 1, 1, 1, 6])
    0.4
    >>> weighted_sequence_identity('ATGCA',
    ...                            'ATGCC', weights=[1, 1, 1, 1, 1])
    0.8

    See also
    --------
    pyaln.Alignment.score_similarity, weighted_sequence_identity
    """
    if len(a) != len(b):
        raise IndexError('sequence_identity ERROR sequences do not have the same length')
    if len(weights) != len(a):
        raise IndexError('sequence_identity ERROR weights must be the same length as sequences')

    if gaps == 'a':
        # Every position counts, including gap-to-gap "matches".
        denom = sum(weights)
        score = sum(w for ca, cb, w in zip(a, b, weights) if ca == cb)
        return score / denom if denom else 0.0

    if gaps not in ('y', 'n', 't'):
        raise Exception('sequence_identity ERROR gaps argument must be one of {a, y, n, t}')

    if gaps == 'n':
        # Drop any position gapped in either sequence.
        excluded = {i for i, (ca, cb) in enumerate(zip(a, b))
                    if ca == '-' or cb == '-'}
    else:
        # 'y' and 't': drop positions gapped in BOTH sequences.
        excluded = {i for i, (ca, cb) in enumerate(zip(a, b))
                    if ca == '-' and cb == '-'}
        if gaps == 't':
            # Additionally drop terminal gap runs of either sequence.
            for seq in (a, b):
                for idx in range(len(seq)):
                    if seq[idx] != '-':
                        break
                    excluded.add(idx)
                for idx in range(len(seq) - 1, -1, -1):
                    if seq[idx] != '-':
                        break
                    excluded.add(idx)

    denom = sum(w for i, w in enumerate(weights) if i not in excluded)
    score = sum(w for i, (ca, cb, w) in enumerate(zip(a, b, weights))
                if i not in excluded and ca == cb and ca != '-')
    return score / denom if denom else 0.0
|
8face090454e984d0d4b9ea5fe78b6600a6e6b03
| 3,638,879
|
def loss_fd(batch,model,reconv_psf_image,step=0.01):
    """
    Defines a loss respective to unit shear response
    Args:
        batch: tf batch
            Image stamps as ['obs_image'] and psf models as ['psf_image']
        model: shear-measurement method passed through to
            get_metacal_response_finitediff as ``method``
        reconv_psf_image: tf tensor
            Synthetic reconvolution psf
        step: float
            Step size for the finite differences
    Returns:
        lost: float
            Distance between the shear response matrix and unity.
    Note:
        ``batch_size`` is read from module scope (not a parameter) — the
        random shears must match the leading batch dimension; confirm the
        global stays in sync with the actual batch.
    """
    # Uniform random shears in [-0.1, 0.1) for both components.
    shears = tf.random.uniform((batch_size,2),-.1,.1,dtype=tf.float32)
    #compute response: element [1] of the helper's return is the 2x2 shear
    # response matrix R
    R = get_metacal_response_finitediff(batch['obs_image'],
                                        batch['psf_image'],
                                        reconv_psf_image,
                                        shear=shears,
                                        step=step,
                                        method=model)[1]
    # Frobenius distance between R and the identity (ideal response).
    lost = tf.norm(R - tf.eye(2))
    return lost
|
910db625562a4b882a1205e3235cdf375b0e9788
| 3,638,880
|
from typing import List
import math
def batch_tokenize(sentences: List[str],
                   tokenizer: yttm.BPE,
                   batch_size: int = 256,
                   bos: bool = True,
                   eos: bool = True) -> List[List[int]]:
    """
    Tokenize input sentences in batches.
    :param sentences: sentences to tokenize
    :param tokenizer: trained tokenizer model
    :param batch_size: amount of sentences in each batch
    :param bos: whether to add <BOS> symbol at the beginning of each sentence
    :param eos: whether to add <EOS> symbol at the end of each sentence
    :return: a list of tokenized sentences, where each sentence is represented as a list of integers
    """
    encoded: List[List[int]] = []
    n_batches = math.ceil(len(sentences) / batch_size)
    # Walk the sentence list one batch-sized slice at a time.
    for start in range(0, n_batches * batch_size, batch_size):
        chunk = list(sentences[start:start + batch_size])
        encoded.extend(tokenizer.encode(chunk, bos=bos, eos=eos))
    return encoded
|
69a5995d52b255bbdbe5a8c9d9ac3c2bf0bbbee8
| 3,638,881
|
def blog_index(request):
    """The index of all blog posts"""
    entries = Page.objects.filter(blog_entry=True).order_by(
        '-pinned', '-date_created')
    paginator = Paginator(entries, 10)
    try:
        page = paginator.page(request.GET.get('page'))
    except PageNotAnInteger:
        # Non-numeric page parameter: fall back to the first page.
        page = paginator.page(1)
    except EmptyPage:
        # Out-of-range page parameter: fall back to the last page.
        page = paginator.page(paginator.num_pages)
    ctx = {'BASE_URL': settings.BASE_URL, 'entries': page}
    return render(request, 'cms/blog_index.html', ctx)
|
ddc52e4d58adc5857d9f47345f8a80e472f68f81
| 3,638,882
|
def pair_time(pos_k, pos_l, vel_k, vel_l, radius):
    """Return the elapsed time until two equal-radius disks first touch.

    pos_k, pos_l, vel_k, vel_l each have two elements as a list (x, y
    position / velocity of disks k and l); radius is the common disk radius.
    Returns float('inf') when the disks never collide.
    """
    delta_pos = np.array([pos_l[0] - pos_k[0], pos_l[1] - pos_k[1]])
    delta_vel = np.array([vel_l[0] - vel_k[0], vel_l[1] - vel_k[1]])
    # Discriminant of |delta_pos + t*delta_vel|^2 = (2*radius)^2.
    Upsilon = (delta_pos.dot(delta_vel))**2 - (delta_vel.dot(delta_vel)) * (
        (delta_pos.dot(delta_pos)) - 4.0 * radius**2)
    # Collision requires a real root (Upsilon > 0) and approaching disks
    # (delta_pos . delta_vel < 0).
    if Upsilon > 0.0 and delta_pos.dot(delta_vel) < 0.0:
        return -(delta_pos.dot(delta_vel) + m.sqrt(Upsilon)) / (delta_vel.dot(delta_vel))
    # BUG FIX: the original returned float(oo) where ``oo`` is undefined in
    # this module; use the builtin infinity instead.
    return float('inf')
|
9c751d7937bcbd71e6e1243ddff809c4d42113f2
| 3,638,884
|
from typing import Union
from pathlib import Path
from typing import List
from typing import Dict
def coco_to_shapely(inpath_json: Union[Path, str],
                    categories: List[int] = None) -> Dict:
    """Transforms COCO annotations to shapely geometry format.
    Args:
        inpath_json: Input filepath coco json file.
        categories: Categories will filter to specific categories and images that contain at least one
            annotation of that category.
    Returns:
        Dictionary of image key and shapely Multipolygon.
    """
    data = utils.other.load_json(inpath_json)

    # Ids of images carrying at least one (matching) annotation.
    if categories is None:
        relevant_ids = {ann['image_id'] for ann in data['annotations']}
    else:
        relevant_ids = {ann['image_id'] for ann in data['annotations']
                        if ann['category_id'] in categories}
    image_ids = sorted(relevant_ids)
    file_names = [img['file_name'] for img in data['images'] if img['id'] in image_ids]

    # Extract selected annotations per image.
    extracted_geometries = {}
    for image_id, file_name in zip(image_ids, file_names):
        anns = [ann for ann in data['annotations'] if ann['image_id'] == image_id]
        if categories is not None:
            anns = [ann for ann in anns if ann['category_id'] in categories]
        polygons = []
        for ann in anns:
            flat = ann['segmentation'][0]  # format [x,y,x1,y1,...]
            pts = np.array(flat).reshape((int(len(flat) / 2), 2))
            polygons.append(Polygon(pts))
        # One shapely MultiPolygon per image.
        extracted_geometries[str(file_name)] = MultiPolygon(polygons)
    return extracted_geometries
|
0762cebe554336080fcf2f4f31b37db0ee991b15
| 3,638,885
|
def create_activity_specific_breathing_rate_df(
    person_breathing_in,
    time,
    event,
    breathing_rate_key,
    rounding=5
):
    """
    Generate breathing rates taking into account age and activity intensity.
    Parameters:
        person_breathing_in: string
            E.g. "person 1"
        time: string
            E.g. "2022-01"
        event: string
            E.g. "work", "party"
        breathing_rate_key: string
            Column name under which breathing-rate values are stored.
        rounding: int
            Decimal places used when rounding each sampled rate range.
    Returns: pd.DataFrame
        Long-format dataframe with one row per (rate, activity, age)
        combination and a constant 'value' column of 1.
        (The original docstring said tuple(pd.DataFrame); a single
        DataFrame is returned.)
    """
    age_key = f'age_({person_breathing_in})'
    person_time_event_index = index_name(
        time,
        event,
        person_breathing_in
    )
    activity_key = f'activity_{person_time_event_index}'
    # below is in cubic meters per minute
    # NOTE(review): only the '31 to <41' age bracket is populated —
    # presumably further brackets are to be added; confirm against source data.
    keys = {
        'Sleep or Nap': {
            '31 to <41':
            np.arange(0.0046, 0.0066, 0.0001).round(rounding),
        },
        'Sedentary/Passive': {
            '31 to <41':
            np.arange(0.0043, 0.0066, 0.0001).round(rounding),
        },
        'Light Intensity': {
            '31 to <41':
            np.arange(0.012, 0.016, 0.0001).round(rounding),
        },
        'Moderate Intensity': {
            '31 to <41':
            np.arange(0.026, 0.038, 0.0001).round(rounding),
        },
        'High Intensity': {
            '31 to <41':
            np.arange(0.049, 0.072, 0.0001).round(rounding),
        }
    }
    collection = []
    # One dataframe per (activity, age) pair; rate column varies, the
    # activity/age columns are constant within each frame.
    for activity, ages in keys.items():
        for age, rng in ages.items():
            df = pd.DataFrame(
                {
                    breathing_rate_key: rng,
                    activity_key: [
                        activity for _ in range(len(rng))
                    ],
                    age_key: [
                        age for _ in range(len(rng))
                    ],
                    'value': 1
                }
            )
            collection.append(df)
    cpt_df = pd.concat(collection)
    # So that the result is in cubic meters per hour
    cpt_df[breathing_rate_key] = cpt_df[breathing_rate_key] * 60
    return cpt_df
|
953145a13166a0a6b84ed173a72213820d7b48b6
| 3,638,886
|
def find_valid_imported_name(name):
    """Return the name stripped of its import-op marker, or False if the
    marker is absent."""
    if name.endswith(MARKER):
        return remove_import_op(name)
    return False
|
cd7473a8852ff3962fedf970d63564929911a808
| 3,638,887
|
def parseText(text1, nlp):
    """Apply the given Spacy pipeline to ``text1`` and return the parsed doc."""
    return nlp(text1)
|
99d6a585358a700f8fc48c5dc4fc761a03ab42a7
| 3,638,888
|
def view_pebbles_home(request):
    """Serve up the workspace, the current home page.
    Include global js settings"""
    app_config = AppConfiguration.get_config()
    if app_config is None:
        return HttpResponseRedirect(reverse('view_no_domain_config_error'))

    # D3M mode requires (1) a valid D3M config and (2) an authenticated user.
    if app_config.is_d3m_domain():
        if not get_latest_d3m_config():
            return HttpResponseRedirect(\
                reverse('view_d3m_config_error'))
        if not request.user.is_authenticated():
            return HttpResponseRedirect(\
                reverse('login'))

    dinfo = dict(title='TwoRavens',
                 session_key=get_session_key(request),
                 app_config=app_config.convert_to_dict())
    return render(request,
                  'index.html',
                  dinfo)
|
c19dc920c9d064b2294d0f92eb762197271ace2e
| 3,638,891
|
from openpyxl import load_workbook
def multi_pretty(request):
    """Bulk-add pretty numbers from an uploaded Excel file."""
    if request.method == "GET":
        return render(request, 'multi_pretty.html')

    workbook = load_workbook(request.FILES.get("exc"))
    sheet = workbook.worksheets[0]
    # Row 1 is the header; columns are mobile, price, level, status.
    for row in sheet.iter_rows(min_row=2):
        mobile = row[0].value
        price = row[1].value
        level = row[2].value
        status = row[3].value
        # Skip numbers that are already in the database.
        if not models.PrettyNum.objects.filter(mobile=mobile).exists():
            models.PrettyNum.objects.create(
                mobile=mobile, price=price, level=level, status=status)
    return redirect('/pretty/list/')
|
571ce48fb1b0c13d85687cfcaba556599d299b00
| 3,638,892
|
import socket
def is_port_open(host, port, timeout=5):
    """
    verifies if a port is open in a remote host
    :param host: IP of the remote host
    :type host: str
    :param port: port to check
    :type port: int
    :param timeout: timeout max to check
    :type timeout: int
    :return: True if the port is open
    :rtype: bool
    """
    # BUG FIX: the socket was never closed (resource leak); the with-block
    # guarantees it is released on every path.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(timeout)
        result = sock.connect_ex((host, port))
    # BUG FIX: `result is 0` compared identity, relying on CPython's
    # small-int interning; use value equality instead.
    return result == 0
|
1805727a3bb007cd5686475de686cf7bceac83a1
| 3,638,893
|
def check_transport_reaction_gpr_presence(model):
    """Return the list of transport reactions that have no associated gpr."""
    missing_gpr = []
    for reaction in helpers.find_transport_reactions(model):
        # An empty gene_reaction_rule means no GPR is associated.
        if not reaction.gene_reaction_rule:
            missing_gpr.append(reaction)
    return missing_gpr
|
2bce7a49ce46a7dcd4b756cd9bc115f7bb227b36
| 3,638,894
|
def get_RSA_modulus(b, num):
    """
    Generates a list of RSA modulus' of bit length b
    such that each modulus, n = pq, where p and q are
    independently generated primes.
    :param b: bit length of the modulus
    :param num: number of modulus' you want
    :returns: a list of RSA modulus'
    """
    # BUG FIX: the original built n_i = p_i * p_{i+1} from one shared prime
    # list, so consecutive moduli shared a factor and gcd(n_i, n_{i+1})
    # recovered it — trivially factoring both. Each modulus now gets two
    # fresh primes.
    n_list = []
    for _ in range(num):
        # primes of length b/2 so their product is ~b bits long
        p = get_prime(b // 2)
        q = get_prime(b // 2)
        n_list.append(p * q)
    return n_list
|
5238f840ab6c732e852e195c75cd39ad35e709f1
| 3,638,895
|
from multiprocessing import cpu_count
def use_processors(n_processes):
    """
    Clamp a requested worker count to the processors available on this
    machine (requests wrap modulo available+1; a result of 0 falls back
    to single-threaded execution).
    """
    available_processors = cpu_count()
    clamped = n_processes % (available_processors + 1)
    if clamped != 0:
        return clamped
    print('WARNING: Found n_processes = 0. Falling back to default single-threaded execution (n_processes = 1).')
    return 1
|
73877393aac6b4da68fb0216bf046601ce1fa99e
| 3,638,896
|
import json
def climate_radio_thermostat_ct101_multiple_temp_units_state_fixture():
    """Load the climate multiple temp units node state fixture data."""
    raw = load_fixture(
        "zwave_js/climate_radio_thermostat_ct101_multiple_temp_units_state.json"
    )
    return json.loads(raw)
|
7a3493b8d065752b21e93d7fd2750b59f6079db8
| 3,638,897
|
def GetPseudoAAC1(ProteinSequence, lamda=30, weight=0.05, AAP=None):
    """
    Compute the first 20 type-I pseudo-amino-acid composition (PAAC)
    descriptors of a protein sequence, based on the given properties.

    :param ProteinSequence: protein sequence string
    :param lamda: number of sequence-order correlation tiers to include
    :param weight: weighting factor for the sequence-order term
    :param AAP: amino-acid property set passed to the correlation helper
        (defaults to an empty list)
    :return: dict mapping "PAAC1".."PAAC20" to rounded descriptor values
    """
    # BUG FIX: the original default AAP=[] was a mutable default argument
    # shared across calls; use a None sentinel instead.
    if AAP is None:
        AAP = []
    # Accumulate the lamda sequence-order correlation factors.
    rightpart = 0.0
    for i in range(lamda):
        rightpart = rightpart + GetSequenceOrderCorrelationFactor(
            ProteinSequence, i + 1, AAP
        )
    AAC = GetAAComposition(ProteinSequence)
    temp = 1 + weight * rightpart
    return {
        "PAAC" + str(index + 1): round(AAC[aa] / temp, 3)
        for index, aa in enumerate(AALetter)
    }
|
d451a5a2f9674a84a4a114d58a427e271721bfb9
| 3,638,899
|
def reversedict(dct):
    """Return a new dict with the {key: val} pairs of ``dct`` inverted
    to {val: key}.

    Note: values must be hashable; duplicate values collapse (later keys win).
    """
    # BUG FIX: dict.iteritems() is Python 2 only; items() works on both
    # Python 2 and 3.
    return {val: key for key, val in dct.items()}
|
f7a5a102546270a2e6aa7fb52d4fa6dd5e826753
| 3,638,900
|
def mock_graph_literal():
    """Creates a mock tree
    Metasyntactic variables: https://www.ietf.org/rfc/rfc3092.txt

    Returns a list of nested dicts; each node has a "frame" (name, optional
    type), a "metrics" dict with inclusive ("time (inc)") and exclusive
    ("time") times, and optionally "children" of the same shape.
    """
    # NOTE: some subtrees (e.g. "bar" -> ["baz", "grault"]) repeat on purpose
    # so consumers can be tested on shared/duplicated call paths.
    graph_dict = [
        {
            "frame": {"name": "foo", "type": "function"},
            "metrics": {"time (inc)": 130.0, "time": 0.0},
            "children": [
                {
                    "frame": {"name": "bar"},
                    "metrics": {"time (inc)": 20.0, "time": 5.0},
                    "children": [
                        {
                            "frame": {"name": "baz", "type": "function"},
                            "metrics": {"time (inc)": 5.0, "time": 5.0},
                        },
                        {
                            "frame": {"name": "grault"},
                            "metrics": {"time (inc)": 10.0, "time": 10.0},
                        },
                    ],
                },
                {
                    "frame": {"name": "qux", "type": "function"},
                    "metrics": {"time (inc)": 60.0, "time": 0.0},
                    "children": [
                        {
                            "frame": {"name": "quux"},
                            "metrics": {"time (inc)": 60.0, "time": 5.0},
                            "children": [
                                {
                                    "frame": {"name": "corge", "type": "function"},
                                    "metrics": {"time (inc)": 55.0, "time": 10.0},
                                    "children": [
                                        {
                                            "frame": {"name": "bar"},
                                            "metrics": {
                                                "time (inc)": 20.0,
                                                "time": 5.0,
                                            },
                                            "children": [
                                                {
                                                    "frame": {
                                                        "name": "baz",
                                                        "type": "function",
                                                    },
                                                    "metrics": {
                                                        "time (inc)": 5.0,
                                                        "time": 5.0,
                                                    },
                                                },
                                                {
                                                    "frame": {"name": "grault"},
                                                    "metrics": {
                                                        "time (inc)": 10.0,
                                                        "time": 10.0,
                                                    },
                                                },
                                            ],
                                        },
                                        {
                                            "frame": {"name": "grault"},
                                            "metrics": {
                                                "time (inc)": 10.0,
                                                "time": 10.0,
                                            },
                                        },
                                        {
                                            "frame": {
                                                "name": "garply",
                                                "type": "function",
                                            },
                                            "metrics": {
                                                "time (inc)": 15.0,
                                                "time": 15.0,
                                            },
                                        },
                                    ],
                                }
                            ],
                        }
                    ],
                },
                {
                    "frame": {"name": "waldo", "type": "function"},
                    "metrics": {"time (inc)": 50.0, "time": 0.0},
                    "children": [
                        {
                            "frame": {"name": "fred", "type": "function"},
                            "metrics": {"time (inc)": 35.0, "time": 5.0},
                            "children": [
                                {
                                    "frame": {"name": "plugh", "type": "function"},
                                    "metrics": {"time (inc)": 5.0, "time": 5.0},
                                },
                                {
                                    "frame": {"name": "xyzzy", "type": "function"},
                                    "metrics": {"time (inc)": 25.0, "time": 5.0},
                                    "children": [
                                        {
                                            "frame": {
                                                "name": "thud",
                                                "type": "function",
                                            },
                                            "metrics": {
                                                "time (inc)": 25.0,
                                                "time": 5.0,
                                            },
                                            "children": [
                                                {
                                                    "frame": {
                                                        "name": "baz",
                                                        "type": "function",
                                                    },
                                                    "metrics": {
                                                        "time (inc)": 5.0,
                                                        "time": 5.0,
                                                    },
                                                },
                                                {
                                                    "frame": {
                                                        "name": "garply",
                                                        "type": "function",
                                                    },
                                                    "metrics": {
                                                        "time (inc)": 15.0,
                                                        "time": 15.0,
                                                    },
                                                },
                                            ],
                                        }
                                    ],
                                },
                            ],
                        },
                        {
                            "frame": {"name": "garply", "type": "function"},
                            "metrics": {"time (inc)": 15.0, "time": 15.0},
                        },
                    ],
                },
            ],
        },
        {
            "frame": {"name": "waldo", "type": "function"},
            "metrics": {"time (inc)": 30.0, "time": 10.0},
            "children": [
                {
                    "frame": {"name": "bar"},
                    "metrics": {"time (inc)": 20.0, "time": 5.0},
                    "children": [
                        {
                            "frame": {"name": "baz", "type": "function"},
                            "metrics": {"time (inc)": 5.0, "time": 5.0},
                        },
                        {
                            "frame": {"name": "grault"},
                            "metrics": {"time (inc)": 10.0, "time": 10.0},
                        },
                    ],
                }
            ],
        },
    ]
    return graph_dict
|
4b65f0dfffe705963c1041fbbef65d85af306f4f
| 3,638,903
|
def parse_ADD_ins(tokens):
    """Attempts to parse an ADD instruction.

    Returns a statement object on success, or None when the tokens do not
    form a valid ADD instruction.
    """
    assert len(tokens) > 0
    if tokens[0].text.upper() != 'ADD':
        return None
    statement = Obj()
    statement.type = 'STATEMENT'
    statement.statement_type = 'INSTRUCTION'
    statement.instruction = 'ADD'
    # Try each operand form in turn; the first one that matches wins.
    for operand_parser in (parse_operands_DR_SR1_SR2, parse_operands_DR_SR1_imm5):
        operands = operand_parser(tokens[1:])
        if operands:
            statement.operands = operands
            return statement
    return None
|
ffc515d0079dbaf860a10de675542e798abfd4a3
| 3,638,904
|
import re
def commonIntegerPredicate(field):
    """Return a tuple of every run of digits (as strings) found in ``field``."""
    # BUG FIX: use a raw string for the regex — "\d" in a plain string is an
    # invalid escape sequence (DeprecationWarning, SyntaxError in future).
    return tuple(re.findall(r"\d+", field))
|
955dc61fa4293f21c707b538ea218b15d5a95fb2
| 3,638,905
|
def spatialft(image, cosine_window=True, rmdc=True):
    """Return the 2D power spectrum of an image (or flow field).

    The quadrants are shifted so that low spatial frequencies sit at the
    center of the transformed image. Returns (power_spectrum, [row_freqs,
    col_freqs]).
    """
    rows, cols = np.shape(image)
    if cosine_window:
        # Hann (raised-cosine) taper to suppress border artifacts.
        window = np.outer(np.hanning(rows), np.hanning(cols))
        image = image * window
    if rmdc:
        # remove DC component
        image = image - np.mean(image)
    power = np.abs(np.fft.fftshift(np.fft.fft2(image))) ** 2
    freqs = [np.fft.fftshift(np.fft.fftfreq(rows)),
             np.fft.fftshift(np.fft.fftfreq(cols))]
    return (power, freqs)
|
cafca20ec79dcaca6d6dfb11c18156077b172ab0
| 3,638,906
|
def _get_instrument_parameters(ufile, filemetadata):
    """ Return a dictionary containing instrument parameters. """
    # pulse width
    pulse_width = filemetadata('pulse_width')
    pulse_width['data'] = ufile.get_pulse_widths() / _LIGHT_SPEED  # m->sec
    # assume that the parameters in the first ray represent the beam widths,
    # bandwidth and frequency in the entire volume
    first_ray = ufile.rays[0]
    field_header = first_ray.field_headers[0]
    beam_width_h = field_header['beam_width_h'] / 64.
    beam_width_v = field_header['beam_width_v'] / 64.
    bandwidth = field_header['bandwidth'] / 16. * 1.e6
    wavelength_cm = field_header['wavelength_cm'] / 64.
    frequency_hz = _LIGHT_SPEED / (wavelength_cm / 100.)
    # radar_beam_width_h
    radar_beam_width_h = filemetadata('radar_beam_width_h')
    radar_beam_width_h['data'] = np.array([beam_width_h], dtype='float32')
    # radar_beam_width_v
    # BUG FIX: the metadata key was misspelled 'radar_beam_width_w', so the
    # vertical beam width was stored under a bogus key.
    radar_beam_width_v = filemetadata('radar_beam_width_v')
    radar_beam_width_v['data'] = np.array([beam_width_v], dtype='float32')
    # radar_receiver_bandwidth
    radar_receiver_bandwidth = filemetadata('radar_receiver_bandwidth')
    radar_receiver_bandwidth['data'] = np.array([bandwidth], dtype='float32')
    # polarization_mode
    polarization_mode = filemetadata('polarization_mode')
    polarization_mode['data'] = ufile.get_sweep_polarizations()
    # frequency
    frequency = filemetadata('frequency')
    frequency['data'] = np.array([frequency_hz], dtype='float32')
    # prt
    prt = filemetadata('prt')
    prt['data'] = ufile.get_prts() / 1e6  # us->sec
    instrument_parameters = {
        'pulse_width': pulse_width,
        'radar_beam_width_h': radar_beam_width_h,
        'radar_beam_width_v': radar_beam_width_v,
        'radar_receiver_bandwidth': radar_receiver_bandwidth,
        'polarization_mode': polarization_mode,
        'frequency': frequency,
        'prt': prt,
    }
    # nyquist velocity if defined
    nyquist_velocity = filemetadata('nyquist_velocity')
    nyquist_velocity['data'] = ufile.get_nyquists()
    if nyquist_velocity['data'] is not None:
        instrument_parameters['nyquist_velocity'] = nyquist_velocity
    return instrument_parameters
|
af6ee2097848a672ec18c2199faece072f3990f1
| 3,638,907
|
import re
def split_reaction(reac):
    """ split a CHEMKIN reaction into reactants and products
    :param reac: reaction string
    :type reac: str
    :returns: reactants and products
    :rtype: (tuple of strings, tuple of strings)
    """
    em_pattern = one_of_these([PAREN_PLUS_EM + STRING_END,
                               PLUS_EM + STRING_END])
    lhs, rhs = re.split(PADDED_ARROW, reac)

    def _side_reagents(side):
        # Drop any trailing (+M)/+M third-body marker, split on '+',
        # strip whitespace and expand stoichiometric multipliers.
        side = re.sub(em_pattern, '', side)
        expanded = (_expand_en_reagents(strip_spaces(token))
                    for token in re.split(PADDED_PLUS, side))
        return tuple(chain(*expanded))

    return _side_reagents(lhs), _side_reagents(rhs)
|
d9ccbf02bd8f037d42f9de5f30612a99a1d7d918
| 3,638,909
|
def bond_stereo_parities(sgr):
    """ bond parities, as a dictionary
    """
    bond_dct = bonds(sgr)
    keys = bond_keys(sgr)
    return mdict.by_key_by_position(bond_dct, keys, BND_STE_PAR_POS)
|
7b80fdb861530e4389a83db1ef55f9552baf758d
| 3,638,910
|
def encode(text):
    """
    Return the UTF-8 byte values of ``text`` as a list of ints.

    (BUG FIX, docs: the original docstring claimed base64 encoding, but the
    function returns raw UTF-8 code-unit values.)
    """
    return list(text.encode('utf8'))
|
af51272d8edc25d46695ea3b35fd395ad26321b5
| 3,638,911
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.