| content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
|---|---|---|
def make_user_role_table(table_name='user', id_column_name='id'):
"""
Create the user-role association table so that
it correctly references your own UserMixin subclass.
"""
return db.Table('fp_user_role',
db.Column(
'user_id', db.Integer, db.ForeignKey('{}.{}'.format(
table_name, id_column_name))),
db.Column(
'role_id', db.Integer, db.ForeignKey('fp_role.id')),
extend_existing=True)
|
8e7570590686e78d2bf7f91ba3b16f14f4c42620
| 3,638,572
|
import re
def _remove_comments_inline(text):
"""Removes the comments from the string 'text'."""
if 'auto-ignore' in text:
return text
if text.lstrip(' ').lstrip('\t').startswith('%'):
return ''
match = re.search(r'(?<!\\)%', text)
if match:
return text[:match.end()] + '\n'
else:
return text
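# Added usage sketch (not part of the original snippet): the '%' that starts a
# comment is kept, full-line comments become '', and escaped percents pass through.
assert _remove_comments_inline('x = 1  % a comment') == 'x = 1  %\n'
assert _remove_comments_inline('% full-line comment') == ''
assert _remove_comments_inline('50\\% of cases') == '50\\% of cases'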
|
463e29e1237a88e91c13a58ffea1b2ccdafd4a1d
| 3,638,573
|
import pandas as pd
def wide_to_tall(df: pd.DataFrame) -> pd.DataFrame:
"""Convert a wide table to a tall table
Args:
df (pd.DataFrame): wide table
Returns:
pd.DataFrame: tall table
"""
return df.unstack().dropna().reset_index()
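# Added usage sketch (not part of the original snippet): a 2x2 wide frame
# unstacks into a 4-row tall frame of (column, index, value) triples.
wide = pd.DataFrame({'a': [1, 2], 'b': [3, 4]}, index=['x', 'y'])
tall = wide_to_tall(wide)
assert tall.shape == (4, 3)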
|
50ab71d18f5fb1e4dba9207b71030c7f8ffdbcde
| 3,638,574
|
def is_pj_player_plus(value):
"""
:param value: The value to be checked
:type value: Any
:return: whether or not the value is a PJ Player+
:rtype: bool
"""
    # note: parentheses added so the length check only applies to lists
    return isinstance(value, list) and len(value) in (3, 4)
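# Added usage sketch (not part of the original snippet): only lists of
# length 3 or 4 qualify; other types simply return False.
assert is_pj_player_plus([1, 2, 3]) is True
assert is_pj_player_plus([1, 2, 3, 4]) is True
assert is_pj_player_plus([1, 2]) is False
assert is_pj_player_plus('abc') is False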
|
1c4e7a7513d746d25f6b3d7964455b0735c988fc
| 3,638,575
|
import pandas as pd
from fuzzywuzzy import fuzz  # assumption: the original may import fuzz from rapidfuzz instead
def pd_fuzz_partial_token_sort_ratio(col1, col2):
""" Calculate "partial token sort" ratio (`fuzz.partial_token_sort_ratio`) between two text columns.
Args:
col1 (Spark Column): 1st text column
col2 (Spark Column): 2nd text column
Returns:
Spark Column (IntegerType): result of `fuzz.partial_token_sort_ratio` calculation.
"""
return pd.Series(map(fuzz.partial_token_sort_ratio, col1.astype(str), col2.astype(str)))
|
d650d37d5936751f961260d98210e2d219200fe6
| 3,638,576
|
def looterCanReinforce(mine: Game) -> bool:
"""
    Return True if, in the given game, the looter (the attacker) can
    reinforce at this moment, regardless of whether it's the first or the
    second time.
"""
return getLooterReinforcementStatus(mine) != 0
|
e73fb193cc1c621766900c1f484db90e4e21decb
| 3,638,577
|
import numpy as np
DEFAULT_EPS = 1e-7  # assumed value; the original module defines this constant elsewhere
def _get_normed_sym_np(X_, _eps=DEFAULT_EPS):
"""
Compute the normalized and symmetrized probability matrix from
relative probabilities X_, where X_ is a numpy array
Parameters
----------
X_ : 2-d array_like (N, N)
asymmetric probabilities. For instance, X_(i, j) = P(i|j)
Returns
-------
P : 2-d array_like (N, N)
symmetric probabilities, making the assumption that P(i|j) = P(j|i)
Diagonals are all 0s."""
batch_size = X_.shape[0]
zero_diags = 1.0 - np.identity(batch_size)
X_ *= zero_diags
norm_facs = np.sum(X_, axis=0, keepdims=True)
X_ = X_ / (norm_facs + _eps)
X_ = 0.5*(X_ + np.transpose(X_))
return X_
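# Added usage sketch (not part of the original snippet): the result is
# symmetric with a zero diagonal. Note X_ is modified in place, hence the copy.
X = np.random.rand(4, 4)
P = _get_normed_sym_np(X.copy())
assert np.allclose(P, P.T)
assert np.allclose(np.diag(P), 0.0)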
|
a6f5762a5bf41c83bd017d0661cc069f17bee618
| 3,638,578
|
def load_encoding_model():
"""Model to encode image as vector of length 4096 using 2nd to last layer of
VGG16"""
base_model = VGG16(weights='imagenet', include_top=True)
encoding_model = Model(inputs=base_model.input,
outputs=base_model.get_layer('fc2').output)
return encoding_model
|
b15f9d9b6d360a71db0fcb7fc0fa83c031f34047
| 3,638,579
|
import math
def get_geohash_radius_approximation(latitude, longitude, radius, precision, georaptor_flag=False, minlevel=1, maxlevel=12):
"""
    Get the list of geohashes that approximate a circle.
    :param latitude: Float the latitude to get the radius approximation for
    :param longitude: Float the longitude to get the radius approximation for
    :param radius: Integer Radius coverage in meters
    :param precision: Integer the geohash precision level
    :param georaptor_flag: Boolean whether to compress the result with georaptor
:param minlevel: minimal precision level possible
:param maxlevel: maximal precision level possible
:return: A list of geohashes
"""
x = 0.0
y = 0.0
points = []
geohashes = []
grid_width = [5009400.0, 1252300.0, 156500.0, 39100.0, 4900.0, 1200.0, 152.9, 38.2, 4.8, 1.2, 0.149, 0.0370]
grid_height = [4992600.0, 624100.0, 156000.0, 19500.0, 4900.0, 609.4, 152.4, 19.0, 4.8, 0.595, 0.149, 0.0199]
height = (grid_height[precision - 1]) / 2
width = (grid_width[precision - 1]) / 2
lat_moves = int(math.ceil(radius / height)) # 4
lon_moves = int(math.ceil(radius / width)) # 2
for i in range(0, lat_moves):
temp_lat = y + height * i
for j in range(0, lon_moves):
temp_lon = x + width * j
if in_circle_check(temp_lat, temp_lon, y, x, radius):
x_cen, y_cen = get_centroid(temp_lat, temp_lon, height, width)
lat, lon = convert_to_latlon(y_cen, x_cen, latitude, longitude)
points += [[lat, lon]]
lat, lon = convert_to_latlon(-y_cen, x_cen, latitude, longitude)
points += [[lat, lon]]
lat, lon = convert_to_latlon(y_cen, -x_cen, latitude, longitude)
points += [[lat, lon]]
lat, lon = convert_to_latlon(-y_cen, -x_cen, latitude, longitude)
points += [[lat, lon]]
for point in points:
geohashes += [pgh.encode(point[0], point[1], precision)]
if georaptor_flag:
georaptor_out = georaptor.compress(set(geohashes), int(minlevel), int(maxlevel))
return list(georaptor_out)
else:
return list(set(geohashes))
|
cf8bbc4a8323b796c4f325f4f3ab9f8e3a169fa8
| 3,638,580
|
def manage_products(request, category_id, template_name="manage/category/products.html"):
"""
"""
category = Category.objects.get(pk=category_id)
inline = products_inline(request, category_id, True)
# amount options
amount_options = []
for value in (10, 25, 50, 100):
amount_options.append({
"value": value,
"selected": value == request.session.get("category-products-amount")
})
return render_to_string(template_name, RequestContext(request, {
"category": category,
"products_inline": inline,
"amount_options": amount_options,
}))
|
4ece15c50e00198c422dbb452622dde938f2a9e6
| 3,638,581
|
import numpy as np
def random_indices(X, size=None, p=None, sort_indices=True, **kwargs):
""" Get indices for a random subset of the data.
Parameters
----------
size: int
* integer size to sample (required if p=None)
p: float
* threshold percentage to keep (required if size=None)
Returns
-------
indices: tuple of np.ndarrays
* indices of samples in the data set
"""
assert(size or p)
# convert p (i.e., percentage of points) to integer size
if size is None:
size = int(p / 100. * len(X))
# Get original indices
indices = np.arange(len(X))
# Get randomized indices
indices = np.random.choice(indices, int(size), replace=False)
# Sort indices
if sort_indices is True:
indices = np.sort(indices)
return indices
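# Added usage sketch (not part of the original snippet): keep 25% of 20 rows,
# i.e. 5 sorted, unique row indices.
X = np.arange(100).reshape(20, 5)
idx = random_indices(X, p=25)
assert len(idx) == 5
assert np.all(np.diff(idx) > 0)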
|
680be93345ab5e3065a43fda5216a4ca8b986121
| 3,638,582
|
def get_facts(F5, uri):
"""
Issue a GET of the URI specified to the F5 appliance and return the result as facts.
    The URI must have a slash as the first character; it is added if missing.
    In Ansible 2.2 a name clash with the 'items' key was found, so it is renamed to 'bigip_items':
http://stackoverflow.com/questions/40281706/cant-read-custom-facts-with-list-array-of-items
"""
result = { 'ansible_facts': {} }
if uri[0] != "/":
uri = "/" + uri
status, result["ansible_facts"] = F5.genericGET(uri)
try:
result["ansible_facts"]["bigip_items"] = result["ansible_facts"].pop("items") # replace key name of 'items' with 'bigip_items'
    except KeyError:
result["ansible_facts"]["bigip_items"] = dict()
return status, result
|
554cc7b9bf35d631c8742614142f5aa2ecaba9b4
| 3,638,583
|
from typing import Optional
from typing import Sequence
from argparse import ArgumentParser, Namespace
def parse_args(args: Optional[Sequence[str]] = None) -> Namespace:
"""
Parses args and validates the consistency of origin/target using the
generator
"""
parser = ArgumentParser(
prog="python -m luh3417.transfer",
description="Transfers a WordPress to one location to the other",
)
parser.add_argument(
"-g",
"--settings-generator",
help="A Python script that handles the transitions",
type=generator_validator,
required=True,
)
parser.add_argument("origin", help="Origin environment")
parser.add_argument("target", help="Target environment")
parsed = parser.parse_args(args)
for env in ["origin", "target"]:
env_name = getattr(parsed, env)
try:
parsed.settings_generator.get_source(env_name)
except UnknownEnvironment as e:
parser.error(
f'Environment "{env_name}" not recognized by generator: {e.message}'
)
if not parsed.settings_generator.allow_transfer(parsed.origin, parsed.target):
parser.error(
f"Generator does not allow transfer from {parsed.origin} to {parsed.target}"
)
return parsed
|
475318fc9999b7b259a073e53b3b24d5ea46911a
| 3,638,584
|
import pandas as pd
def parse_papers_plus_json(data):
""" Function which parses the papers_plus json and returns a pandas dataframe of the results.
Solr Field definition shown below:
<!-- Citing paper fields: papers, metadata, arxiv_metadata -->
<!-- Papers -->
<field name="sentencenum" type="pint" indexed="true" stored="true" multiValued="false"/>
<field name="sentence" type="text_classic" indexed="true" stored="true" multiValued="false"/>
<field name="arxiv_identifier" type="string" indexed="true" stored="true" multiValued="false"/>
<!-- arxiv metadata-->
<field name="arxiv_url" type="string" indexed="true" stored="true" multiValued="false"/>
<field name="authors" type="text_classic" indexed="true" stored="true" multiValued="false"/>
<field name="title" type="text_classic" indexed="true" stored="true" multiValued="false"/>
<field name="published_date" type="pdate" indexed="true" stored="true" multiValued="false"/>
<field name="revision_dates" type="string" indexed="true" stored="true" multiValued="false"/>
<!-- meta field: dblp_url-->
<field name="dblp_url" type="string" indexed="true" stored="true" multiValued="false"/>
"""
docs = data['response']['docs']
docs_df = pd.DataFrame(docs)
docs_df = docs_df.drop(['_version_', 'id'], axis=1)
return docs_df
|
44c7a27701e265a841e07f49741f03e4b49d4b95
| 3,638,585
|
from pathlib import Path
from typing import Optional
def get_credential(config_file: Path, credential_key: str = 'api_key') -> Optional[str]:
"""
Get a single credential from yaml file. Usual case is 'api_key'
:param config_file:
:param credential_key:
:return:
"""
config = load_credentials(config_file)
credential = config.get('credentials', {}).get(credential_key, None) if config else None
return credential
|
a3e5182c4b2e3fed777f6bd52e144a6d49e4f48f
| 3,638,586
|
import functools
def authenticate_secondarily(endpoint):
"""Proper authentication for function views."""
@functools.wraps(endpoint)
def wrapper(request: HttpRequest):
if not request.user.is_authenticated:
try:
auth_result = PersonalAPIKeyAuthentication.authenticate(request)
if isinstance(auth_result, tuple) and auth_result[0].__class__.__name__ == "User":
request.user = auth_result[0]
else:
raise AuthenticationFailed("Authentication credentials were not provided.")
except AuthenticationFailed as e:
return JsonResponse({"detail": e.detail}, status=401)
return endpoint(request)
return wrapper
|
ac7a5b63c2b556e1bb42986db8110a922485b96d
| 3,638,587
|
def gather_emails_GUIDs(mailbox, search, folder):
""" Download GUID of messages passing search requirements
"""
mailbox.folder.set(folder)
return (email for email in mailbox.uids(search))
|
d75ecdeaa4f95f9108276f2be236e33934d7de01
| 3,638,588
|
def pyrolite_meltsutil_datafolder(subfolder=None):
"""
Returns the path of the pyrolite-meltsutil data folder.
Parameters
-----------
subfolder : :class:`str`
Subfolder within the pyrolite data folder.
Returns
-------
:class:`pathlib.Path`
"""
return get_module_datafolder(module="pyrolite_meltsutil", subfolder=subfolder)
|
e1ae16fff0b2fcd247c57a40e4713eb0ee13f3e7
| 3,638,589
|
from typing import List
def get_resource_record_set_cloud_formation_dict_list(hosted_zone: ResourceRecordSetList,
with_soa: str,
client: botocore.client.BaseClient, zone_id: str,
type_counter_aws_resource_record_set: dict) -> List[dict]:
"""
Provide a dict representation of a resource record set that can
be used to dump a cloud formation formatted YAML file.
:return: a dict in the form:
{
"Name": str,
"Type": str,
"TTL": str,
"ResourceRecord": [str],
"AliasTarget": {
"DNSName": str,
"HostedZoneId": str
}
}
"""
resource_record_set_cloud_formation_dict_list = []
while hosted_zone is not None:
for resource_record_set in hosted_zone.resource_record_sets:
if ((resource_record_set.type != "SOA" and resource_record_set.type != "NS")
or (with_soa and (resource_record_set.type == "SOA" or resource_record_set.type == "NS"))):
resource_record_values = [resource_record.value
for resource_record in resource_record_set.resource_records]
resource_record_set_cloud_formation_dict = {
"Name": resource_record_set.name,
"Type": resource_record_set.type
}
update_type_counter_aws_resource_record_set(type_counter_aws_resource_record_set,
resource_record_set.type)
if resource_record_set.ttl:
resource_record_set_cloud_formation_dict['TTL'] = resource_record_set.ttl
if resource_record_values:
resource_record_set_cloud_formation_dict['ResourceRecords'] = resource_record_values
if resource_record_set.alias_target:
resource_record_set_cloud_formation_dict['AliasTarget'] = {
"DNSName": resource_record_set.alias_target.dns_name,
"HostedZoneId": resource_record_set.alias_target.hosted_zone_id
}
resource_record_set_cloud_formation_dict_list.append(resource_record_set_cloud_formation_dict)
next_record_name = hosted_zone.next_record_name
if next_record_name:
hosted_zone = ResourceRecordSetList(client.list_resource_record_sets(HostedZoneId=zone_id,
StartRecordName=next_record_name))
else:
hosted_zone = None
return resource_record_set_cloud_formation_dict_list
|
c7775a45763f733e2dc2392b5073f1bf18b7177c
| 3,638,590
|
import numpy as np
def _prepare_line(edges, nodes):
"""prepare a plotly scatter3d line plot so that a set of disconnected edges
can be drawn as a single line.
`edges` are values associated with each edge (that get mapped to colors
through a colorscale). `nodes` are pairs of (source, target) node indices
for each edge.
the color of a line segment in plotly is a mixture of the colors associated
with the points it connects. Moreover, segments that begin or end at a
point whose value is `null` are not drawn.
given edges = [eab, ecd, eef] and nodes = [(a, b), (c, d), (e, f)], this
function returns:
path_edges: eab eab 0 ecd ecd 0 eef eef 0
path_nodes: a b 0 c d 0 e f 0
moreover the javascript code replaces every third element (the '0' in the
lists above) with `null`, so only the a-b, c-d, and e-f segments will get
plotted, and their colors are correct because both their start and end
points are associated with the same value.
"""
path_edges = np.zeros(len(edges) * 3, dtype=int)
path_edges[::3] = edges
path_edges[1::3] = edges
path_nodes = np.zeros(len(nodes) * 3, dtype=int)
path_nodes[::3] = nodes[:, 0]
path_nodes[1::3] = nodes[:, 1]
return path_edges, path_nodes
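# Added usage sketch (not part of the original snippet): every edge value and
# node pair is followed by a 0 placeholder (replaced by null on the JS side).
edges = np.array([10, 20, 30])
nodes = np.array([[0, 1], [2, 3], [4, 5]])
pe, pn = _prepare_line(edges, nodes)
assert list(pe) == [10, 10, 0, 20, 20, 0, 30, 30, 0]
assert list(pn) == [0, 1, 0, 2, 3, 0, 4, 5, 0]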
|
be95f58a3938b628c89639d3311799eb359c19d2
| 3,638,592
|
import getpass
from password_strength import PasswordPolicy  # assumed source of PasswordPolicy (not shown in the original snippet)
from termcolor import colored  # assumed source of colored (not shown in the original snippet)
def validate_password( password:str ) -> bool:
    """ Validates the password against a password policy.
Args:
password ( str, required ):
password to verify.
Returns:
valid ( bool ):
True if the password meets validity requirements.
"""
policy = PasswordPolicy.from_names(
strength=0.20,
entropybits=10,
length=6,
)
if not password:
return False
tested_pass = policy.password(password)
result = tested_pass.test()
if len(result) > 0:
print(colored('Password not strong enough. Try increasing the length of the password or the password complexity'))
return False
password_verification = getpass.getpass("Retype your password: ")
if password != password_verification:
print("Passwords do not match")
return False
return True
|
eec09ad86d89184c4f87a8c0710e3af28f874429
| 3,638,593
|
from typing import Iterable
from typing import List
from typing import Dict
from typing import Any
def build_webhooks(
handlers_: Iterable[handlers.WebhookHandler],
*,
resources: Iterable[references.Resource],
name_suffix: str,
client_config: reviews.WebhookClientConfig,
persistent_only: bool = False,
) -> List[Dict[str, Any]]:
"""
Construct the content for ``[Validating|Mutating]WebhookConfiguration``.
This function concentrates all conventions how Kopf manages the webhook.
"""
return [
{
'name': _normalize_name(handler.id, suffix=name_suffix),
'sideEffects': 'NoneOnDryRun' if handler.side_effects else 'None',
'failurePolicy': 'Ignore' if handler.ignore_failures else 'Fail',
'matchPolicy': 'Equivalent',
'rules': [
{
'apiGroups': [resource.group],
'apiVersions': [resource.version],
'resources': (
[resource.plural] if handler.subresource is None else
[f'{resource.plural}/{handler.subresource}']
),
'operations': ['*'] if handler.operation is None else [handler.operation],
'scope': '*', # doesn't matter since a specific resource is used.
}
for resource in resources
if handler.selector is not None # None is used only in sub-handlers, ignore here.
if handler.selector.check(resource)
],
'objectSelector': _build_labels_selector(handler.labels),
'clientConfig': _inject_handler_id(client_config, handler.id),
'timeoutSeconds': 30, # a permitted maximum is 30.
'admissionReviewVersions': ['v1', 'v1beta1'], # only those understood by Kopf itself.
}
for handler in handlers_
if not persistent_only or handler.persistent
]
|
fc5ca5de1f09c40e08ea8918319b07186af2fe94
| 3,638,594
|
def ndo_real(data, n):
"""mimic of gmx_fio_ndo_real in gromacs"""
return [data.unpack_real() for i in range(n)]
|
875edd4c78e591fcee1b3de30f0ed62a4d0b074d
| 3,638,595
|
from typing import Union
from typing import Optional
def get_field_type(field: Union[syntax.Field, syntax.Command], idl_file: syntax.IDLParsedSpec,
idl_file_path: str) -> Optional[Union[syntax.Enum, syntax.Struct, syntax.Type]]:
"""Resolve and get field type of a field from the IDL file."""
parser_ctxt = errors.ParserContext(idl_file_path, errors.ParserErrorCollection())
field_type = idl_file.spec.symbols.resolve_field_type(parser_ctxt, field, field.name,
field.type)
if parser_ctxt.errors.has_errors():
parser_ctxt.errors.dump_errors()
return field_type
|
19445d7a142b940ff3cd0c445e716c070eeac489
| 3,638,596
|
def get_status():
"""get the node status and return data"""
return data({})
|
0314331d249cebfeb63941961793fe9a72e0c329
| 3,638,598
|
from dask.base import tokenize
def read_orc(path, columns=None, storage_options=None, **kwargs):
"""Read cudf dataframe from ORC file(s).
Note that this function is mostly borrowed from upstream Dask.
Parameters
----------
path: str or list(str)
Location of file(s), which can be a full URL with protocol specifier,
and may include glob character if a single string.
columns: None or list(str)
Columns to load. If None, loads all.
storage_options: None or dict
Further parameters to pass to the bytes backend.
Returns
-------
cudf.DataFrame
"""
storage_options = storage_options or {}
fs, fs_token, paths = get_fs_token_paths(
path, mode="rb", storage_options=storage_options
)
schema = None
nstripes_per_file = []
for path in paths:
with fs.open(path, "rb") as f:
o = orc.ORCFile(f)
if schema is None:
schema = o.schema
elif schema != o.schema:
raise ValueError(
"Incompatible schemas while parsing ORC files"
)
nstripes_per_file.append(o.nstripes)
schema = _get_pyarrow_dtypes(schema, categories=None)
if columns is not None:
ex = set(columns) - set(schema)
if ex:
raise ValueError(
"Requested columns (%s) not in schema (%s)" % (ex, set(schema))
)
else:
columns = list(schema)
with fs.open(paths[0], "rb") as f:
meta = cudf.read_orc(f, stripe=0, columns=columns, **kwargs)
name = "read-orc-" + tokenize(fs_token, path, columns, **kwargs)
dsk = {}
N = 0
for path, n in zip(paths, nstripes_per_file):
for stripe in range(n):
dsk[(name, N)] = (
_read_orc_stripe,
fs,
path,
stripe,
columns,
kwargs,
)
N += 1
divisions = [None] * (len(dsk) + 1)
return dd.core.new_dd_object(dsk, name, meta, divisions)
|
2f26a088cd849fc21c171767a0db276844341b11
| 3,638,599
|
import torch
def get_bernoulli_sample(probs):
"""Conduct Bernoulli sampling according to a specific probability distribution.
Args:
        probs: (torch.Tensor) A tensor in which each element denotes a probability of 1 in a Bernoulli distribution.
Returns:
A Tensor of binary samples (0 or 1) with the same shape of probs.
"""
if torch.cuda.is_available():
bernoulli_sample = torch.ceil(probs - torch.rand(probs.shape, device=torch.device('cuda')))
else:
bernoulli_sample = torch.ceil(probs - torch.rand(probs.shape))
return bernoulli_sample
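# Added usage sketch (not part of the original snippet): probability 0 always
# yields 0, probability 1 always yields 1, and 0.5 yields either.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
samples = get_bernoulli_sample(torch.tensor([0.0, 1.0, 0.5], device=device))
assert samples[0].item() == 0.0 and samples[1].item() == 1.0
assert samples[2].item() in (0.0, 1.0)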
|
14c45741d47f5eaff24893471425ddd4de7e2e4b
| 3,638,601
|
import numpy as np
def angle_load(root, ext='.angle'):
"""
Load information from the :ref:`Output_angle` file previously created by :func:`.angle_save`.
Args:
root (str): root name for the file to be loaded
ext (str, optional): default ".angle" - extension for the file to be loaded: name = root + ext
Returns:
(tuple): tuple containing:
- ndarray(float): 2D array containing degrees and corresponding values of adf
"""
# open file
path = root + ext
try:
f = open(path, 'r')
except IOError:
utility.err_file('angle_load', path)
text = f.readlines() # read text as lines
for i in range(len(text)):
text[i] = text[i].split() # split each line into list with strings as elements
for i in range(len(text)):
if len(text[i]) > 1:
            # find the beginning of the data
if text[i] == ['DEGREE', 'ADF']:
data = np.array(text[i+1:], dtype=float)
break
return data
|
f1218dc2dc1a6c5ef1c56689111086137b04a786
| 3,638,602
|
import pickle
import numpy as np
def load_newsdata_and_labels():
"""
Read newsdata, return list of documents, each line in list is one document as string.
And list of labels, each line in list is one-hot-encoded class
"""
# read newsdata which is pickled
def read_pickle_one_by_one(pickle_file):
with open(pickle_file, "rb") as t_in:
while True:
try:
yield pickle.load(t_in)
except EOFError:
break
#sentnos = [s for s in read_pickle_one_by_one("sentnos.pkl")] # sentence numbers
labels = [l for l in read_pickle_one_by_one("data_own/labels.pkl")]
#focuses = [f for f in read_pickle_one_by_one("focuses.pkl")]
texts = [t for t in read_pickle_one_by_one("data_own/texts.pkl")]
# assert == len(labels) == len(texts) # == len(sentnos) == len(focuses)
#print("longest text")
#print(max(len(t) for t in texts))
#print(sentnos[23])
#print(texts[23])
#print(focuses[23])
#print(labels[23])
# import copy
# if need real copies, not just new pointers
# new_texts = copy.deepcopy(texts)
    # empty list, same length as texts
new_texts = [None] * len(texts)
# go through list and for each document in list, join list of words to a string
for documentnr, value in enumerate(texts):
#print(document, value)
new_texts[documentnr] = ' '.join(value)
# labels are 5-6 classes. turn them into 1-hot-encoded. 6 classes mentioned in paper, only 5 present in data.
new_labels = np.zeros((len(labels),5))
for labelnr, value in enumerate(labels):
if value[0]==1:
new_labels[labelnr][0]=1 #one hot to true
elif value[0]==0.7:
new_labels[labelnr][1]=1
elif value[0]==0.5:
new_labels[labelnr][2]=1
elif value[1]==0.7:
new_labels[labelnr][3]=1
elif value[0]==0:
new_labels[labelnr][4]=1
x_text = new_texts
y = new_labels
return [x_text, y]
|
3838cbd42e5bab898fc969ebe9b4f326c736c773
| 3,638,605
|
def CollectUniqueByOrderOfAppearance(dataset:list):
"""
    This method collects all unique elements in order of appearance and returns them as a list.
:param dataset:list: dataset list
"""
try:
seen = set()
seen_add = seen.add
return [x for x in dataset if not (x in seen or seen_add(x))]
except Exception as ex:
template = "An exception of type {0} occurred in [ContentSupport.CollectUniqueByOrderOfAppearance]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
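# Added usage sketch (not part of the original snippet): duplicates are dropped
# while the first-appearance order is preserved.
assert CollectUniqueByOrderOfAppearance([3, 1, 3, 2, 1]) == [3, 1, 2]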
|
e252d064bf0c525ec1c1781ca6dc915dbc9d46f0
| 3,638,606
|
async def list_slot_set_actions(current_user: User = Depends(Authentication.get_current_user_and_bot)):
"""
Returns list of slot set actions for bot.
"""
actions = mongo_processor.list_slot_set_actions(current_user.get_bot())
return Response(data=actions)
|
f52f1e996b2be38a0e175ca6bfcbfd694dc79240
| 3,638,607
|
from PyQt5 import QtGui, QtWidgets, QtCore
def openfile_dialog(file_types="All files (*)", multiple_files=False,
file_path='.', caption="Select a file..."):
"""
Opens a File dialog which is used in open_file() function
This function uses pyQt5.
Parameters
----------
file_types : str, optional. Default = all
types of files accepted
multiple_files : bool, optional. Default = False
Whether or not multiple files can be selected
file_path: str, optional. Default = '.'
path to starting or root directory
caption: str, optional. Default = "Select a file..."
caption of the open file dialog
Returns
-------
filename : str
full filename with absolute path and extension
Notes
-----
In jupyter notebooks use ``%gui Qt`` early in the notebook.
Examples
--------
>> import sidpy as sid
>> filename = sid.io.openfile_dialog()
>> print(filename)
"""
# Check whether QT is available
    try:
        from PyQt5 import QtGui, QtWidgets, QtCore
    except ImportError:
raise ModuleNotFoundError('Required package PyQt5 not available')
# try to find a parent the file dialog can appear on top
try:
get_QT_app()
except:
pass
for param in [file_path, file_types, caption]:
if param is not None:
            if not isinstance(param, str):
raise TypeError('param must be a string')
parent = None
if multiple_files:
func = QtWidgets.QFileDialog.getOpenFileNames
fnames, file_filter = func(parent, caption, file_path,
filter=file_types,
options=[QtCore.Qt.WindowStaysOnTopHint])
if len(fnames) > 0:
fname = fnames[0]
else:
return
else:
func = QtWidgets.QFileDialog.getOpenFileName
fname, file_filter = func(parent, caption, file_path,
filter=file_types)
if multiple_files:
return fnames
else:
return str(fname)
|
b96112c5af25350b49bc086820bcea32c228d3c8
| 3,638,608
|
from collections import defaultdict
def getBlocks(bal: "BKAlignedLayout"):
"""
Finds all blocks of a given layout.
    :param bal: The layout of which the blocks shall be found
:return: The blocks of the given layout
"""
blocks = defaultdict(list)
for layer in bal.layeredGraph.layers:
for node in layer:
root = bal.root[node]
blockContents = blocks[root]
blockContents.append(node)
return blocks
|
6f40dc209b72747f6960d474d54e1ffedd2fa9a1
| 3,638,609
|
import numpy as np
import pandas as pd
def read_mcmc(path_to_file):
"""
Reads mcmc chain from file
Parameters
----------
path_to_file: string
Path to mcmc chain file
Returns
---------
emcee_table: pandas dataframe
Dataframe of mcmc chain values with NANs removed
"""
colnames = ['mhalo_c','mstellar_c','lowmass_slope','highmass_slope',\
'scatter']
if mf_type == 'smf' and survey == 'eco':
        emcee_table = pd.read_csv(path_to_file,names=colnames,sep=r'\s+',\
dtype=np.float64)
else:
emcee_table = pd.read_csv(path_to_file, names=colnames,
delim_whitespace=True, header=None)
emcee_table = emcee_table[emcee_table.mhalo_c.values != '#']
emcee_table.mhalo_c = emcee_table.mhalo_c.astype(np.float64)
emcee_table.mstellar_c = emcee_table.mstellar_c.astype(np.float64)
emcee_table.lowmass_slope = emcee_table.lowmass_slope.astype(np.float64)
# Cases where last parameter was a NaN and its value was being written to
# the first element of the next line followed by 4 NaNs for the other
# parameters
for idx,row in enumerate(emcee_table.values):
if np.isnan(row)[4] == True and np.isnan(row)[3] == False:
scatter_val = emcee_table.values[idx+1][0]
row[4] = scatter_val
# Cases where rows of NANs appear
emcee_table = emcee_table.dropna(axis='index', how='any').\
reset_index(drop=True)
return emcee_table
|
f8d0cdd5ea5a7274e81992722db4df29d7664e43
| 3,638,610
|
import numpy as np
def rotzV(x, theta):
    """Rotate a coordinate in the local z frame"""
M = [[np.cos(theta), -np.sin(theta), 0], \
[np.sin(theta), np.cos(theta), 0], [0, 0, 1]]
return np.dot(M, x)
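# Added usage sketch (not part of the original snippet): rotating the x unit
# vector by 90 degrees about z gives the y unit vector.
assert np.allclose(rotzV(np.array([1.0, 0.0, 0.0]), np.pi / 2), [0.0, 1.0, 0.0])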
|
43e4f7a8f93fb2b237da1f6ac3f699bf41e38e0a
| 3,638,611
|
import fcntl
def has_flock(fd):
"""
Checks if fd has flock over it
True if it is, False otherwise
:param fd:
:return:
:rtype: bool
"""
try:
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except BlockingIOError:
return True
else:
return False
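# Added usage sketch (not part of the original snippet, POSIX only): a second
# descriptor on the same file sees the lock held through the first one.
import tempfile
with tempfile.NamedTemporaryFile() as tmp:
    fcntl.flock(tmp.fileno(), fcntl.LOCK_EX)
    with open(tmp.name, 'rb') as other:
        assert has_flock(other.fileno()) is True
    fcntl.flock(tmp.fileno(), fcntl.LOCK_UN)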
|
9ae997d06a12d73a659958bc2f0467ebdf0142b7
| 3,638,612
|
def ExtractCodeBySystem(
codable_concept,
system):
"""Extract code in codable_concept."""
for coding in codable_concept.coding:
if (coding.HasField('system') and coding.HasField('code') and
coding.system.value == system):
return coding.code.value
return None
|
e672cb3d2c1d8d65e49d00539cdecf6ee03d1143
| 3,638,613
|
def add_item(data, type):
""" Add an item to the data in ranked order
This function handles the process of adding an item to the list. It
first requests the item from the console. Items are nothing more than a
line of text typed in. Next, this kicks off a type of binary search to
find the proper location of the new item. Then it adds the item to
the data and lastly it prompts to add another.
Args:
data The original list of items
type A label describing the type of items being ranked
Returns
A new list containing the new item
"""
# prompt for item
clear_console()
print_header(' A D D I T E M')
prompt_message = "\nAdd something new for {type}, leave blank to quit ==> ".format(type = type.lower())
thing = input(prompt_message)
while thing != "":
# search for placement
place_at = search_for_spot(thing, data, 0, len(data) - 1)
# add to list
data.insert(place_at, thing)
# prompt for another
thing = input(prompt_message)
return data
|
0662fb19725e0985043d7ea75bad4c5fa55d921f
| 3,638,614
|
def get_full_test_names(testargs, machine, compiler):
###############################################################################
"""
Return full test names in the form:
TESTCASE.GRID.COMPSET.MACHINE_COMPILER.TESTMODS
Testmods are optional
Testargs can be categories or test names and support the NOT symbol '^'
>>> get_full_test_names(["cime_tiny"], "melvin", "gnu")
['ERS.f19_g16_rx1.A.melvin_gnu', 'NCK.f19_g16_rx1.A.melvin_gnu']
>>> get_full_test_names(["cime_tiny", "PEA_P1_M.f45_g37_rx1.A"], "melvin", "gnu")
['ERS.f19_g16_rx1.A.melvin_gnu', 'NCK.f19_g16_rx1.A.melvin_gnu', 'PEA_P1_M.f45_g37_rx1.A.melvin_gnu']
>>> get_full_test_names(['ERS.f19_g16_rx1.A', 'NCK.f19_g16_rx1.A', 'PEA_P1_M.f45_g37_rx1.A'], "melvin", "gnu")
['ERS.f19_g16_rx1.A.melvin_gnu', 'NCK.f19_g16_rx1.A.melvin_gnu', 'PEA_P1_M.f45_g37_rx1.A.melvin_gnu']
>>> get_full_test_names(["cime_tiny", "^NCK.f19_g16_rx1.A"], "melvin", "gnu")
['ERS.f19_g16_rx1.A.melvin_gnu']
>>> get_full_test_names(["cime_test_multi_inherit"], "melvin", "gnu")
['TESTBUILDFAILEXC_P1.f19_g16_rx1.A.melvin_gnu', 'TESTBUILDFAIL_P1.f19_g16_rx1.A.melvin_gnu', 'TESTMEMLEAKFAIL_P1.f09_g16.X.melvin_gnu', 'TESTMEMLEAKPASS_P1.f09_g16.X.melvin_gnu', 'TESTRUNDIFF_P1.f19_g16_rx1.A.melvin_gnu', 'TESTRUNFAILEXC_P1.f19_g16_rx1.A.melvin_gnu', 'TESTRUNFAIL_P1.f19_g16_rx1.A.melvin_gnu', 'TESTRUNPASS_P1.f19_g16_rx1.A.melvin_gnu', 'TESTRUNPASS_P1.f45_g37_rx1.A.melvin_gnu', 'TESTRUNPASS_P1.ne30_g16_rx1.A.melvin_gnu', 'TESTRUNPASS_P2.ne30_g16_rx1.A.melvin_gnu', 'TESTRUNPASS_P4.f45_g37_rx1.A.melvin_gnu', 'TESTRUNSTARCFAIL_P1.f19_g16_rx1.A.melvin_gnu', 'TESTTESTDIFF_P1.f19_g16_rx1.A.melvin_gnu']
"""
expect(machine is not None, "Must define a machine")
expect(compiler is not None, "Must define a compiler")
e3sm_test_suites = get_test_suites()
tests_to_run = set()
negations = set()
for testarg in testargs:
# remove any whitespace in name
testarg = testarg.strip()
if (testarg.startswith("^")):
negations.add(testarg[1:])
elif (testarg in e3sm_test_suites):
tests_to_run.update(get_test_suite(testarg, machine, compiler))
else:
try:
tests_to_run.add(CIME.utils.get_full_test_name(testarg, machine=machine, compiler=compiler))
except Exception:
if "." not in testarg:
expect(False, "Unrecognized test suite '{}'".format(testarg))
else:
raise
for negation in negations:
if (negation in e3sm_test_suites):
tests_to_run -= set(get_test_suite(negation, machine, compiler))
else:
fullname = CIME.utils.get_full_test_name(negation, machine=machine, compiler=compiler)
if (fullname in tests_to_run):
tests_to_run.remove(fullname)
return list(sorted(tests_to_run))
|
05fd19a412172195c2365b0c002c442ceabf7946
| 3,638,617
|
def record_or_not(record_mode, line, start_block, end_block):
""" """
if not record_mode:
if start_block in line:
record_mode = True
elif end_block in line:
record_mode = False
return record_mode
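# Added usage sketch (not part of the original snippet): recording switches on
# at the start marker and off again at the end marker.
mode = record_or_not(False, 'BEGIN data', 'BEGIN', 'END')
assert mode is True
mode = record_or_not(mode, 'END data', 'BEGIN', 'END')
assert mode is False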
|
2b3952ab7fa3aa23ccbd712dee0aa06083b7b5f5
| 3,638,618
|
import numpy as np
def compute_angle_stats(vec_mat, unit='deg'):
    """ Get the mean of the angles between successive vectors used in the reconstruction.
    Returns the mean and standard deviation of the angles.
"""
angles = []
for i in range(vec_mat.shape[1] - 1):
aux = 0
dot_prod = np.dot(vec_mat[i] / np.linalg.norm(vec_mat[i]),
vec_mat[i + 1] / np.linalg.norm(vec_mat[i + 1]))
if dot_prod < 0:
aux = np.pi
angles.append(np.arccos(dot_prod) + aux)
angles = np.asarray(angles)
if unit == 'deg':
angles *= 180 / np.pi
mean = np.mean(angles)
std = np.std(angles)
return (mean, std)
|
d1701a3eca12e41b05a38433a7561f811aceb02c
| 3,638,619
|
def id_test_data(value):
"""generate id"""
return f"action={value.action_name} return={value.return_code}"
|
47649b7302ef2f3ad046fc1c7b3fc18da2687921
| 3,638,620
|
import numpy as np
def kneeJointCenter(frame, hip_JC, delta, vsk=None):
"""Calculate the knee joint center and axis.
Takes in a dictionary of marker names to x, y, z positions, the hip axis
and pelvis axis. Calculates the knee joint axis and returns the knee origin
and axis.
Markers used: RTHI, LTHI, RKNE, LKNE, hip_JC
Subject Measurement values used: RightKneeWidth, LeftKneeWidth
Knee joint center: Computed using Knee Axis Calculation [1]_.
Parameters
----------
frame : dict
        Dictionary of marker names to marker positions.
hip_JC : array
An array of hip_JC containing the x,y,z axes marker positions of the
hip joint center.
delta : float, optional
The length from marker to joint center, retrieved from subject
measurement file.
vsk : dict, optional
A dictionary containing subject measurements.
Returns
-------
R, L, axis : array
Returns an array that contains the knee axis center in a 1x3 array of
xyz values, which is then followed by a 2x3x3
array composed of the knee axis center x, y, and z axis components. The
xyz axis components are 2x3 arrays consisting of the
axis center in the first dimension and the direction of the axis in the
second dimension.
References
----------
.. [1] Baker, R. (2013). Measuring walking : a handbook of clinical gait
analysis. Mac Keith Press.
Notes
-----
delta is changed suitably to knee.
Examples
--------
>>> import numpy as np
>>> from .pyCGM import kneeJointCenter
>>> vsk = { 'RightKneeWidth' : 105.0, 'LeftKneeWidth' : 105.0 }
>>> frame = { 'RTHI': np.array([426.50, 262.65, 673.66]),
... 'LTHI': np.array([51.93, 320.01, 723.03]),
... 'RKNE': np.array([416.98, 266.22, 524.04]),
... 'LKNE': np.array([84.62, 286.69, 529.39])}
>>> hip_JC = [[182.57, 339.43, 935.52],
    ... [309.38, 322.80, 937.98]]
>>> delta = 0
>>> [arr.round(2) for arr in kneeJointCenter(frame,hip_JC,delta,vsk)] #doctest: +NORMALIZE_WHITESPACE
[array([413.2 , 266.22, 464.66]), array([143.55, 279.91, 524.77]), array([[[414.2 , 266.22, 464.6 ],
[413.14, 266.22, 463.66],
[413.2 , 267.22, 464.66]],
[[143.65, 280.89, 524.62],
[142.56, 280.02, 524.85],
[143.65, 280.05, 525.76]]])]
"""
#Get Global Values
mm = 7.0
R_kneeWidth = vsk['RightKneeWidth']
L_kneeWidth = vsk['LeftKneeWidth']
R_delta = (R_kneeWidth/2.0)+mm
L_delta = (L_kneeWidth/2.0)+mm
#REQUIRED MARKERS:
# RTHI
# LTHI
# RKNE
# LKNE
# hip_JC
RTHI = frame['RTHI']
LTHI = frame['LTHI']
RKNE = frame['RKNE']
LKNE = frame['LKNE']
R_hip_JC = hip_JC[1]
L_hip_JC = hip_JC[0]
# Determine the position of kneeJointCenter using findJointC function
R = findJointC(RTHI,R_hip_JC,RKNE,R_delta)
L = findJointC(LTHI,L_hip_JC,LKNE,L_delta)
# Knee Axis Calculation(ref. Clinical Gait Analysis hand book, Baker2013)
#Right axis calculation
thi_kne_R = RTHI-RKNE
# Z axis is Thigh bone calculated by the hipJC and kneeJC
# the axis is then normalized
axis_z = R_hip_JC-R
# X axis is perpendicular to the points plane which is determined by KJC, HJC, KNE markers.
# and calculated by each point's vector cross vector.
# the axis is then normalized.
# axis_x = cross(axis_z,thi_kne_R)
axis_x = cross(axis_z,RKNE-R_hip_JC)
# Y axis is determined by cross product of axis_z and axis_x.
# the axis is then normalized.
axis_y = cross(axis_z,axis_x)
Raxis = np.asarray([axis_x,axis_y,axis_z])
#Left axis calculation
thi_kne_L = LTHI-LKNE
# Z axis is Thigh bone calculated by the hipJC and kneeJC
# the axis is then normalized
axis_z = L_hip_JC-L
# X axis is perpendicular to the points plane which is determined by KJC, HJC, KNE markers.
# and calculated by each point's vector cross vector.
# the axis is then normalized.
# axis_x = cross(thi_kne_L,axis_z)
#using hipjc instead of thigh marker
axis_x = cross(LKNE-L_hip_JC,axis_z)
# Y axis is determined by cross product of axis_z and axis_x.
# the axis is then normalized.
axis_y = cross(axis_z,axis_x)
Laxis = np.asarray([axis_x,axis_y,axis_z])
    # Clear the name of each axis and then normalize it.
R_knee_x_axis = Raxis[0]
R_knee_x_axis = R_knee_x_axis/norm3d(R_knee_x_axis)
R_knee_y_axis = Raxis[1]
R_knee_y_axis = R_knee_y_axis/norm3d(R_knee_y_axis)
R_knee_z_axis = Raxis[2]
R_knee_z_axis = R_knee_z_axis/norm3d(R_knee_z_axis)
L_knee_x_axis = Laxis[0]
L_knee_x_axis = L_knee_x_axis/norm3d(L_knee_x_axis)
L_knee_y_axis = Laxis[1]
L_knee_y_axis = L_knee_y_axis/norm3d(L_knee_y_axis)
L_knee_z_axis = Laxis[2]
L_knee_z_axis = L_knee_z_axis/norm3d(L_knee_z_axis)
#Put both axis in array
# Add the origin back to the vector
y_axis = R_knee_y_axis+R
z_axis = R_knee_z_axis+R
x_axis = R_knee_x_axis+R
Raxis = np.asarray([x_axis,y_axis,z_axis])
# Add the origin back to the vector
y_axis = L_knee_y_axis+L
z_axis = L_knee_z_axis+L
x_axis = L_knee_x_axis+L
Laxis = np.asarray([x_axis,y_axis,z_axis])
axis = np.asarray([Raxis,Laxis])
return [R,L,axis]
|
1f4ab38ef90c79c964587551e05ce32ccf482e53
| 3,638,621
|
def uniform(low: float = 0.0,
high: float = 1.0,
size: tp.Optional[SIZE_TYPE] = None):
"""
Draw samples from a uniform distribution.
"""
if high < low:
raise ValueError("high must not be less than low")
u = _draw_and_reshape(size, rand)
return u * (high - low) + low
|
da79a086a9c129a88e45c11407ca0d3993104f1a
| 3,638,622
|
import math
def toInt():
"""This built-in function casts the current value to Int and returns the result.
"""
def transform_function(current_value: object, record: dict, complete_transform_schema: dict,
custom_variables: dict):
value_to_return = None
if current_value is not None:
try:
clean_current_value = current_value.replace(",", ".")
float_current_value = float(clean_current_value)
value_to_return = int(math.floor(float_current_value))
except:
value_to_return = None
return value_to_return
return transform_function
|
0f58cb6c85ca4015696c7fef2d9378c0466c2422
| 3,638,623
|
import json
def public_upload(request):
"""Public form to upload missing images
:param request: current user request
:type request: django.http.request
:return: rendered response
:rtype: HttpResponse
"""
upload_success = False
if request.method == "POST":
document = Document.objects.get(id=request.POST.get("inputDocument", None))
if document:
uploaded_image = request.FILES.get("inputFile", None)
if uploaded_image:
image = DocumentImage(
document=document,
image=uploaded_image,
name=document.word,
confirmed=False,
)
image.save()
upload_success = True
missing_images = Document.objects.values_list(
"id", "word", "article", "training_sets"
).filter(document_image__isnull=True)
training_sets = (
TrainingSet.objects.values_list("id", "title")
.filter(documents__document_image__isnull=True)
.distinct()
)
context = {
"documents": json.dumps(list(missing_images)),
"training_sets": json.dumps(list(training_sets)),
"upload_success": upload_success,
}
return render(request, "public_upload.html", context)
|
857b558566a52dc2b192efc3b1441a0da11a649c
| 3,638,624
|
import pathlib
import re
def load_classes(fstem):
"""Load all classes from a python file."""
all_classes = []
header = []
forward_refs = []
class_text = None
done_header = False
fname = pathlib.Path('trestle/oscal/tmp') / (fstem + '.py')
with open(fname, 'r', encoding='utf8') as infile:
for r in infile.readlines():
# collect forward references
if r.find('.update_forward_refs()') >= 0:
forward_refs.append(r)
elif r.find(class_header) == 0: # start of new class
done_header = True
if class_text is not None: # we are done with current class so add it
all_classes.append(class_text)
class_text = ClassText(r, fstem)
else:
if not done_header: # still in header
header.append(r.rstrip())
else:
# this may not be needed
p = re.compile(r'.*Optional\[Union\[([^,]+),.*List\[Any\]')
refs = p.findall(r)
if len(refs) == 1:
logger.info(f'Replaced Any with {refs[0]} in {fstem}')
r_orig = r
r = r.replace('List[Any]', f'List[{refs[0]}]')
logger.info(f'{r_orig} -> {r}')
class_text.add_line(r.rstrip())
all_classes.append(class_text) # don't forget final class
# force all oscal versions to the current one
all_classes = constrain_oscal_version(all_classes)
return all_classes
|
da4baaed8d849b90c51200b778bdde9a47d58cc4
| 3,638,625
|
def build_optimizer(name, lr=0.001, **kwargs):
"""Get an optimizer for TensorFlow high-level API Estimator.
Args:
        name (str): Optimizer name. Note: for 'Momentum', a `momentum` value
            can be passed via kwargs; it defaults to 0.9 otherwise.
lr (float): Learning rate.
kwargs (dictionary): Optimizer arguments.
Returns:
tf.train.Optimizer
"""
if name == 'Adadelta':
optimizer = tf.train.AdadeltaOptimizer(learning_rate=lr, **kwargs)
elif name == 'Adagrad':
optimizer = tf.train.AdagradOptimizer(learning_rate=lr, **kwargs)
elif name == 'Adam':
optimizer = tf.train.AdamOptimizer(learning_rate=lr, **kwargs)
elif name == 'Ftrl':
optimizer = tf.train.FtrlOptimizer(learning_rate=lr, **kwargs)
elif name == 'Momentum':
if 'momentum' in kwargs:
optimizer = tf.train.MomentumOptimizer(learning_rate=lr, **kwargs)
else:
optimizer = tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9, **kwargs)
elif name == 'RMSProp':
optimizer = tf.train.RMSPropOptimizer(learning_rate=lr, **kwargs)
elif name == 'SGD':
optimizer = tf.train.GradientDescentOptimizer(learning_rate=lr, **kwargs)
else:
raise ValueError(
"""Optimizer name should be either 'Adadelta', 'Adagrad', 'Adam',
'Ftrl', 'Momentum', 'RMSProp', or 'SGD'"""
)
return optimizer
|
c04c9348f26fdf25951f362a693cb70765133756
| 3,638,627
|
import yaml
def generate_pruning_config(model_name,
sparsity,
begin_step=0,
end_step=-1,
schedule='ConstantSparsity',
granularity='BlockSparsity',
respect_submatrix=False,
two_over_four_chin=False,
ch_share=True,
path=None):
"""Generate a model pruning config out of sparsity configuration.
Arguments:
model_name: A `str`. 'mnist', 'resnet56' (CIFAR-10), 'resnet50' (ImageNet),
or 'mobilenetV1'.
sparsity: A `dict`. Keys are `str` representing layer names (or possibly a
regexp pattern), and values are sparsity (must be convertible to float).
begin_step: Step at which to begin pruning. `0` by default.
end_step: Step at which to end pruning. `-1` by default. `-1` implies
continuing to prune till the end of training (available only for
'ConstantSparsity' schedule).
schedule: 'ConstantSparsity' or 'PolynomialDecay'.
granularity: 'ArayaMag', 'BlockSparsity', 'ChannelPruning', 'KernelLevel',
      'QuasiCyclic', or 'TwoOutOfFour'.
respect_submatrix: A `bool`. Whether or not to mask weight tensors
submatrix-wise.
two_over_four_chin: A `bool`. Whether or not to realize two-out-of-four
sparsity pattern along input channels. Defaults to `False`, in which case
the sparsity pattern is achieved along the output channels.
    ch_share: A `bool`. Whether or not to share channel-pruning masks across
      related layers (only used when granularity is 'ChannelPruning').
path: `None` or a `str`. If `str`, saves the model pruning config as YAML
file.
Returns:
A ModelPruningConfig instance.
"""
def get_pruning_schedule_config(_sparsity):
_sparsity = float(_sparsity)
config = dict(begin_step=begin_step, end_step=end_step, frequency=100)
if schedule == 'ConstantSparsity':
config['target_sparsity'] = _sparsity
elif schedule == 'PolynomialDecay':
config['initial_sparsity'] = 0.
config['final_sparsity'] = _sparsity
config['power'] = 3
else:
raise ValueError
return pruning_base_configs.PruningScheduleConfig(
class_name=schedule,
config=config
)
def get_pruning_granularity_config(_sparsity):
_sparsity = float(_sparsity)
config = dict()
if granularity in ('ArayaMag', 'QuasiCyclic'):
config['gamma'] = int(1/(1.0 - _sparsity))
if respect_submatrix:
config['respect_submatrix'] = True
elif granularity == 'BlockSparsity':
config['block_size'] = [1, 1]
config['block_pooling_type'] = 'AVG'
elif granularity == 'ChannelPruning':
config['ch_axis'] = -1
elif granularity == 'KernelLevel':
config['ker_axis'] = [0, 1]
elif granularity == 'TwoOutOfFour':
block_axis = -2 if two_over_four_chin else -1
config['block_axis'] = block_axis
if respect_submatrix:
config['respect_submatrix'] = True
else:
raise ValueError
return pruning_base_configs.PruningGranularityConfig(
class_name=granularity,
config=config,
)
def get_pruning_config(_sparsity):
return pruning_base_configs.PruningConfig(
pruning_schedule=get_pruning_schedule_config(_sparsity),
pruning_granularity=get_pruning_granularity_config(_sparsity),
)
model_pruning_config = pruning_base_configs.ModelPruningConfig(
model_name=model_name,
pruning=[]
)
for layer_name, _sparsity in sparsity.items():
layer_pruning_config = pruning_base_configs.LayerPruningConfig(
layer_name=layer_name,
pruning = [
pruning_base_configs.WeightPruningConfig(
weight_name='kernel',
pruning=get_pruning_config(_sparsity),
)
]
)
model_pruning_config.pruning.append(layer_pruning_config)
if granularity == 'ChannelPruning' and ch_share:
if model_name.startswith('resnet'):
model_pruning_config.share_mask = _get_resnet_share_mask(model_name)
if path:
def save_params_dict_to_yaml(params, file_path):
"""Saves the input ParamsDict to a YAML file.
Taken from params_dict.save_params_dict_to_yaml.
"""
with tf.io.gfile.GFile(file_path, 'w') as f:
#def _my_list_rep(dumper, data):
# # u'tag:yaml.org,2002:seq' is the YAML internal tag for sequence.
# return dumper.represent_sequence(
# u'tag:yaml.org,2002:seq', data, flow_style=True)
#
#yaml.add_representer(list, _my_list_rep)
yaml.dump(params.as_dict(), f, default_flow_style=False)
save_params_dict_to_yaml(model_pruning_config, path)
return model_pruning_config
|
42f566ce574b53d6effafcec18984c201fba7f92
| 3,638,628
|
import numpy as np
def collect_FR_dev(stim_array,stim_dt,sim_dt,spikemon,n,return_spikes=False):
"""
get all firing rates for a given spikemon
stim_array: array of stimulation time/strengths, e.g., [0,0,0,0,1,0,0,0,1,0,0]
stim_dt: time interval of stimulation
sim_dt: time interval of simulation
    spikemon: spike monitor object whose spike times are used
    returns:
    FR_array, dev_array: per-stimulus firing-rate mean and deviation
    spikelist: (n,len(stim_start_times)) matrix of spike counts.
"""
#print 'spikemon.i min',np.amin(spikemon.i),np.amax(spikemon.i)
spikemon_t = spikemon
# get all stim start times (index position*stim_dt)
stim_start_times = np.where(stim_array!=0)[0]*stim_dt
# preallocate firing rate array and standard deviation
FR_array = np.zeros(len(stim_start_times))
dev_array = np.zeros(len(stim_start_times))
spikelist = np.zeros((n,len(stim_start_times)))
for i in range(len(stim_start_times)):
FR_array[i],dev_array[i],spikelist[:,i] = get_FR_dev(stim_start_times[i],stim_start_times[i]+stim_dt,sim_dt,spikemon,n)
#print 'type',type(dev_array)
return FR_array,dev_array,spikelist
|
05914a68085112e0b1b4353dbd6434fb3e59c7c8
| 3,638,629
|
import json
def assertDict(s):
""" Assert that the input is a dictionary. """
if isinstance(s,str):
try:
s = json.loads(s)
except:
raise AssertionError('String "{}" cannot be json-decoded.'.format(s))
if not isinstance(s,dict): raise AssertionError('Variable "{}" is not a dictionary.'.format(s))
return s
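# Added usage sketch (not part of the original snippet): JSON strings are
# decoded, dicts pass through, and anything else raises AssertionError.
assert assertDict('{"a": 1}') == {'a': 1}
assert assertDict({'a': 1}) == {'a': 1}
try:
    assertDict('[1, 2]')
except AssertionError:
    pass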
|
302defb4e1eecc9a6171cda0401947e3251be585
| 3,638,630
|
import math
from shapely.geometry import MultiPolygon, Polygon
def _consolidate_subdivide_geometry(geometry, max_query_area_size):
"""
Consolidate and subdivide some geometry.
Consolidate a geometry into a convex hull, then subdivide it into smaller
sub-polygons if its area exceeds max size (in geometry's units).
Parameters
----------
geometry : shapely Polygon or MultiPolygon
the geometry to consolidate and subdivide
max_query_area_size : float
max area for any part of the geometry in geometry's units:
any polygon bigger will get divided up for multiple queries to API
Returns
-------
geometry : Polygon or MultiPolygon
"""
# let the linear length of the quadrats (with which to subdivide the
# geometry) be the square root of max area size
quadrat_width = math.sqrt(max_query_area_size)
if not isinstance(geometry, (Polygon, MultiPolygon)):
raise TypeError("Geometry must be a shapely Polygon or MultiPolygon")
# if geometry is a MultiPolygon OR a single Polygon whose area exceeds the
# max size, get the convex hull around the geometry
if isinstance(geometry, MultiPolygon) or (
isinstance(geometry, Polygon) and geometry.area > max_query_area_size
):
geometry = geometry.convex_hull
# if geometry area exceeds max size, subdivide it into smaller sub-polygons
if geometry.area > max_query_area_size:
geometry = _quadrat_cut_geometry(geometry, quadrat_width=quadrat_width)
if isinstance(geometry, Polygon):
geometry = MultiPolygon([geometry])
return geometry
|
32fbafcf3066cc73c022fc1482030d35387107c2
| 3,638,631
|
import ast
from typing import Tuple
from typing import List
from typing import Any
def get_function_args(node: ast.FunctionDef) -> Tuple[List[Any], List[Any]]:
"""
    This function processes a function definition and extracts all
arguments used by a given function and return all optional and non-optional
args used by the function.
Args:
node: Function node containing function that needs to be analyzed
Returns:
(non_optional_args, optional_args): named function args
"""
assert (
type(node) == ast.FunctionDef
), "Incorrect node type. Expected ast.FunctionDef, got {}".format(type(node))
total_args = len(node.args.args)
default_args = len(node.args.defaults)
optional_args = []
non_optional_args = []
# Handle positional args
for i in range(total_args):
if i + default_args < total_args:
non_optional_args.append(node.args.args[i].arg)
else:
optional_args.append(node.args.args[i].arg)
# Handle named args
for arg in node.args.kwonlyargs:
optional_args.append(arg.arg)
return non_optional_args, optional_args
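# Added usage sketch (not part of the original snippet): positional args
# without defaults are required, the rest (including keyword-only) are optional.
tree = ast.parse("def f(a, b, c=1, *, d=2):\n    pass")
required, optional = get_function_args(tree.body[0])
assert required == ['a', 'b']
assert optional == ['c', 'd']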
|
a4fe9dccedd5684050a7d5e7949e384dd4021035
| 3,638,633
|
def svn_client_copy3(*args):
"""
svn_client_copy3(svn_commit_info_t commit_info_p, char src_path, svn_opt_revision_t src_revision,
char dst_path,
svn_client_ctx_t ctx, apr_pool_t pool) -> svn_error_t
"""
    return _client.svn_client_copy3(*args)
|
336aaec7d6c2a44f22d6b1164316f16b4fd5f53f
| 3,638,634
|
import json
from copy import copy
from warnings import warn
def delete(server = None, keys = None):
"""
Marks an entity or entities as deleted on the server. Until an entity
is permanently deleted (an administrative operation, not available
through the RESTful API), it can still be accessed, but will not turn
up in search results.
:param server: a :class:`~pyCoalesce.coalesce_request.CoalesceServer`
object or the URL of a Coalesce server
:param keys: a UUID key of the entity to be deleted, or an iterable of
such keys. Each key can be an instance of the :class:`uuid.UUID`
class, or any string or integer that could serve as input to the
:class:`UUID <uuid.UUID>` class constructor.
:returns: ``True`` if the returned status code is 204 (indicating a
successful deletion), ``False`` (with a warning) in the unlikely
event that the server returns another status code in the 200's.
(Any value outside the 200's will cause an exception.)
"""
if isinstance(server, str):
server_obj = CoalesceServer(server)
else:
server_obj = server
# Figure out whether we have one key or an iterable of them, check the
# validity of each, and transform them into a JSON array.
if keys:
# Test for a single key--a list of keys or a JSON array as a string
# will cause "_test_key" to throw an error.
try:
key_str = _test_key(keys)
except TypeError: # "keys" is probably a list of keys.
keys_list = [_test_key(key) for key in keys]
keys_str = json.dumps(keys_list)
except ValueError: # "keys" is probably a JSON array of keys.
json.loads(keys) # Make sure that "keys" is valid JSON.
keys_str = keys
else:
keys_str = '["' + key_str + '"]'
operation = "delete"
try:
API_URL = _construct_URL(server_obj = server_obj,
operation = operation)
except AttributeError as err:
raise AttributeError(str(err) + '\n.This error can occur if the ' +
'argument "server" is not either a URL or a ' +
'CoalesceServer object.')
method = OPERATIONS[operation]["method"]
headers = copy(server_obj.base_headers)
headers["Content-type"] = "application/json"
# Submit the request.
response = get_response(URL = API_URL, method = method, data = keys_str,
headers = headers, delay = 1, max_attempts = 4)
# Check for the appropriate status code.
status = response.status_code
if status == 204:
success = True
else:
warn("The API server returned an unexpected status code, " + status +
". However, the entity might have been deleted on the server, " +
"or might be deleted after a delay.", UnexpectedResponseWarning)
success = False
return success
|
b3662ba85b1aacfca6034da5d5e198a5ffada2fa
| 3,638,635
|
import collections
import csv
def ParseMemCsv(f):
"""Compute summary stats for memory.
vm5_peak_kib -> max(vm_peak_kib) # over 5 second intervals. Since it uses
the kernel, it's accurate except for takes that spike in their last 4
seconds.
vm5_mean_kib -> mean(vm_size_kib) # over 5 second intervals
"""
peak_by_pid = collections.defaultdict(list)
size_by_pid = collections.defaultdict(list)
# Parse columns we care about, by PID
c = csv.reader(f)
for i, row in enumerate(c):
if i == 0:
continue # skip header
# looks like timestamp, pid, then (rss, peak, size)
_, pid, _, peak, size = row
if peak != '':
peak_by_pid[pid].append(int(peak))
if size != '':
size_by_pid[pid].append(int(size))
mem_by_pid = {}
# Now compute summaries
pids = peak_by_pid.keys()
for pid in pids:
peaks = peak_by_pid[pid]
vm5_peak_kib = max(peaks)
sizes = size_by_pid[pid]
vm5_mean_kib = sum(sizes) / len(sizes)
mem_by_pid[pid] = (vm5_peak_kib, vm5_mean_kib)
return mem_by_pid
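# Added usage sketch (not part of the original snippet): one PID with two
# samples gives peak = max(peaks) and mean = mean(sizes).
import io
csv_text = ("timestamp,pid,rss,peak,size\n"
            "1,42,100,2048,1024\n"
            "2,42,110,4096,3072\n")
assert ParseMemCsv(io.StringIO(csv_text)) == {'42': (4096, 2048)}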
|
5d10a0d0ac5ab3d3e99ff5fd4c9ca6cd0b74656b
| 3,638,636
|
def index_containing_substring(list_str, substring):
"""For a given list of strings finds the index of the element that contains the
substring.
Parameters
----------
list_str: list of strings
substring: substring
Returns
-------
index: containing the substring or -1
"""
for i, s in enumerate(list_str):
if substring in s:
return i
return -1
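# Added usage sketch (not part of the original snippet):
assert index_containing_substring(['alpha', 'beta', 'gamma'], 'amm') == 2
assert index_containing_substring(['alpha', 'beta'], 'zzz') == -1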
|
2816899bc56f6b2c305192b23685d3e803b420df
| 3,638,637
|
import pycountry
import gettext
import six
def _localized_country_list_inner(locale):
"""
Inner function supporting :func:`localized_country_list`.
"""
if locale == 'en':
countries = [(country.name, country.alpha_2) for country in pycountry.countries]
else:
pycountry_locale = gettext.translation('iso3166-1', pycountry.LOCALES_DIR, languages=[locale])
if six.PY2:
countries = [(pycountry_locale.gettext(country.name).decode('utf-8'), country.alpha_2) for country in pycountry.countries]
else:
countries = [(pycountry_locale.gettext(country.name), country.alpha_2) for country in pycountry.countries]
countries.sort()
return [(code, name) for (name, code) in countries]
|
c67b107d10b8bc2426de39f11c30e886b5fc2894
| 3,638,638
|
def ingest_questions(questions: dict, assignment: Assignment):
"""
questions: [
{
sequence: int
questions: [
{
q: str // what is 2*2
a: str // 4
},
]
},
...
]
response = {
rejected: [ ... ]
ignored: [ ... ]
accepted: [ ... ]
}
:param questions:
:param assignment:
:return:
"""
question_shape = {"questions": {"q": str, "a": str}, "sequence": int}
if questions is None:
return
# Iterate over questions
rejected, ignored, accepted = [], [], []
for question_sequence in questions:
shape_good, err_path = _verify_data_shape(question_sequence, question_shape)
if not shape_good:
# Reject the question if the shape is bad and continue
rejected.append(
{
"question": question_sequence,
"reason": "could not verify data shape " + err_path,
}
)
continue
pool = question_sequence["pool"]
for question in question_sequence["questions"]:
# Check to see if question already exists for the current
# assignment
exists = AssignmentQuestion.query.filter(
AssignmentQuestion.assignment_id == assignment.id,
AssignmentQuestion.question == question["q"],
).first()
if exists is not None:
# If the question exists, ignore it and continue
ignored.append({"question": question, "reason": "already exists"})
continue
# Create the new question from posted data
assignment_question = AssignmentQuestion(
assignment_id=assignment.id,
question=question["q"],
solution=question["a"],
pool=pool,
)
db.session.add(assignment_question)
accepted.append({"question": question})
# Commit additions
db.session.commit()
return accepted, ignored, rejected
|
d72370bcaa5cf1f5017eda827cca6dd011ac36d0
| 3,638,639
|
import datetime
def render_book_template(book_id):
"""
Find a specific book in the database.
Locate the associated reviews (sorted by score and date).
Create the purchase url.
Check whether the user has saved the book to their wishlist.
"""
# Find the book document in the database
this_book = mongo.db.books.find_one({"_id": ObjectId(book_id)})
# Find the reviews that relate to that book
this_book_reviews = list(
mongo.db.reviews.find({"book_id": ObjectId(book_id)})
)
# Sort by review score and then by date added
sorted_book_reviews = sorted(
this_book_reviews,
key=lambda b: (-b["review_score"], -b["review_date"]),
)
# Create the book purchase url
# by adding the book title and author to the url
this_book_title = this_book["title"].replace(" ", "+")
this_book_author = this_book["authors"][0].replace(" ", "+")
book_purchase_url = (
"https://www.amazon.com/s?tag=falsetag&k=" +
this_book_title + "+" + this_book_author
)
# Create a list of users who have reviewed this book already
reviewers = []
for book_review in this_book_reviews:
# Convert floats to datetime format in each book review
book_review["review_date"] = datetime.datetime.fromtimestamp(
book_review["review_date"]
).strftime("%a, %b %d, %Y")
# Add reviewers to the reviewers list
reviewers.append(book_review["created_by"])
bookmark = False
purchased = False
# If the session cookie exists then the user is logged in
if session:
# Grab the session user's wishlist from the database
wishlist = mongo.db.users.find_one({"username": session["user"]})[
"wishlist"
]
# Check to see whether the current user
# has already saved this book to their wishlist
        # If so, set the bookmark flag
if this_book["_id"] in wishlist:
bookmark = True
# Check and see whether the current user has reviewed this book
# If they have presumably they don't want to purchase the book
if session["user"] in reviewers:
purchased = True
return render_template(
"view_book.html",
this_book=this_book,
this_book_reviews=sorted_book_reviews,
book_purchase_url=book_purchase_url,
reviewers=reviewers,
bookmark=bookmark,
purchased=purchased,
)
|
d30b30b79b102b1c08404bc00c69b1f22ccebc6a
| 3,638,640
|
def iatan2(y,x):
"""One coordinate must be zero"""
if x == 0:
return 90 if y > 0 else -90
else:
return 0 if x > 0 else 180
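# Behaviour on the four axis directions (angles in degrees); inputs are
# arbitrary examples:
print(iatan2(5, 0))   # -> 90
print(iatan2(-5, 0))  # -> -90
print(iatan2(0, 3))   # -> 0
print(iatan2(0, -3))  # -> 180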
|
a0b18b61d7ffadf864a94299bc4a3a0aacd7c65a
| 3,638,641
|
import torch
def fuse_bn_sequential(model):
"""
This function takes a sequential block and fuses the batch normalization with convolution
:param model: nn.Sequential. Source resnet model
:return: nn.Sequential. Converted block
"""
if not isinstance(model, torch.nn.Sequential):
return model
stack = []
for m in model.children():
if isinstance(m, torch.nn.BatchNorm2d):
if isinstance(stack[-1], torch.nn.Conv2d):
bn_st_dict = m.state_dict()
conv_st_dict = stack[-1].state_dict()
# BatchNorm params
eps = m.eps
mu = bn_st_dict['running_mean']
var = bn_st_dict['running_var']
gamma = bn_st_dict['weight']
if 'bias' in bn_st_dict:
beta = bn_st_dict['bias']
else:
beta = torch.zeros(gamma.size(0)).float().to(gamma.device)
# Conv params
W = conv_st_dict['weight']
if 'bias' in conv_st_dict:
bias = conv_st_dict['bias']
else:
bias = torch.zeros(W.size(0)).float().to(gamma.device)
denom = torch.sqrt(var + eps)
b = beta - gamma.mul(mu).div(denom)
A = gamma.div(denom)
bias *= A
A = A.expand_as(W.transpose(0, -1)).transpose(0, -1)
W.mul_(A)
bias.add_(b)
stack[-1].weight.data.copy_(W)
if stack[-1].bias is None:
stack[-1].bias = torch.nn.Parameter(bias)
else:
stack[-1].bias.data.copy_(bias)
else:
stack.append(m)
if len(stack) > 1:
return torch.nn.Sequential(*stack)
else:
return stack[0]
|
6d31cd2cd73e8dc91098b7f9cc7f70ce3b81a3b9
| 3,638,642
|
def get_baseconf_settings( baseconf_settings_filename = None ):
"""
Returns the basic configuration settings as a parameter structure.
:param baseconf_settings_filename: loads the settings from the specified filename, otherwise from the default filename or in the absence of such a file creates default settings from scratch.
:return: parameter structure
"""
# These are the parameters for the general I/O and example cases
baseconf_params = pars.ParameterDict()
baseconf_params[('baseconf',{},'determines if settings should be loaded from file and visualization options')]
if baseconf_settings_filename is not None:
print( 'Loading baseconf configuration from: ' + baseconf_settings_filename )
baseconf_params.load_JSON( baseconf_settings_filename )
return baseconf_params
else:
print( 'Using default baseconf settings from config_parser.py')
baseconf_params['baseconf'][('load_default_settings_from_default_setting_files',False,'if set to True default configuration files (in settings directory) are first loaded')]
baseconf_params['baseconf'][('load_settings_from_file',True,'if set to True configuration settings are loaded from file')]
baseconf_params['baseconf'][('save_settings_to_file',True,'if set to True configuration settings are saved to file')]
if not baseconf_params['baseconf']['load_default_settings_from_default_setting_files']:
print('HINT: Only compute_settings.json and baseconf_settings.json will be read from file by default.')
print('HINT: Set baseconf.load_default_settings_from_default_setting_files to True if you want to use the other setting files in directory settings.')
print('HINT: Otherwise the defaults will be as defined in config_parser.py.')
return baseconf_params
|
0b0b829f4923072431b8e73c7fd70e732f17dc30
| 3,638,643
|
import numpy as np
def subtract_background(image, background_image):
    """Subtracts background image from a specified image.
    Returns
    -------
    bs_image : np.ndarray of dtype int | shape = [image.shape]
        Background-subtracted image.
    """
    image = image.copy().astype(int)
    background = background_image.copy().astype(int)
    bs_image = image - background
    return bs_image.astype(int)
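# Minimal usage sketch (synthetic arrays, not from the original source);
# casting to a signed integer type keeps negative differences instead of
# letting uint8 arithmetic wrap around:
img = np.array([[10, 20], [30, 40]], dtype=np.uint8)
bg = np.array([[5, 25], [10, 50]], dtype=np.uint8)
print(subtract_background(img, bg))  # -> [[  5  -5] [ 20 -10]]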
|
c136f78c1f355c2031ef60ca17fb0bd6fc63c94e
| 3,638,645
|
import math
import numpy as np
def batch_genomes(genomes, num_batches, order):
"""
Populates 2D numpy array with len(rows)==num_batches in {order} major order.
Using col is for when you know you are using X number of nodes, and want-
to evenly distribute genomes across each node
Use row when you want to fill each node, i.e. you give each node 16 cores-
and would rather have 3 at 32 and 1 at 16 than 4 at 28
"""
total_genomes = len(genomes)
# num_batches designates number of rows in col major order
# but number of cols in row major order
genomes_per_batch = math.ceil(total_genomes/num_batches)
batches = np.empty([num_batches, genomes_per_batch], dtype=object)
if order == 'col':
for i, genome in enumerate(genomes):
batches[i%num_batches][i//num_batches] = genome
elif order == 'row':
for i, genome in enumerate(genomes):
batches[i//genomes_per_batch][i%genomes_per_batch] = genome
else:
raise Exception("Order must be specified as 'col' or 'row'")
return batches
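# Illustrative comparison of the two fill orders (hypothetical genome labels);
# unfilled cells of the object array are left as None:
names = ["g1", "g2", "g3", "g4", "g5"]
print(batch_genomes(names, 2, "col"))  # round-robin: [[g1 g3 g5] [g2 g4 None]]
print(batch_genomes(names, 2, "row"))  # fill rows first: [[g1 g2 g3] [g4 g5 None]]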
|
00fea150ea20ae886fd72f099a1c0bd4216ba987
| 3,638,646
|
import numpy as np
def single_gate_params(gate, params=None):
"""Apply a single qubit gate to the qubit.
Args:
gate(str): the single qubit gate name
params(list): the operation parameters op['params']
Returns:
a tuple of U gate parameters (theta, phi, lam)
"""
if gate == 'U' or gate == 'u3':
return (params[0], params[1], params[2])
elif gate == 'u2':
return (np.pi/2, params[0], params[1])
elif gate == 'u1':
return (0., 0., params[0])
elif gate == 'id':
return (0., 0., 0.)
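# Quick sanity check against the OpenQASM single-qubit gate conventions
# (parameter values below are arbitrary examples):
print(single_gate_params('u2', [0.1, 0.2]))  # -> (pi/2, 0.1, 0.2)
print(single_gate_params('id'))              # -> (0.0, 0.0, 0.0)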
|
153459403639103cdfa9502a26797e9c536ba112
| 3,638,647
|
from time import time
from sklearn.model_selection import GridSearchCV
def runTests(data, targets, pipeline, parameters):
""" Perform grid search with specified pipeline and parameters
on data training set with targets as labels
Evaluate performance based on precision and print parameters
for best estimator
grid search object is returned for further analysis"""
grid_search = GridSearchCV(pipeline, parameters, verbose=1, cv=10, scoring='precision')
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
print(parameters)
t0 = time()
grid_search.fit(data, targets)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
return grid_search
|
122d1330444dd72aa064cd263cb7f405bf2bf9ba
| 3,638,650
|
def isMultipleTagsInput(item):
"""
Returns True if the argument datatype is not a column or a table, and if it allows lists and if it has no permitted value.
This function is used to check whether the argument values have to be delimited by the null character (returns True) or not.
:param item: Table argument.
"""
return item.get('datatype', 'STRING') in ['STRING','DOUBLE','INTEGER','DRIVER','SQLEXPR', 'LONG']\
and item.get('allowsLists', False)\
and not item.get('permittedValues', [])
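# Illustrative checks with hypothetical argument dictionaries:
print(isMultipleTagsInput({'datatype': 'STRING', 'allowsLists': True}))                            # True
print(isMultipleTagsInput({'datatype': 'STRING', 'allowsLists': True, 'permittedValues': ['a']}))  # False
print(isMultipleTagsInput({'datatype': 'COLUMNS', 'allowsLists': True}))                           # False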
|
f7710902e27962fc8df55bc75be2d5d404144aeb
| 3,638,651
|
from fastapi import Form
def remove_url(url: str = Form(...)):
"""
Remove url from the url json file
:param url: api url in the format: http://ip:port/
:return: ApiResponse
"""
try:
payload = helpers.parse_json(url_config_path)
except Exception as e:
return ApiResponse(success=False, error=e)
if url in payload['urls']:
payload['urls'].remove(url)
helpers.write_json(payload, url_config_path)
return ApiResponse(data={"url removed successfully"})
else:
return ApiResponse(success=False, error="url is not present in config file")
|
c7c218926c2992df19b3988987fca8cf2bbff3d1
| 3,638,653
|
import pandas as pd
def load_CSVdata(messages_filepath, categories_filepath):
"""
Load and merge datasets messages and categories
Inputs:
Path to the CSV file containing messages
Path to the CSV file containing categories
Output:
dataframe with merged data containing messages and categories
"""
#reading messages and categories
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
#merge datasets
df = pd.merge(messages,categories,on='id')
return df
|
e216c09ca403545fbc1152a25e966efeb4baeefc
| 3,638,654
|
def accept_invite(payload, user):
"""
Accepts an invite
args: payload, user
ret: response
"""
    try:
        invite = Invites.get(payload['invite'])[0]
    except Exception:
return Message(
Codes.NOT_FOUND,
{ 'message': 'There isn\'t any active invite with the given id.' }
)
if user['id'] != invite[1]:
return Message(
Codes.FORBIDDEN,
{ 'message': 'This invitation was sent to another user.' }
)
UsersGroups.insert(user['id'], invite[2])
Invites.close(invite[0])
return Message(
Codes.SUCCESS,
{ 'message': 'You have successfully joined this group.' }
)
|
bff423eeec7f7f527771934de0f32ede0f528948
| 3,638,655
|
from typing import Tuple
from typing import Dict
from typing import List
import re
def clean_status_output(
input: str,
) -> Tuple[bool, Dict[str, str], List[Dict[str, str]]]:
# example input
"""
# Health check:
# - dns: rename /etc/resolv.conf /etc/resolv.pre-tailscale-backup.conf: device or resource busy
100.64.0.1 test_domain_1 omnet linux -
100.64.0.2 test_network_1 omnet linux active; relay "syd", tx 1188 rx 1040
"""
up = False
peers: List[Dict[str, str]] = []
host: Dict[str, str] = {}
if "Tailscale is stopped." in input:
return up, host, peers
elif "unexpected state: NoState" in input:
return up, host, peers
count = 0
for line in str(input).split("\n"):
matches = re.match(r"^\d.+", line)
if matches is not None:
try:
stat_parts = re.split(r"(\s+)", matches.string)
entry = {}
entry["ip"] = stat_parts[0]
entry["hostname"] = stat_parts[2]
entry["network"] = stat_parts[4]
entry["os"] = stat_parts[6]
connection_info_parts = matches.string.split(entry["os"])
entry["connection_info"] = "n/a"
connection_info = ""
if len(connection_info_parts) > 1:
connection_info = connection_info_parts[1].strip()
entry["connection_info"] = connection_info
entry["connection_status"] = "n/a"
if "active" in connection_info:
entry["connection_status"] = "active"
if "idle" in connection_info:
entry["connection_status"] = "idle"
entry["connection_type"] = "n/a"
if "relay" in connection_info:
entry["connection_type"] = "relay"
if "direct" in connection_info:
entry["connection_type"] = "direct"
if count == 0:
host = entry
count += 1
up = True
else:
peers.append(entry)
except Exception as e:
print("Error parsing tailscale status output", e)
pass
return up, host, peers
|
bbf100514373595948b0691dff857deb5772f019
| 3,638,656
|
def test_tensor_method_mul():
"""test_tensor_method_mul"""
class Net(Cell):
def __init__(self):
super(Net, self).__init__()
self.sub = P.Sub()
def construct(self, x, y):
out = x * (-y)
return out.transpose()
net = Net()
x = ms.Tensor(np.ones([5, 3], np.float32))
y = ms.Tensor(np.ones([8, 5, 3], np.float32))
_executor.compile(net, x, y)
|
89866ebd9311e0bac0e3324b01545507b986751f
| 3,638,657
|
def _get_top_artists(session: Session, limit=100):
"""Gets the top artists by follows of all of Audius"""
top_artists = (
session.query(User)
.select_from(AggregateUser)
.join(User, User.user_id == AggregateUser.user_id)
.filter(AggregateUser.track_count > 0, User.is_current)
.order_by(desc(AggregateUser.follower_count), User.user_id)
.limit(limit)
.all()
)
return helpers.query_result_to_list(top_artists)
|
ae6a45e7190995fc35daf62236e73c4bd5c6235f
| 3,638,658
|
def _get_other_locations():
"""Returns all locations except convention venues."""
if 'all' not in location_cache.keys():
conv_venue = LocationType.objects.get(name='Convention venue')
location_cache['all'] = Location.objects.exclude(loc_type=conv_venue)
return location_cache['all']
|
a34bf432529a31bc013988c394230e55b01ac21b
| 3,638,660
|
import torch
def _check_cuda_version():
"""
Make sure that CUDA versions match between the pytorch install and torchvision install
"""
if not _HAS_OPS:
return -1
_version = torch.ops.torchvision._cuda_version()
if _version != -1 and torch.version.cuda is not None:
tv_version = str(_version)
if int(tv_version) < 10000:
tv_major = int(tv_version[0])
tv_minor = int(tv_version[2])
else:
tv_major = int(tv_version[0:2])
tv_minor = int(tv_version[3])
t_version = torch.version.cuda
t_version = t_version.split('.')
t_major = int(t_version[0])
t_minor = int(t_version[1])
if t_major != tv_major or t_minor != tv_minor:
raise RuntimeError("Detected that PyTorch and torchvision were compiled with different CUDA versions. "
"PyTorch has CUDA Version={}.{} and torchvision has CUDA Version={}.{}. "
"Please reinstall the torchvision that matches your PyTorch install."
.format(t_major, t_minor, tv_major, tv_minor))
return _version
|
d86e209d10514060f0c15bff9ea28df6b2054480
| 3,638,661
|
import arcpy
def largest(layer,field):
"""largest(layer,field)
Returns the largest area significant class in the study area.
"""
theitems = []
rows = arcpy.SearchCursor(layer)
for row in rows:
theitems.append(row.getValue(field))
del rows
theitems.sort()
max1= theitems[-1]
return max1
|
ff6433a5fef48550e902317384dec746136063dc
| 3,638,662
|
import asyncio
import random
async def double_up(ctx):
"""
    Starts the "Double Up Chance!" game.
"""
    depth = 1 # current depth (round number)
HOLE = "\N{HOLE}\N{VARIATION SELECTOR-16}"
LEFT_ARROW = "\N{LEFTWARDS BLACK ARROW}\N{VARIATION SELECTOR-16}"
RIGHT_ARROW = "\N{BLACK RIGHTWARDS ARROW}\N{VARIATION SELECTOR-16}"
TOP_ARROW = "\N{UPWARDS BLACK ARROW}\N{VARIATION SELECTOR-16}"
    emojis = [LEFT_ARROW,RIGHT_ARROW] # emoji list for the normal hole selection
    final_emojis = [LEFT_ARROW,TOP_ARROW,RIGHT_ARROW] # emoji list for the final hole selection
def gold_check(msg):
        # validates the wager input
return msg.author == ctx.author and msg.channel == ctx.channel and msg.content.isdecimal()
embed = discord.Embed(title="ダブルアップ",description=f"{ctx.author.mention} 掛け金を入力してください。",color=0x0000ff)
await ctx.send(embed=embed)
try:
gold_msg = await bot.wait_for("message",check=gold_check,timeout=30.0)
except asyncio.TimeoutError:
embed = discord.Embed(title="エラー",
description=f"{ctx.author.mention} 掛け金の正常な入力が確認されませんでした。コマンドの処理を終了します。",
color=0xff0000)
await ctx.send(embed=embed)
return
gold = int(gold_msg.content)
embed = discord.Embed(title=f"どちらの穴に入るか選ぼう!(このテキストのリアクションをタッチして選択)({depth}回目)",
description=f"{HOLE}\t{HOLE}\n{LEFT_ARROW}\t{RIGHT_ARROW}",color=0x00ff00)
embed.set_footer(text=f"掛け金:{gold * 2} G")
    game_msg = await ctx.send(embed=embed) # game message; from here on it is edited to drive the game
while depth < 5:
await game_msg.edit(embed=embed)
for emoji in emojis:
            await game_msg.add_reaction(emoji) # add the hole-selection emoji as a reaction
def hole_check(reaction,user):
            # validates the hole selection
react_msg = reaction.message
            are_same_msgs = react_msg.id == game_msg.id and react_msg.channel == game_msg.channel # same message check
return are_same_msgs and user == ctx.author and str(reaction.emoji) in emojis
try:
hole_react,user = await bot.wait_for("reaction_add",check=hole_check,timeout=30.0)
except asyncio.TimeoutError:
embed = discord.Embed(title="エラー",
description=f"{ctx.author.mention} 穴の選択が正常に行われませんでした。コマンドの処理を終了します。",
color=0xff0000)
await ctx.send(embed=embed)
return
if random.randrange(2) == 0:
            # lose with probability 1/2
embed = discord.Embed(title=f"はずれー!!",
description=f"{ctx.author.mention} 懲りずに、また挑戦してみてね!",color=0x00ff00)
await ctx.send(embed=embed)
return
depth += 1
gold *= 2
await hole_react.remove(user)
embed = discord.Embed(title=f"当たり!次の穴を選んでね!({depth}回目)",
description=f"{ctx.author.mention}\n{HOLE}\t{HOLE}\n{LEFT_ARROW}\t{RIGHT_ARROW}",
color=random.randrange(0xffffff))
embed.set_footer(text=f"次の掛け金:{gold * 2} G")
embed = discord.Embed(title=f"当たり!次の穴が最後!({depth}回目)",
description=f"{HOLE}\t{HOLE}\t{HOLE}\n{LEFT_ARROW}\t{TOP_ARROW}\t{RIGHT_ARROW}",
color=random.randrange(0xffffff))
embed.set_footer(text=f"掛け金:{gold * 2} G")
await game_msg.edit(embed=embed)
    await game_msg.clear_reactions() # the final round adds a middle hole, so clear all the reactions first
for emoji in final_emojis:
await game_msg.add_reaction(emoji)
def hole_check_final(reaction,user):
        # validates the final hole selection
react_msg = reaction.message
are_same_msgs = react_msg.id == game_msg.id and react_msg.channel == game_msg.channel
return are_same_msgs and user == ctx.author and str(reaction.emoji) in final_emojis
try:
await bot.wait_for("reaction_add",check=hole_check_final,timeout=30.0)
except asyncio.TimeoutError:
embed = discord.Embed(title="エラー",
description=f"{ctx.author.mention} 穴の選択が正常に行われませんでした。コマンドの処理を終了します。",
color=0xff0000)
await ctx.send(embed=embed)
return
if random.randrange(3) != 2:
        # lose with probability 2/3
embed = discord.Embed(title="はずれ。",
description=f"{ctx.author.mention} 君たちは一体今までにいくら貢いだんだろうね",color=0x00ff00)
await ctx.send(embed=embed)
return
gold *= 2
embed = discord.Embed(title="おめでとう!",
description=f"{ctx.author.mention} **{gold}** G入手したよ!\n達成できたのは今回で…何回目だったっけ",color=0x0000ff)
await ctx.send(embed=embed)
|
3ec1394d681fcb0e626e98b11bd815dddbe64254
| 3,638,663
|
import tomlkit
def read_version(file_contents):
"""Read the project setting from pyproject.toml."""
data = tomlkit.loads(file_contents)
details = data["tool"]["poetry"]
return details["version"]
|
7255c199463437765d21658f792a54049bbb45ee
| 3,638,664
|
from datetime import datetime, timedelta
def schedule_time(check_start_time, check_end_time, time_duaration=7) -> dict:
""" Returns dictionary of earliest available time within the next week """
all_busy_events = get_busy_events()
for d in range(1,time_duaration):
# Increment by one day throughout the week
check_day = datetime.today().date() + timedelta(d)
if all_busy_events:
# ! still something wrong
is_day_free = []
is_time_overlapping = False
is_time_free = True
for start,end in [event for event in all_busy_events if event[0].date() == check_day]:
is_time_overlapping = is_time_between(check_start_time, check_end_time, start.time()) and is_time_between(check_start_time, check_end_time, end.time())
is_time_free = not is_time_between(start.time(), end.time(), check_start_time) and not is_time_between(start.time(), end.time(), check_end_time)
is_day_free.append(is_time_free)
if all(is_day_free) and not is_time_overlapping:
appointment_start = datetime.combine(check_day, check_start_time)
appointment_end = datetime.combine(check_day, check_end_time)
return {"start": appointment_start, "end": appointment_end}
else:
            # Schedule time for tomorrow if there are no busy events within the next week
return {"start": datetime.combine(check_day, check_start_time), "end": datetime.combine(check_day, check_end_time)}
|
858387e8d07634df7a143b3ee500e648ed54abd6
| 3,638,665
|
from numpy import array
def _to_array(value):
"""When `value` is a plain Python sequence, return it as a NumPy array."""
if not hasattr(value, 'shape') and hasattr(value, '__len__'):
return array(value)
else:
return value
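# Plain sequences are promoted; anything that already exposes a .shape passes
# through unchanged (inputs below are arbitrary examples):
print(_to_array([1, 2, 3]))             # -> array([1, 2, 3])
print(type(_to_array(array([4, 5]))))   # already a NumPy array, returned as-is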
|
3bf185f34c51dc2042bdb05138b0febd9e89b421
| 3,638,666
|
def dict_pix_to_deg(input_dict, changeN):
"""Convert pix to deg for a given dictionary format,
changeN is 1 or 2, to let the function works for the first
or both elements of the tuple"""
dict_deg = {}
for key, values in input_dict.items():
new_display = []
for display in values:
new_posi = []
for posi in display:
new_posi.append(__pix_to_deg_tuple(posi, changeN))
new_display.append(new_posi)
dict_deg.update({key: new_display})
return dict_deg
|
03c49e113c8805c4d675899f3c61e3ae00bd7681
| 3,638,667
|
def remove_container_name_from_blob_path(blob_path, container_name):
"""
Get the bit of the filepath after the container name.
"""
# container name will often be part of filepath - we want
# the blob name to be the bit after that
    if container_name not in blob_path:
return blob_path
blob_name_parts = []
filepath_parts = split_filepath(blob_path)
container_name_found = False
for path_part in filepath_parts:
if container_name_found:
blob_name_parts.append(path_part)
if path_part == container_name:
container_name_found = True
if len(blob_name_parts) == 0:
return ""
return "/".join(blob_name_parts)
|
e02807abebdf3a193efcabee1dda3f733a780dd5
| 3,638,668
|
from typing import Dict
from typing import List
import numpy as np
def _complex_ar_from_dict(dic: Dict[str, List]) -> np.ndarray:
"""Construct complex array from dictionary of real and imaginary parts"""
out = np.array(dic["real"], dtype=complex)
out.imag = np.array(dic["imag"], dtype=float)
return out
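# Round-trip sketch with a hand-written dictionary (hypothetical values):
d = {"real": [1.0, 2.0], "imag": [0.5, -0.5]}
print(_complex_ar_from_dict(d))  # -> [1.+0.5j 2.-0.5j]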
|
a3938619f84c2dcbec5c9ac90c0064ea380346c4
| 3,638,669
|
def endpoint(url_pattern, method="GET"):
"""
:param url_pattern:
:param method:
:param item:
:return:
"""
def wrapped_func(f):
@wraps(f)
def inner_func(self, *args, **kwargs):
"""
:param self:
:param args:
:param kwargs:
:return:
"""
func_params = translate_params(f, *args, **kwargs)
params = translate_special_params(func_params, self.special_attributes_map)
response = None
if method == "GET":
response = self._get(url_pattern, params=params)
elif method == "POST":
response = self._post(url_pattern, params=params)
if response:
try:
if response.headers["Content-Type"] == "application/json":
return response.json()
else:
return response.text
except Exception as e:
return response.content
return inner_func
return wrapped_func
|
23b68a1440e96eac27926f2a37d96cb74a568734
| 3,638,670
|
import cv2
import numpy as np
from scipy.ndimage import gaussian_filter
def elastic_transform(
image,
alpha,
sigma,
alpha_affine,
interpolation=cv2.INTER_LINEAR,
border_mode=cv2.BORDER_REFLECT_101,
random_state=None,
approximate=False,
):
"""Elastic deformation of images as described in [Simard2003]_ (with modifications).
Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
.. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
Convolutional Neural Networks applied to Visual Document Analysis", in
Proc. of the International Conference on Document Analysis and
Recognition, 2003.
"""
if random_state is None:
random_state = np.random.RandomState(1234)
height, width = image.shape[:2]
# Random affine
center_square = np.float32((height, width)) // 2
square_size = min((height, width)) // 3
alpha = float(alpha)
sigma = float(sigma)
alpha_affine = float(alpha_affine)
pts1 = np.float32(
[
center_square + square_size,
[center_square[0] + square_size, center_square[1] - square_size],
center_square - square_size,
]
)
pts2 = pts1 + random_state.uniform(
-alpha_affine, alpha_affine, size=pts1.shape
).astype(np.float32)
matrix = cv2.getAffineTransform(pts1, pts2)
image = cv2.warpAffine(
image, matrix, (width, height), flags=interpolation, borderMode=border_mode
)
if approximate:
# Approximate computation smooth displacement map with a large enough kernel.
# On large images (512+) this is approximately 2X times faster
dx = random_state.rand(height, width).astype(np.float32) * 2 - 1
cv2.GaussianBlur(dx, (17, 17), sigma, dst=dx)
dx *= alpha
dy = random_state.rand(height, width).astype(np.float32) * 2 - 1
cv2.GaussianBlur(dy, (17, 17), sigma, dst=dy)
dy *= alpha
else:
dx = np.float32(
gaussian_filter((random_state.rand(height, width) * 2 - 1), sigma) * alpha
)
dy = np.float32(
gaussian_filter((random_state.rand(height, width) * 2 - 1), sigma) * alpha
)
x, y = np.meshgrid(np.arange(width), np.arange(height))
mapx = np.float32(x + dx)
mapy = np.float32(y + dy)
return cv2.remap(image, mapx, mapy, interpolation, borderMode=border_mode)
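# Minimal smoke test on a synthetic grayscale image (parameter values are
# arbitrary examples, not tuned for any dataset):
img = np.zeros((64, 64), dtype=np.uint8)
img[16:48, 16:48] = 255
warped = elastic_transform(img, alpha=40, sigma=6, alpha_affine=4, approximate=True)
print(warped.shape)  # -> (64, 64), same size as the input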
|
e60754cc898d83051164180b1d07ac0e4c688946
| 3,638,671
|
import re
from orangecontrib.xoppy.util.xoppy_xraylib_util import f0_xop
def crystal_atnum(list_AtomicName, unique_AtomicName, unique_Zatom,list_fraction, f0coeffs):
"""
To get the atom and fractional factor in diffierent sites
list_AtomicName: list of all atoms in the crystal
unique_AtomicName: list of unique atomicname in the list
unique_Zatom: list of unique atomic number
list_fraction: list of unique fractial factor
return: num_e, fract, n_atom, list of number of electrons for atom with same fractional factor, and corresponding fractional factor, atomic number
"""
num_e = []
fract = []
n_atom = []
n_ATUM = []
for k,x in enumerate(unique_AtomicName):
tmp1 = re.search('(^[a-zA-Z]*)',x)
if tmp1.group(0) == x: #AtomicName only, without valence info (i.e., B, Y, O)
f0 = f0_xop(unique_Zatom[k])
else:
#f0 = f0_xop(0,AtomicName=x)
f0 = f0coeffs[x]
icentral = int(len(f0)/2)
F000 = f0[icentral]
for i in range(icentral):
F000 += f0[i]
a=[list_fraction[i] for i,v in enumerate(list_AtomicName) if v==x]
fac = list(set(a))
for y in fac:
n = a.count(y)
num_e.append(F000)
fract.append(y)
n_atom.append(n)
n_ATUM.append(unique_Zatom[k])
return num_e.copy(), fract.copy(), n_atom.copy(),n_ATUM.copy()
|
f9805890971e1c2e6696084fad5f8b9071999046
| 3,638,672
|
import vtk
import numpy as np
def integrate(name, var):
""" given filename and var, generate profile """
d = vtk.vtkExodusIIReader()
d.SetFileName(name)
d.UpdateInformation()
d.SetPointResultArrayStatus(var,1)
d.Update()
blocks = d.GetOutput().GetNumberOfBlocks()
data = d.GetOutput()
# range to integrate at
height = 0.804380714893
thresh = 0.004
rmin = 0.0
rmax = 1.0
nr = 10
dr = (rmax-rmin)/nr
rint = np.zeros(nr)
rn = np.ones(nr)
    for j in range(blocks):
blk = data.GetBlock(0).GetBlock(j)
pts = blk.GetNumberOfPoints()
pt_data = blk.GetPointData().GetArray(var)
        for i in range(pts):
# gather x,y,z location
z,y,x = blk.GetPoint(i)
# gather point scalar value
u = pt_data.GetValue(i)
# now, find all values near the target height
# (convert to cylindrical)
if(abs(z - height) < thresh):
r = np.sqrt((x)**2 + (y)**2)
                fr = int(np.floor(r / dr))
                rint[fr] += u
                rn[fr] += 1
return rint/rn
|
116993d18c4430f6ce0e7dacba8b73ef3a03f689
| 3,638,673
|
def mediate(timer: TimerBase, decimals: int | None) -> int:
"""If the start function doesn't have decimals defined, then use the decimals value defined when the Timer() was initiated."""
return timer.decimals if decimals is None else validate_and_normalise(decimals)
|
030ec62071bc4c2bc41ae30c5eb8212b36e0359a
| 3,638,674
|
def calculate_n_inputs(inputs, config_dict):
"""
Calculate the number of inputs for a particular model.
"""
input_size = 0
for input_name in inputs:
if input_name == 'action':
input_size += config_dict['prior_args']['n_variables']
elif input_name == 'state':
input_size += config_dict['misc_args']['state_size']
elif input_name == 'reward':
input_size += 1
elif input_name in ['params', 'grads']:
if config_dict['approx_post_args']['constant_scale']:
input_size += config_dict['prior_args']['n_variables']
else:
input_size += 2 * config_dict['prior_args']['n_variables']
return input_size
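# Sketch with a hypothetical config dictionary (keys mirror those read above):
cfg = {
    'prior_args': {'n_variables': 4},
    'misc_args': {'state_size': 10},
    'approx_post_args': {'constant_scale': False},
}
print(calculate_n_inputs(['state', 'action', 'reward'], cfg))  # -> 10 + 4 + 1 = 15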
|
78d750ff4744d872d696dcb454933c868b0ba41e
| 3,638,675
|
def coords(lat: float, lon: float, alt: float = None ) -> str:
"""Turn longitude, latitude into a printable string."""
txt = "%2.4f%s" % (abs(lat), "N" if lat>0 else "S")
txt += " %2.4f%s" % (abs(lon), "E" if lon>0 else "W")
if alt:
txt += " %2.0fm" % alt
return txt
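# Example calls (coordinates are arbitrary illustrative values):
print(coords(48.8566, 2.3522))         # -> "48.8566N 2.3522E"
print(coords(-33.8688, 151.2093, 58))  # -> "33.8688S 151.2093E 58m"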
|
c5768e03c55d5f567695056d78108812014b9ef4
| 3,638,678
|
def chromosome_to_smiles():
"""Wrapper function for simplicity."""
def sc2smi(chromosome):
"""Generate a SMILES string from a list of SMILES characters. To be customized."""
silyl = "([Si]([C])([C])([C]))"
core = chromosome[0]
phosphine_1 = (
"(P(" + chromosome[1] + ")(" + chromosome[2] + ")(" + chromosome[3] + "))"
)
phosphine_2 = (
"(P(" + chromosome[4] + ")(" + chromosome[5] + ")(" + chromosome[6] + "))"
)
smiles = "{0}{1}{2}{3}".format(core, phosphine_1, phosphine_2, silyl)
return smiles
return sc2smi
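# Illustrative call with a hypothetical 7-gene chromosome
# (a metal core followed by six phosphine substituents):
to_smiles = chromosome_to_smiles()
print(to_smiles(["[Pd]", "C", "C", "C", "c1ccccc1", "C", "C"]))
# -> "[Pd](P(C)(C)(C))(P(c1ccccc1)(C)(C))([Si]([C])([C])([C]))"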
|
793995484c46295977f1d312c4fa11f69bca6c84
| 3,638,679
|
def softmax_edges(graph, feat):
"""Apply batch-wise graph-level softmax over all the values of edge field
:attr:`feat` in :attr:`graph`.
Parameters
----------
graph : DGLGraph
The graph.
feat : str
The feature field.
Returns
-------
tensor
The tensor obtained.
Examples
--------
>>> import dgl
>>> import torch as th
Create two :class:`~dgl.DGLGraph` objects and initialize their
edge features.
>>> g1 = dgl.DGLGraph() # Graph 1
>>> g1.add_nodes(2)
>>> g1.add_edges([0, 1], [1, 0])
>>> g1.edata['h'] = th.tensor([[1., 0.], [2., 0.]])
>>> g2 = dgl.DGLGraph() # Graph 2
>>> g2.add_nodes(3)
>>> g2.add_edges([0, 1, 2], [1, 2, 0])
>>> g2.edata['h'] = th.tensor([[1., 0.], [2., 0.], [3., 0.]])
Softmax over edge attribute :attr:`h` in a batched graph.
>>> bg = dgl.batch([g1, g2], edge_attrs='h')
>>> dgl.softmax_edges(bg, 'h')
tensor([[0.2689, 0.5000], # [0.2689, 0.7311] = softmax([1., 2.])
[0.7311, 0.5000], # [0.5000, 0.5000] = softmax([0., 0.])
[0.0900, 0.3333], # [0.0900, 0.2447, 0.6652] = softmax([1., 2., 3.])
[0.2447, 0.3333], # [0.3333, 0.3333, 0.3333] = softmax([0., 0., 0.])
[0.6652, 0.3333]])
Softmax over edge attribute :attr:`h` in a single graph.
>>> dgl.softmax_edges(g1, 'h')
tensor([[0.2689, 0.5000], # [0.2689, 0.7311] = softmax([1., 2.])
[0.7311, 0.5000]]), # [0.5000, 0.5000] = softmax([0., 0.])
Notes
-----
If the input graph has batch size greater then one, the softmax is applied at each
example in the batch.
"""
return _softmax_on(graph, 'edges', feat)
|
f5dafccca3c487756deeb37f534ef178cf1de75f
| 3,638,680
|
def command_result_processor_parameter_required(command_line_parameter):
"""
Command result message processor if a parameter stays unsatisfied.
Parameters
----------
command_line_parameter : ``CommandLineParameter``
Respective command parameter.
Returns
-------
message : `str`
"""
message_parts = []
message_parts.append('Parameter: ')
message_parts.append(repr(command_line_parameter.name))
message_parts.append(' is required.\n')
return ''.join(message_parts)
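# Illustrative output; the parameter object is stubbed for the example, only a
# .name attribute is needed:
class _FakeParameter:
    name = 'output-path'
print(command_result_processor_parameter_required(_FakeParameter()))
# -> "Parameter: 'output-path' is required.\n"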
|
fed1b7af60018cb5638e021365ae754477b7a241
| 3,638,681
|
def randdirichlet(a):
""" Python implementation of randdirichlet.m using randomgamma fucnction
:param a: vector of weights (shape parameters to the gamma distribution)
"""
try:
x = rand.randomgamma(a)
except ValueError:
a[a == 0] += 1e-16
x = rand.randomgamma(a)
x /= x.sum(axis=0)
return x
|
c825fb81c07337231f49437a2bea7ddd5a40234f
| 3,638,682
|
from django.shortcuts import render
def home(request):
"""
This is the home page request
"""
return render(request, 'generator/home.html')
|
ad8b69871d484c16583752d029fec2970084e698
| 3,638,683
|
import ida_ua
def print_insn_mnem(ea):
"""
Get instruction mnemonics
@param ea: linear address of instruction
@return: "" - no instruction at the specified location
@note: this function may not return exactly the same mnemonics
as you see on the screen.
"""
res = ida_ua.ua_mnem(ea)
if not res:
return ""
else:
return res
|
4c60e853356217c2fbfdd21047429c729b57f10f
| 3,638,684
|
import numpy as np
def setdim(P, dim=None):
"""
Adjust the dimensions of a polynomial.
Output the results into Poly object
Args:
P (Poly) : Input polynomial
dim (int) : The dimensions of the output polynomial. If omitted,
increase polynomial with one dimension. If the new dim is
smaller then P's dimensions, variables with cut components are
all cut.
Examples:
>>> x,y = chaospy.variable(2)
>>> P = x*x-x*y
>>> print(chaospy.setdim(P, 1))
q0^2
"""
P = P.copy()
ldim = P.dim
if not dim:
dim = ldim+1
if dim==ldim:
return P
P.dim = dim
if dim>ldim:
key = np.zeros(dim, dtype=int)
for lkey in P.keys:
key[:ldim] = lkey
P.A[tuple(key)] = P.A.pop(lkey)
else:
key = np.zeros(dim, dtype=int)
for lkey in P.keys:
if not sum(lkey[ldim-1:]) or not sum(lkey):
P.A[lkey[:dim]] = P.A.pop(lkey)
else:
del P.A[lkey]
P.keys = sorted(P.A.keys(), key=sort_key)
return P
|
610138c1d1a13112d35583d758cac43c1e296d18
| 3,638,685
|
def extract_file_from_zip(zipfile, filename):
    """
    Returns the compressed file `filename` from `zipfile`.
    """
    # Assumes `zipfile` is a zipfile.ZipFile instance; read() returns the
    # decompressed bytes of the named member.
    return zipfile.read(filename)
|
dc7b1e5a196a019d1fd2274155e0404b03b09702
| 3,638,686
|
import torch
def multi_classes_nms(cls_scores, box_preds, nms_config, score_thresh=None):
"""
Args:
cls_scores: (N, num_class)
box_preds: (N, 7 + C)
nms_config:
score_thresh:
Returns:
"""
pred_scores, pred_labels, pred_boxes = [], [], []
for k in range(cls_scores.shape[1]):
if score_thresh is not None:
scores_mask = (cls_scores[:, k] >= score_thresh)
box_scores = cls_scores[scores_mask, k]
cur_box_preds = box_preds[scores_mask]
else:
box_scores = cls_scores[:, k]
cur_box_preds = box_preds
selected = []
if box_scores.shape[0] > 0:
box_scores_nms, indices = torch.topk(box_scores, k=min(nms_config.NMS_PRE_MAXSIZE, box_scores.shape[0]))
boxes_for_nms = cur_box_preds[indices]
keep_idx, selected_scores = getattr(iou3d_nms_utils, nms_config.NMS_TYPE)(
boxes_for_nms[:, 0:7], box_scores_nms, nms_config.NMS_THRESH, **nms_config
)
selected = indices[keep_idx[:nms_config.NMS_POST_MAXSIZE]]
pred_scores.append(box_scores[selected])
pred_labels.append(box_scores.new_ones(len(selected)).long() * k)
pred_boxes.append(cur_box_preds[selected])
pred_scores = torch.cat(pred_scores, dim=0)
pred_labels = torch.cat(pred_labels, dim=0)
pred_boxes = torch.cat(pred_boxes, dim=0)
return pred_scores, pred_labels, pred_boxes
|
a0451c3769b4415e7e7d184d43f6b8f121b651b1
| 3,638,688
|