content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def terms_documents_matrix_ticcl_frequency(in_files):
    """Build a terms-documents matrix (and vocabulary) from a corpus.

    The matrix contains frequencies of wordforms, with wordforms along the
    columns and documents along the rows.

    Args:
        in_files: list of ticcl frequency files (one per document in the
            corpus).

    Returns:
        tuple: (sparse terms-documents matrix, the fitted vectorizer object
        containing the vocabulary, i.e. all word forms in the corpus).
    """
    vectorizer = DictVectorizer()
    matrix = vectorizer.fit_transform(ticcl_frequency(in_files))
    return matrix, vectorizer
def cvt_continue_stmt(node: pytree.Base, ctx: Ctx) -> ast_cooked.Base:
    """continue_stmt: 'continue'

    Convert a raw `continue` statement parse node into its cooked AST form.
    The statement carries no children, so only a marker object is produced.
    """
    #-# Continue
    # Only valid in a name-reference context; the offending node is attached
    # to the assertion payload to ease debugging.
    assert ctx.is_REF, [node]
    return ast_cooked.ContinueStmt()
def nth_even(n):
    """Return the n-th even number (1-based; the first even number is 0)."""
    return 2 * (n - 1)
def _has__of__(obj):
    """Check whether an object has an __of__ method for returning itself
    in the context of a container."""
    # Both checks are required: the isinstance test avoids cycles, and the
    # hasattr test catches mixins of Base done pre- or post-class-creation
    # (as in, e.g., zopefoundation/Persistence), which can otherwise fail.
    if not isinstance(obj, ExtensionClass.Base):
        return False
    return hasattr(type(obj), '__of__')
def grayscale(img):
    """Apply the grayscale transform to an RGB image.

    Returns an image with only one color channel. NOTE: to display the
    result (say it is called 'gray') as grayscale with matplotlib, call
    plt.imshow(gray, cmap='gray').
    """
    # Use cv2.COLOR_BGR2GRAY instead if the image was read with cv2.imread().
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
def get_pageinfo(response, tracktype='recenttracks'):
    """Return how many pages of tracks the user has.

    Parses the XML `response` and reads the 'totalPages' attribute from the
    child element named by `tracktype`.
    """
    root = ET.fromstring(response)
    pages = root.find(tracktype).attrib.get('totalPages')
    return int(pages)
def MIN(*args):
    """Return the minimum of a range or list of Number or datetime"""
    # Delegates to the shared aggregation helper, using the builtin ``min``.
    return _group_function(min, *args)
def GF(order, irreducible_poly=None, primitive_element=None, verify_irreducible=True, verify_primitive=True, mode="auto", target="cpu"):
    """
    Factory function to construct a Galois field array class of type :math:`\\mathrm{GF}(p^m)`.

    The created class will be a subclass of :obj:`galois.FieldArray` with metaclass :obj:`galois.FieldMeta`.
    The :obj:`galois.FieldArray` inheritance provides the :obj:`numpy.ndarray` functionality. The :obj:`galois.FieldMeta` metaclass
    provides a variety of class attributes and methods relating to the finite field.

    Parameters
    ----------
    order : int
        The order :math:`p^m` of the field :math:`\\mathrm{GF}(p^m)`. The order must be a prime power.
    irreducible_poly : int, galois.Poly, optional
        Optionally specify an irreducible polynomial of degree :math:`m` over :math:`\\mathrm{GF}(p)` that will
        define the Galois field arithmetic. An integer may be provided, which is the integer representation of the
        irreducible polynomial. Default is `None` which uses the Conway polynomial :math:`C_{p,m}` obtained from :func:`galois.conway_poly`.
    primitive_element : int, galois.Poly, optional
        Optionally specify a primitive element of the field :math:`\\mathrm{GF}(p^m)`. A primitive element is a generator of
        the multiplicative group of the field. For prime fields :math:`\\mathrm{GF}(p)`, the primitive element must be an integer
        and is a primitive root modulo :math:`p`. For extension fields :math:`\\mathrm{GF}(p^m)`, the primitive element is a polynomial
        of degree less than :math:`m` over :math:`\\mathrm{GF}(p)` or its integer representation. The default is `None` which uses
        :obj:`galois.primitive_root(p)` for prime fields and :obj:`galois.primitive_element(irreducible_poly)` for extension fields.
    verify_irreducible : bool, optional
        Indicates whether to verify that the specified irreducible polynomial is in fact irreducible. The default is
        `True`. For large irreducible polynomials that are already known to be irreducible (and may take a long time to verify),
        this argument can be set to `False`.
    verify_primitive : bool, optional
        Indicates whether to verify that the specified primitive element is in fact a generator of the multiplicative group.
        The default is `True`.
    mode : str, optional
        The type of field computation, either `"auto"`, `"jit-lookup"`, or `"jit-calculate"`. The default is `"auto"`.
        The "jit-lookup" mode will use Zech log, log, and anti-log lookup tables for efficient calculation. The "jit-calculate"
        mode will not store any lookup tables, but instead perform field arithmetic on the fly. The "jit-calculate" mode is
        designed for large fields that cannot or should not store lookup tables in RAM. Generally, "jit-calculate" mode will
        be slower than "jit-lookup". The "auto" mode will determine whether to use "jit-lookup" or "jit-calculate" based on the field's
        size. In "auto" mode, field's with `order <= 2**16` will use the "jit-lookup" mode.
    target : str, optional
        The `target` keyword argument from :func:`numba.vectorize`, either `"cpu"`, `"parallel"`, or `"cuda"`.

    Returns
    -------
    galois.FieldMeta
        A new Galois field array class that is a subclass of :obj:`galois.FieldArray` with :obj:`galois.FieldMeta` metaclass.

    Examples
    --------
    Construct a Galois field array class with default irreducible polynomial and primitive element.

    .. ipython:: python

        # Construct a GF(2^m) class
        GF256 = galois.GF(2**8)
        # Notice the irreducible polynomial is primitive
        print(GF256.properties)
        poly = GF256.irreducible_poly

    Construct a Galois field specifying a specific irreducible polynomial.

    .. ipython:: python

        # Field used in AES
        GF256_AES = galois.GF(2**8, irreducible_poly=galois.Poly.Degrees([8,4,3,1,0]))
        print(GF256_AES.properties)
        # Construct a GF(p) class
        GF571 = galois.GF(571); print(GF571.properties)
        # Construct a very large GF(2^m) class
        GF2m = galois.GF(2**100); print(GF2m.properties)
        # Construct a very large GF(p) class
        GFp = galois.GF(36893488147419103183); print(GFp.properties)

    See :obj:`galois.FieldArray` for more examples of what Galois field arrays can do.
    """
    if not isinstance(order, int):
        raise TypeError(f"Argument `order` must be an integer, not {type(order)}.")

    # A prime power has exactly one prime factor.
    p, k = prime_factors(order)
    if not len(p) == len(k) == 1:
        s = " + ".join([f"{pp}**{kk}" for pp, kk in zip(p, k)])
        raise ValueError(f"Argument `order` must be a prime power, not {order} = {s}.")
    p, m = p[0], k[0]

    if m == 1:
        # BUG FIX: idiomatic `is not None` (was `not ... is None`) and the
        # error message previously said "prime fields"/"extension field"
        # backwards -- an irreducible polynomial only applies to extension
        # fields, and this branch *is* the prime-field case.
        if irreducible_poly is not None:
            raise ValueError(f"Argument `irreducible_poly` can only be specified for extension fields, not the prime field GF({p}).")
        return GF_prime(p, primitive_element=primitive_element, verify_primitive=verify_primitive, target=target, mode=mode)

    return GF_extension(p, m, primitive_element=primitive_element, irreducible_poly=irreducible_poly, verify_primitive=verify_primitive, verify_irreducible=verify_irreducible, target=target, mode=mode)
import typing
def remove_fields_with_value_none(fields: typing.Dict) -> typing.Dict:
    """
    Remove keys whose value is none
    :param fields: the fields to clean
    :return: a copy of fields, without the none values
    """
    # Dict comprehension builds the stripped copy in one pass.
    return {key: value for key, value in fields.items() if value is not None}
def make_values(params, point):
    """Return a dictionary with the values replaced by the values in point,
    where point is a list of the values corresponding to the sorted params."""
    return {key: point[idx] for idx, key in enumerate(sorted(params))}
def elbow_method(data):
    """
    Compute the elbow method and build its visualization.

    :param data: 2 columns dataframe for cluster analysis
    :return: Plotly Figures
    """
    # Fit KMeans for k = 1..9 and collect the inertia (distortion) of each.
    k_range = range(1, 10)
    inertias = [model_kmeans(data, k).inertia_ for k in k_range]
    elbow = pd.DataFrame({'k': k_range,
                          'inertia': inertias})
    fig = go.Figure(data=go.Scatter(x=elbow['k'], y=elbow['inertia']))
    fig.update_layout(title='Elbows Methods for finding best K values in KMeans',
                      xaxis_title='K',
                      yaxis_title='Inertia')
    return fig
from typing import Callable
def sa_middleware(key: str = DEFAULT_KEY) -> 'Callable':
    """ SQLAlchemy asynchronous middleware factory. """

    @middleware
    async def sa_middleware_(request: 'Request', handler: 'Callable') -> 'StreamResponse':
        # A second middleware configured with the same key would silently
        # overwrite the session, so fail loudly instead.
        if key in request:
            raise DuplicateRequestKeyError(key)
        session_factory = request.config_dict.get(key)
        # Open a session for the lifetime of the request and expose it
        # under `key` on the request object.
        async with session_factory() as request[key]:
            return await handler(request)

    return sa_middleware_
def dice_loss(y_true, y_pred):
    """Soft Dice loss: 1 minus the (smoothed) Dice coefficient of the
    ground-truth and predicted tensors, reduced over the last axis."""
    smooth = 1.
    intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
    denominator = K.sum(K.square(y_true), -1) + K.sum(K.square(y_pred), -1) + smooth
    dice_coef = (2. * intersection + smooth) / denominator
    return 1 - dice_coef
def webdriver_init(mobile):
    """
    Initialize a mobile/desktop web driver.

    The driver is created with a default user agent matching the requested
    platform, as defined by MOBILE_USER_AGENT and DESKTOP_USER_AGENT.
    :param mobile: The mobile flag
    :type mobile: bool
    :return: A web driver
    :rtype: WebDriver
    """
    user_agent = MOBILE_USER_AGENT if mobile else DESKTOP_USER_AGENT
    return webdriver_init_with_caps(user_agent)
import hashlib
def calculate_file_hash(f, alg, buf_size):
    """Hash the contents of an open binary file-like object.

    Reads in `buf_size` chunks (e.g. 64 kB) so large files never need to
    be loaded into memory at once.
    """
    digest = hashlib.new(alg)
    while True:
        chunk = f.read(buf_size)
        if not chunk:
            break
        digest.update(chunk)
    return digest.hexdigest()
def update(A, B, DA, DB, f, k, delta_t):
    """Apply the Gray-Scott update formula"""
    # Diffusion contribution of each species.
    dA = DA * apply_laplacian(A)
    dB = DB * apply_laplacian(B)
    # Chemical reaction A + 2B -> 3B: A is consumed, B is produced.
    reaction = A * B**2
    dA -= reaction
    dB += reaction
    # Feed (birth) of A and kill (death) of B.
    dA += f * (1 - A)
    dB -= (k + f) * B
    # Euler step; for array inputs A and B are updated in place.
    A += dA * delta_t
    B += dB * delta_t
    return A, B
import typing
import hashlib
def sha512(data: typing.Optional[bytes] = None):
    """Returns a sha512 hash object; optionally initialized with a string."""
    return hashlib.sha512() if data is None else hashlib.sha512(data)
def get_entropy_of_maxes():
    """
    Specialized code for retrieving guesses and confidence of largest model of each type from the images giving largest
    entropy.
    :return: dict containing the models predictions and confidence, as well as the correct label under "y".
    """
    samples = get_high_entropy_mnist_test()
    images = [sample[0] for sample in samples]
    labels = [sample[1] for sample in samples]
    d = {"y": np.array(labels), "d": [], "f": []}
    for model in ("ffnn_models", "dropout_models"):
        pred = model_predictor(model + "/model_50000", np.array(images), np.array(labels))[0]
        # Keyed by the model path's first letter: "f" (ffnn) / "d" (dropout);
        # each entry is (predicted class, full confidence vector).
        for row in pred:
            d[model[0]].append((np.argmax(row), row))
    return d
from typing import List
from typing import Dict
from typing import Any
def get_all_links() -> List[Dict[str, Any]]:
    """Returns all links as an iterator"""
    # Thin wrapper: fetch every document from the links collection.
    return get_entire_collection(LINKS_COLLECTION)
def agent(states, actions):
    """Build the DQN agent network with Keras.

    A flattened state input followed by three 24-unit ReLU hidden layers
    and a linear output with one unit per action.
    """
    model = Sequential()
    model.add(Flatten(input_shape=(1, states)))
    for _ in range(3):
        model.add(Dense(24, activation='relu'))
    model.add(Dense(actions, activation='linear'))
    return model
def get_package_nvr_from_spec(spec_file):
    """
    Return a list of the NVR required for a given spec file
    :param spec_file: The path to a spec file
    :type spec_file: str
    :return: list of nevra that should be built for that spec file
    :rtype: str
    """
    # Read the dep name & version from the parsed spec's source header.
    parsed = rpm.spec(spec_file)
    nvr = parsed.sourceHeader[rpm.RPMTAG_NVR]
    # Split the dist tag from the end of the NVR (everything after the
    # final dot).
    return nvr[:nvr.rfind('.')]
def rbo_ext(S, T, p):
    """Extrapolated RBO value as defined in equation (30).
    Implementation handles uneven lists but not ties.
    """
    # Order the inputs so `longer` is the bigger ranking.
    if len(S) > len(T):
        longer, shorter = S, T
    else:
        longer, shorter = T, S
    l_len, s_len = len(longer), len(shorter)
    x_l = overlap(longer, shorter, l_len)
    x_s = overlap(longer, shorter, s_len)
    sum1 = sum(overlap(longer, shorter, d) / d * p ** d for d in range(1, l_len + 1))
    sum2 = sum(x_s * (d - s_len) / (s_len * d) * p ** d for d in range(s_len + 1, l_len + 1))
    return (1 - p) / p * (sum1 + sum2) + ((x_l - x_s) / l_len + x_s / s_len) * p ** l_len
def index(request):
    """ View of index page """
    # NOTE: locals() is passed as the template context below, so the local
    # variable names ('title', 'posts') are part of the template contract --
    # do not rename them.
    title = _("Home")
    # Five most recent posts, newest first.
    posts = Post.objects.all().order_by('-timestamp')[:5]
    return render(request, 'dashboard/index.html', locals())
def incremental_str_maker(str_format='{:03d}'):
    """Make a function that will produce a (incrementally) new string at every call.

    :param str_format: a ``str.format`` template applied to the call count
        (default zero-pads to three digits: '001', '002', ...).
    :return: a zero-argument function returning the next string.
    """
    # BUG FIX: the previous default '{:03.f}' is an invalid format spec
    # (precision '.' with no digits) and raised ValueError on every call;
    # '{:03d}' is the working zero-padded equivalent for the int counter.
    i = 0

    def mk_next_str():
        nonlocal i
        i += 1
        return str_format.format(i)

    return mk_next_str
def get_time_slider_range(highlighted=True,
                          withinHighlighted=True,
                          highlightedOnly=False):
    """Return the time range from Maya's time slider.

    Arguments:
        highlighted (bool): When True if will return a selected frame range
            (if there's any selection of more than one frame!) otherwise it
            will return min and max playback time.
        withinHighlighted (bool): By default Maya returns the highlighted range
            end as a plus one value. When this is True this will be fixed by
            removing one from the last number.
        highlightedOnly (bool): When True and no range is highlighted, return
            None instead of falling back to the full playback range.

    Returns:
        list: List of two floats of start and end frame numbers, or None when
        `highlightedOnly` is True and nothing is highlighted.
    """
    if highlighted is True:
        # Resolve Maya's global playback slider widget name via MEL.
        gPlaybackSlider = mel.eval("global string $gPlayBackSlider; "
                                   "$gPlayBackSlider = $gPlayBackSlider;")
        if cmds.timeControl(gPlaybackSlider, query=True, rangeVisible=True):
            highlightedRange = cmds.timeControl(gPlaybackSlider,
                                                query=True,
                                                rangeArray=True)
            if withinHighlighted:
                # Maya reports the end frame as end+1; undo that.
                highlightedRange[-1] -= 1
            return highlightedRange
    if not highlightedOnly:
        # Fallback: the full playback range.
        return [cmds.playbackOptions(query=True, minTime=True),
                cmds.playbackOptions(query=True, maxTime=True)]
def update_IW(hyp_D_prev, xikk, xk, Pik_old):
    """
    Do an update of Norm-IW conjugate in an exponential form.
    """
    # Accumulate the sufficient statistics onto the previous hyperparameters,
    # then take the expectation under the updated hyperparameters.
    suff_stats = get_suff_IW_conj(xikk, xk, Pik_old)
    hyp_D = hyp_D_prev + suff_stats
    Dik = get_E_IW_hyp(hyp_D)
    return Dik, hyp_D
def compute_state(observations, configuration):
    """Wrap the raw observations in the configured state tensor type.

    :param observations: raw environment observations
    :param configuration: object whose STATE_TYPE attribute is the tensor class
    :return StateTensor:
    """
    return configuration.STATE_TYPE([observations])
def supports_transfer_syntax(transfer_syntax: pydicom.uid.UID) -> bool:
    """Return ``True`` if the handler supports the `transfer_syntax`.

    Parameters
    ----------
    transfer_syntax : uid.UID
        The Transfer Syntax UID of the *Pixel Data* that is to be used with
        the handler.
    """
    # Simple membership test against the handler's supported set.
    supported = transfer_syntax in SUPPORTED_TRANSFER_SYNTAXES
    return supported
def enhance_user(user, json_safe=False):
    """
    Adds computed attributes to AD user results
    Args:
        user: A dictionary of user attributes
        json_safe: If true, converts binary data into base64,
        And datetimes into human-readable strings
    Returns:
        An enhanced dictionary of user attributes
    """
    # Idiom: `key in dict` instead of the redundant `key in dict.keys()`.
    if "memberOf" in user:
        # Case-insensitive sort of the member DNs.
        user["memberOf"] = sorted(user["memberOf"], key=lambda dn: dn.lower())
    if "showInAddressBook" in user:
        user["showInAddressBook"] = sorted(user["showInAddressBook"], key=lambda dn: dn.lower())
    if "lastLogonTimestamp" in user:
        user["lastLogonTimestamp"] = _get_last_logon(user["lastLogonTimestamp"])
    if "lockoutTime" in user:
        user["lockoutTime"] = convert_ad_timestamp(user["lockoutTime"], json_safe=json_safe)
    if "pwdLastSet" in user:
        user["pwdLastSet"] = convert_ad_timestamp(user["pwdLastSet"], json_safe=json_safe)
    if "userAccountControl" in user:
        # Decode the userAccountControl bit flags into booleans.
        user["userAccountControl"] = int(user["userAccountControl"])
        user["disabled"] = user["userAccountControl"] & 2 != 0
        user["passwordExpired"] = user["userAccountControl"] & 8388608 != 0
        user["passwordNeverExpires"] = user["userAccountControl"] & 65536 != 0
        user["smartcardRequired"] = user["userAccountControl"] & 262144 != 0
    if "whenCreated" in user:
        user["whenCreated"] = convert_ad_timestamp(user["whenCreated"], json_safe=json_safe)
    if "msExchRecipientTypeDetails" in user:
        # Classify the Exchange mailbox type from the recipient-type code.
        user["msExchRecipientTypeDetails"] = int(user["msExchRecipientTypeDetails"])
        user["remoteExchangeMailbox"] = user["msExchRecipientTypeDetails"] in remote_exchange_mailbox_values
        user["exchangeMailbox"] = user["msExchRecipientTypeDetails"] in exchange_mailbox_values.keys()
        if user["exchangeMailbox"]:
            user["exchangeMailboxType"] = exchange_mailbox_values[user["msExchRecipientTypeDetails"]]
    return user
from datetime import datetime
import random
def create_data(namespace_id, ocs_client):
    """Creates sample data for the script to use.

    Defines two SDS types and streams, then inserts an hour of randomly
    generated pressure/temperature events into each stream.

    Returns:
        tuple: (sample_start_time, sample_end_time) datetimes bounding the
        generated data.
    """
    # BUG FIX: the module does `from datetime import datetime`, which shadows
    # the module; the code below needs `datetime.datetime` and
    # `datetime.timedelta`, so bind the real module locally.
    import datetime

    double_type = SdsType(id='doubleType', sdsTypeCode=SdsTypeCode.Double)
    datetime_type = SdsType(
        id='dateTimeType', sdsTypeCode=SdsTypeCode.DateTime)
    pressure_property = SdsTypeProperty(id='pressure', sdsType=double_type)
    temperature_property = SdsTypeProperty(id=SAMPLE_FIELD_TO_CONSOLIDATE_TO,
                                           sdsType=double_type)
    ambient_temperature_property = SdsTypeProperty(id=SAMPLE_FIELD_TO_CONSOLIDATE,
                                                   sdsType=double_type)
    time_property = SdsTypeProperty(id='time', sdsType=datetime_type,
                                    isKey=True)
    sds_type_1 = SdsType(
        id=SAMPLE_TYPE_ID_1,
        description='This is a sample Sds type for storing Pressure type '
                    'events for Data Views',
        sdsTypeCode=SdsTypeCode.Object,
        properties=[pressure_property, temperature_property, time_property])
    sds_type_2 = SdsType(
        id=SAMPLE_TYPE_ID_2,
        description='This is a new sample Sds type for storing Pressure type '
                    'events for Data Views',
        sdsTypeCode=SdsTypeCode.Object,
        properties=[pressure_property, ambient_temperature_property, time_property])
    print('Creating SDS Types...')
    ocs_client.Types.getOrCreateType(namespace_id, sds_type_1)
    ocs_client.Types.getOrCreateType(namespace_id, sds_type_2)

    stream1 = SdsStream(
        id=SAMPLE_STREAM_ID_1,
        name=SAMPLE_STREAM_NAME_1,
        description='A Stream to store the sample Pressure events',
        typeId=SAMPLE_TYPE_ID_1)
    stream2 = SdsStream(
        id=SAMPLE_STREAM_ID_2,
        name=SAMPLE_STREAM_NAME_2,
        description='A Stream to store the sample Pressure events',
        typeId=SAMPLE_TYPE_ID_2)
    print('Creating SDS Streams...')
    ocs_client.Streams.createOrUpdateStream(namespace_id, stream1)
    ocs_client.Streams.createOrUpdateStream(namespace_id, stream2)

    # Generate one hour of data ending now.
    sample_start_time = datetime.datetime.now() - datetime.timedelta(hours=1)
    sample_end_time = datetime.datetime.now()

    values1 = []
    values2 = []

    def value_with_time(timestamp, value, field_name, value2):
        """Formats a JSON data object"""
        return f'{{"time": "{timestamp}", "pressure": {str(value)}, "{field_name}": {str(value2)}}}'

    print('Generating values...')
    for i in range(1, 30, 1):
        # One event every two minutes.
        timestamp = (sample_start_time + datetime.timedelta(minutes=i * 2)
                     ).isoformat(timespec='seconds')
        val1 = value_with_time(timestamp, random.uniform(
            0, 100), SAMPLE_FIELD_TO_CONSOLIDATE_TO, random.uniform(50, 70))
        val2 = value_with_time(timestamp, random.uniform(
            0, 100), SAMPLE_FIELD_TO_CONSOLIDATE, random.uniform(50, 70))
        values1.append(val1)
        values2.append(val2)

    print('Sending values...')
    # str(list) yields [..., ...]; stripping the quotes produces a JSON array
    # of the already-JSON-formatted event strings.
    ocs_client.Streams.insertValues(
        namespace_id,
        SAMPLE_STREAM_ID_1,
        str(values1).replace("'", ""))
    ocs_client.Streams.insertValues(
        namespace_id,
        SAMPLE_STREAM_ID_2,
        str(values2).replace("'", ""))
    return (sample_start_time, sample_end_time)
def enhanced_feature_extractor_digit(datum):
    """Feature extraction playground for digits.
    You should return a util.Counter() of features
    for this datum (datum is of type samples.Datum).
    ## DESCRIBE YOUR ENHANCED FEATURES HERE...
    """
    # Start from the basic pixel features; extend them with your own below.
    features = basic_feature_extractor_digit(datum)
    "*** YOUR CODE HERE ***"
    # Placeholder: aborts until the enhanced features are implemented.
    util.raise_not_defined()
    return features
from typing import Union
def linear_resample(x: Union[ivy.Array, ivy.NativeArray], num_samples: int, axis: int = -1, f: ivy.Framework = None)\
        -> Union[ivy.Array, ivy.NativeArray]:
    """
    Performs linear re-sampling on input image.
    :param x: Input array
    :type x: array
    :param num_samples: The number of interpolated samples to take.
    :type num_samples: int
    :param axis: The axis along which to perform the resample. Default is last dimension.
    :type axis: int, optional
    :param f: Machine learning framework. Inferred from inputs if None.
    :type f: ml_framework, optional
    :return: The array after the linear resampling.
    """
    # Dispatch to the backend framework's implementation.
    framework = _cur_framework(x, f=f)
    return framework.linear_resample(x, num_samples, axis)
def update_milestones(repo, username=None, namespace=None):
    """Update the milestones of a project.

    Reads the submitted milestone form fields, validates them (no duplicate
    milestone names), persists the result on the repo, and redirects back to
    either the issues page or the settings roadmap tab.
    """
    repo = flask.g.repo
    # CSRF-only form: validate_on_submit() guards against forged POSTs.
    form = pagure.forms.ConfirmationForm()
    error = False
    if form.validate_on_submit():
        redirect = flask.request.args.get("from")
        milestones = flask.request.form.getlist("milestones")
        # miles maps milestone name -> {date, active}; keys preserves the
        # submitted ordering.
        miles = {}
        keys = []
        for idx in milestones:
            milestone = flask.request.form.get(
                "milestone_%s_name" % (idx), None
            )
            date = flask.request.form.get("milestone_%s_date" % (idx), None)
            active = (
                True
                if flask.request.form.get("milestone_%s_active" % (idx))
                else False
            )
            if milestone and milestone.strip():
                milestone = milestone.strip()
                # Reject duplicate milestone names outright.
                if milestone in miles:
                    flask.flash(
                        "Milestone %s is present multiple times" % milestone,
                        "error",
                    )
                    error = True
                    break
                miles[milestone] = {
                    "date": date.strip() if date else None,
                    "active": active,
                }
                keys.append(milestone)
        if not error:
            try:
                repo.milestones = miles
                repo.milestones_keys = keys
                flask.g.session.add(repo)
                flask.g.session.commit()
                flask.flash("Milestones updated")
            except SQLAlchemyError as err: # pragma: no cover
                flask.g.session.rollback()
                flask.flash(str(err), "error")
        if redirect == "issues":
            return flask.redirect(
                flask.url_for(
                    "ui_ns.view_issues",
                    username=username,
                    repo=repo.name,
                    namespace=namespace,
                )
            )
    # Default destination (also for failed validation): settings roadmap tab.
    return flask.redirect(
        flask.url_for(
            "ui_ns.view_settings",
            username=username,
            repo=repo.name,
            namespace=namespace,
        )
        + "#roadmap-tab"
    )
def trapezoid_vectors(t, depth, big_t, little_t):
    """Trapezoid shape, in the form of vectors, for model.

    Parameters
    ----------
    t : float
        Vector of independent values to evaluate trapezoid model.
    depth : float
        Depth of trapezoid.
    big_t : float
        Full trapezoid duration.
    little_t : float
        Ingress/egress duration.

    Returns
    -------
    output : float
        Vector of trapezoid model values.
    """
    t = np.abs(t)
    flat_end = big_t * 0.5 - little_t * 0.5   # end of the full-depth floor
    ramp_end = big_t * 0.5 + little_t * 0.5   # end of the ingress/egress ramp
    floor = 1.0 - depth
    out = np.full_like(t, 1.0)
    # Full-depth region.
    out = np.where(t <= flat_end, floor, out)
    # Linear ramp between the floor and the baseline.
    on_ramp = np.logical_and(t > flat_end, t < ramp_end)
    ramp_vals = floor + (depth / little_t) * (t - flat_end)
    return np.where(on_ramp, ramp_vals, out)
def get_rules(clf, class_names, feature_names):
    """
    Extracts the rules from a decision tree classifier.
    The keyword arguments correspond to the objects returned by
    tree.build_tree.
    Keyword arguments:
    clf: A sklearn.tree.DecisionTreeClassifier.
    class_names: A list(str) containing the class names.
    feature_names: A list(str) containing the feature names.
    Returns:
    A list(str) where each element is a rule describing a leaf node.
    """
    # Walk the fitted tree collecting one rule per leaf, then prune.
    raw_rules = traverse(clf.tree_, 0, class_names, feature_names, [], [], [], [])
    raw_rules = prune_rules(raw_rules, feature_names)
    n_rules = len(raw_rules)
    print('\tExtracted', n_rules, 'rule' + ('s.' if n_rules > 1 else '.'))
    # Render each (features, thresholds, decisions, class) tuple as a string.
    return [lists2rule(features, thresholds, decisions, class_name)
            for (features, thresholds, decisions, class_name) in raw_rules]
def file_parser(localpath = None, url = None, sep = " ", delimiter = None):
    """Parse a delimited text file (local path or URL) into a DataFrame.

    INPUT:
    > 'localpath' : String (str). Path to a local .CSV, .TSV or .TXT file.
        Exactly one of 'localpath' / 'url' must be supplied.
    > 'url' : [OPTIONAL] String (str). A readable URL pointing to a .CSV,
        .TSV or .TXT file; leave 'localpath' as None when using it.
    > 'sep' : [OPTIONAL] String (str). Field separator passed to pandas;
        separators longer than 1 character (other than '\\s+') are treated as
        regular expressions.
    > 'delimiter' : [OPTIONAL] String (str). Alternative name for 'sep';
        leave as None unless you want it to override 'sep'.
    OUTPUT:
    Shall result into a Pandas DataFrame or TextParser for further data
    processing; error strings are returned for invalid argument combos.
    """
    # BUG FIX: the old default delimiter="\t" meant both `sep` and
    # `delimiter` were always passed to read_csv, which raises ValueError
    # (they are mutually exclusive). delimiter now defaults to None.
    # The deprecated `infer_datetime_format` flag was dropped (format
    # inference is pandas' default behaviour).
    if localpath is None and url is None:
        return "Please input EITHER local file path to 'localpath' parameter OR any valid readable URL to 'url' parameter"
    elif localpath is not None and url is None:
        if localpath.lower().endswith((".txt", ".csv", ".tsv")):
            return pd.read_csv(localpath, sep=sep, delimiter=delimiter, parse_dates=[0])
        return "This file format is not supported. Kindly refer to our functional flow documentation for further assistance!"
    elif localpath is None and url is not None:
        return pd.read_csv(url, sep=sep, delimiter=delimiter, parse_dates=[0])
    else:
        return "Please pass valid input for processing."
def node_to_truncated_gr(node, bin_width=0.1):
    """
    Parses truncated GR node to an instance of the
    :class: openquake.hazardlib.mfd.truncated_gr.TruncatedGRMFD
    """
    required = ("minMag", "maxMag", "aValue", "bValue")
    # Bail out when any required attribute is missing/empty.
    if not all(node.attrib[key] for key in required):
        return None
    # Convert every attribute to float.
    tgr = {key: float_(node.attrib[key]) for key in node.attrib}
    return mfd.truncated_gr.TruncatedGRMFD(min_mag=tgr["minMag"],
                                           max_mag=tgr["maxMag"],
                                           bin_width=bin_width,
                                           a_val=tgr["aValue"],
                                           b_val=tgr["bValue"])
def nt(node, tag):
    """Return the text of `tag` under `node`, or None if the tag does not
    exist (or has no text)."""
    # Look the child up once instead of three times.
    child = node.find(tag)
    if child is not None and child.text is not None:
        return child.text
    return None
def set_user_favorites(username, **_):
    """
    Sets the user's Favorites
    Variables:
    username => Name of the user you want to set the favorites for
    Arguments:
    None
    Data Block:
    { # Dictionary of
    "alert": [
    "<name_of_query>": # Named queries
    "*:*", # The actual query to run
    ...
    }
    Result example:
    {
    "success": true # Was saving the favorites successful ?
    }
    """
    data = request.json
    # Template of all accepted favorite categories; used only to validate
    # the submitted keys.
    favorites = {
        "alert": [],
        "search": [],
        "signature": [],
        "submission": [],
        "error": []
    }
    for key in data:
        if key not in favorites:
            return make_api_response("", err="Invalid favorite type (%s)" % key, status_code=400)
    favorites.update(data)
    # NOTE(review): the merged `favorites` dict is built but the raw `data`
    # is what gets saved, so categories omitted from the request are not
    # reset to [] -- confirm this partial-update behavior is intentional.
    return make_api_response({"success": STORAGE.user_favorites.save(username, data)})
def delete_token(token_id):
    """Revoke a specific token in the application auth database.
    :type token_id: str
    :param token_id: Token identifier
    :rtype: tuple
    :return: None, status code
    """
    client_data = g.client_data
    if not valid_token_id(token_id):
        raise MalformedTokenIdException
    token = current_app.auth_db.lookup_token(token_id)
    if token is None:
        raise TokenNotFoundException
    if not isinstance(token, Token):
        raise InternalServerErrorException("auth_db.lookup_token did not return a token object")
    # Admins may revoke any token; other clients only their own.
    if "admin" not in client_data.roles and token.client_id != client_data.client_id:
        raise InadequateRolesException("Cannot revoke a token which you do not own")
    current_app.auth_db.revoke_token(token_id)
    return "", 204
from typing import Optional
async def update_country(identifier: Optional[str] = None, name: Optional[str] = None, capital: Optional[str] = None,
                         country: UpdateCountryModel = Body(...), current_user: AdminModel = Depends(get_current_user)):
    """
    Update a country by name or capital name:
    - **current user** should be admin
    - **name**: country name
    - **capital**: capital name of the country
    """
    variables = locals()
    options = {'identifier': '_id', 'name': 'name', 'capital': 'capital'}
    # BUG FIX: iterate only the lookup parameters. The old loop walked every
    # local (including `country` and `current_user`, which are never None),
    # so `options[key]` raised KeyError whenever no lookup param was given
    # instead of returning the intended 404.
    for key in options:
        if variables[key] is not None:
            return await update_object({options[key]: variables[key]}, country, 'countries')
    raise HTTPException(status_code=404, detail='Set some parameters')
def twoSum(nums, target):
    """
    :type nums: List[int]
    :type target: int
    :rtype: List[int]
    """
    # One-pass hash map: O(n) time, O(n) space, and an element is never
    # paired with itself. (The original returned after inspecting only the
    # first element -- the `else: return []` was bound to the `if` inside
    # the loop -- and `nums.index()` could match the same position.)
    seen = {}  # value -> index of its first occurrence
    for i, num in enumerate(nums):
        rest = target - num
        if rest in seen:
            return [seen[rest], i]
        seen[num] = i
    return []
from miniworld.util import ConcurrencyUtil
def wait_until_uds_reachable(uds_path, return_sock=False):
    """ Wait until the unix domain socket at `uds_path` is reachable.
    Returns
    -------
    socket.socket
    """
    # Poll `uds_reachable` until its first result element is True, then
    # hand back the second element (the socket, when requested).
    result = ConcurrencyUtil.wait_until_fun_returns_true(
        lambda x: x[0] is True, uds_reachable, uds_path,
        return_sock=return_sock)
    return result[1]
def get_relation_count_df(
    dataset: Dataset,
    merge_subsets: bool = True,
    add_labels: bool = True,
) -> pd.DataFrame:
    """Create a dataframe with relation counts.

    :param dataset:
        The dataset.
    :param merge_subsets:
        Whether to merge subsets, i.e., train/validation/test.
    :param add_labels:
        Whether to add relation labels to the dataframe.
    :return:
        A dataframe with columns (relation_id, count, relation_label?, subset?)
    """
    # Delegate to the shared counting helper with the relation-count
    # extraction function.
    return _common(
        dataset=dataset,
        triple_func=triple_analysis.get_relation_counts,
        merge_subsets=merge_subsets,
        add_labels=add_labels,
    )
def get_tests(run_id):
    """
    Endpoint for fetching the tests of a test run.

    Output parameter: test_id
    """
    import os

    client = APIClient('https://testrail.homecred.it')
    # SECURITY: credentials were hard-coded in source. Prefer environment
    # variables; the old values remain as fallbacks for backward
    # compatibility but should be rotated and removed.
    client.user = os.environ.get('TESTRAIL_USER', 'dmitriy.zverev@homecredit.ru')
    client.password = os.environ.get('TESTRAIL_PASSWORD', 'Qwerty_22')
    return client.send_get('get_tests/%s' % run_id)
def other(player):
    """Given a player numbered 0 or 1, return the opposing player's number.

    >>> other(0)
    1
    >>> other(1)
    0
    """
    opponent = 1 - player
    return opponent
def _explored_parameters_in_group(traj, group_node):
"""Checks if one the parameters in `group_node` is explored.
:param traj: Trajectory container
:param group_node: Group node
:return: `True` or `False`
"""
explored = False
for param in traj.f_get_explored_parameters():
if param in group_node:
explored = True
break
return explored | 71cbafbad0dcc3fa9294c0bede5f6a09941d452b | 8,087 |
def _execute(query,
             data=None,
             config_file=DEFAULT_CONFIG_FILE):
    """Execute a SQL query on a postgres db and return its result.

    :param query: SQL statement to execute.
    :param data: optional statement parameters; a list triggers a bulk
        insert via ``execute_values``, any other non-None value is passed
        as the parameter sequence/mapping of a single ``execute``.
    :param config_file: path of the config file holding the credentials.
    :return: ``None`` when the statement produces no result set, a scalar
        when the result set has a single column, otherwise a list of rows.

    Fix: the connection and cursor are now closed in ``finally`` blocks,
    so they are no longer leaked when the query raises.
    """
    # Connect to an existing database.
    postgres_db_credentials = postgres_db(config_file)
    conn = psycopg2.connect(dbname=postgres_db_credentials["dbname"],
                            user=postgres_db_credentials["user"],
                            password=postgres_db_credentials["password"],
                            host=postgres_db_credentials["host"],
                            port=postgres_db_credentials["port"])
    try:
        # Open a cursor to perform database operations.
        cur = conn.cursor()
        try:
            if data is None:
                cur.execute(query)
            elif isinstance(data, list):
                execute_values(cur, query, data, template=None, page_size=100)
            else:
                cur.execute(query, data)
            conn.commit()
            if cur.description is None:
                # Statement returned no result set (e.g. INSERT/UPDATE).
                result = None
            elif len(cur.description) == 1:
                # Single-column result: unwrap the scalar.
                result, = cur.fetchone()
            else:
                result = cur.fetchall()
        finally:
            cur.close()
    finally:
        conn.close()
    return result
from pathlib import Path
def _construct_out_filename(fname, group_name):
"""
Construct a specifically formatted output filename.
The vrt will be placed adjacent to the HDF5 file, as
such write access is required.
"""
basedir = fname.absolute().parent
basename = fname.with_suffix('.vrt').name.replace(
'wagl',
group_name
)
out_fname = basedir.joinpath(Path(basename))
return out_fname | 117bb8470ab65f0b9fb11bb3151ae653e5e28d23 | 8,089 |
import json
def _deposit_need_factory(name, **kwargs):
    """Generate a JSON argument string from the given keyword arguments.

    Keyword arguments whose value is None are dropped first, and the JSON
    string is always generated the same way (sorted keys, compact
    separators) so that the resulting Need is equal to any other Need
    generated with the same name and kwargs.
    """
    # Drop None values.  The original `for key, value in enumerate(kwargs)`
    # iterated (index, key) pairs and then deleted by integer index, which
    # raised KeyError and never filtered anything.
    kwargs = {key: value for key, value in kwargs.items() if value is not None}
    if not kwargs:
        argument = None
    else:
        argument = json.dumps(kwargs, separators=(',', ':'), sort_keys=True)
    return ParameterizedActionNeed(name, argument)
def not_equal(version1, version2):
    """
    Evaluates the expression: version1 != version2.

    Comparison is delegated to :func:`compare` with the ``'!='`` operator.

    :type version1: str
    :type version2: str
    :rtype: bool
    """
    return compare(version1, '!=', version2)
def get_sp_list():
    """
    Scrape the Wikipedia S&P 500 constituents page and return the
    anchor tags with class "external text" (the company/ticker links).
    """
    page = get_soup('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
    return page.find_all('a', class_="external text")
def get_params_nowcast(
        to, tf,
        i, j,
        path, nconst,
        depthrange='None',
        depav=False, tidecorr=tidetools.CorrTides):
    """This function loads all the data between the start and the end date that
    contains hourly velocities in the netCDF4 nowcast files in the specified
    depth range. Then masks, rotates and unstaggers the time series. The
    unstaggering causes the shapes of the returned arrays to be 1 less than
    those of the input arrays in the y and x dimensions. Finally it calculates
    tidal ellipse parameters from the u and v time series. Maintains the shape
    of the velocities enters only loosing the time dimensions.

    :arg to: The beginning of the date range of interest
    :type to: datetime object

    :arg tf: The end of the date range of interest
    :type tf: datetime object

    :arg i: x index, must have at least 2 values for unstaggering, will loose
        the first i during the unstaggering in prepare_vel.
    :type i: float or list

    :arg j: y index, must have at least 2 values for unstaggering, will loose
        the first j during the unstaggering in prepare_vel.
    :type j: float or list

    :arg path: Defines the path used(eg. nowcast)
    :type path: string

    :arg nconst: Number of tidal constituents to consider.

    :arg depthrange: Depth values of interest in meters as a float for a single
        depth or a list for a range. A float will find the closest depth that
        is <= the value given. Default is 'None' for the whole water column
        (0-441m).
    :type depthrange: float, string or list

    :arg depav: True will depth average over the whole depth profile given.
        Default is False.
    :type depav: boolean

    :arg tidecorr: Tidal corrections applied to the ellipse parameters.
        Default is tidetools.CorrTides.

    :returns: params, dep
        params is dictionary object of the ellipse parameters for each constituent
        dep is the depths of the ellipse paramters
    """
    u, v, time, dep = ellipse_files_nowcast(
        to, tf,
        i, j,
        path,
        depthrange=depthrange)
    u_u, v_v = prepare_vel(u, v, depav=depav, depth=dep)
    params = get_params(u_u, v_v, time, nconst, tidecorr=tidecorr)
    return params, dep
def convert_size(size):
    """Convert an ISPMan size (in KiB) into a human-readable unit string."""
    size_in_bytes = int(size) * 1024
    return number_to_human_size(size_in_bytes)
def get_suppressed_output(
    detections,
    filter_id: int,
    iou: float,
    confidence: float,
) -> tuple:
    """Apply non-maximum suppression to raw model detections.

    Dispatches to the mask-aware variant when the prediction output
    contains instance masks, otherwise to the bbox-only variant.

    :param detections: The tensorflow prediction output.
    :param filter_id: The specific class to be filtered.
    :param iou: The intersection of union threshold.
    :param confidence: The confidence threshold.
    :returns: tuple of suppressed bbox, suppressed scores and suppressed classes.
    """
    boxes = detections["detection_boxes"]
    scores = detections["detection_scores"]
    classes = detections["detection_classes"]
    masks = (
        detections["detection_masks"]
        if "detection_masks" in detections
        else None
    )
    if masks is None:
        return _non_max_suppress_bbox(
            bbox=boxes,
            scores=scores,
            classes=classes,
            filter_class=filter_id,
            iou=iou,
            confidence=confidence,
        )
    return _non_max_suppress_mask(
        bbox=boxes,
        scores=scores,
        classes=classes,
        masks=masks,
        filter_class=filter_id,
        iou=iou,
        confidence=confidence,
    )
def tf_nan_func(func, **kwargs):
    """
    Apply `func` only to the finite entries of its tensor arguments,
    writing the results back into a NaN-filled tensor of the original
    shape.  Helpful for tf computations that cannot deal with nan values.

    :param func: function called with the (masked) keyword arguments; X is
        the reference argument whose finiteness defines the mask
    :param kwargs: tensor arguments for func; "X" is required
    :return: tensor shaped like kwargs["X"], with func's outputs at the
        finite positions and NaN elsewhere
    """
    # Mask of positions where X is finite; these are the only positions
    # func's results will be scattered back into.
    mask = tfm.is_finite(kwargs["X"])
    # Full-shape output initialised to NaN, same dtype as X.
    empty_t = tf.cast(tf.fill(mask.shape, np.nan), dtype=kwargs["X"].dtype)
    for i in kwargs:
        # workaround of tf.rank(kwargs[i]) > 0, avoid scalar value in mask
        if kwargs[i].shape != ():
            # keep only finite
            # NOTE(review): each tensor is masked by its *own* finiteness,
            # not by X's mask — assumes all non-scalar args share the same
            # NaN pattern as X; confirm with callers.
            kwargs[i] = tf.boolean_mask(kwargs[i], tfm.is_finite(kwargs[i]))
    res_func = func(**kwargs)
    # Scatter the compact results back to the finite positions of X.
    full_t = tf.tensor_scatter_nd_update(empty_t, tf.where(mask), res_func)
    return full_t
def deep_update(original,
                new_dict,
                new_keys_allowed=False,
                allow_new_subkey_list=None,
                override_all_if_type_changes=None):
    """Recursively merge `new_dict` into `original`, in place.

    Unknown top-level keys raise unless `new_keys_allowed` is True.
    Sub-dicts are merged recursively; keys listed in
    `allow_new_subkey_list` may gain new subkeys, and keys listed in
    `override_all_if_type_changes` are replaced wholesale when their
    nested "type" entry differs between the two dicts.

    Args:
        original (dict): Dictionary with default values.
        new_dict (dict): Dictionary with values to be updated.
        new_keys_allowed (bool): Whether new keys are allowed.
        allow_new_subkey_list (Optional[List[str]]): Top-level keys whose
            dict values may receive new subkeys.
        override_all_if_type_changes (Optional[List[str]]): Top-level keys
            with dict values that are overridden entirely when the nested
            "type" entry changes.

    Returns:
        dict: `original`, mutated with the merged values.
    """
    allow_new_subkey_list = allow_new_subkey_list or []
    override_all_if_type_changes = override_all_if_type_changes or []
    for key, new_value in new_dict.items():
        if key not in original and not new_keys_allowed:
            raise Exception("Unknown config parameter `{}` ".format(key))
        old_value = original.get(key)
        # Non-dict on either side: plain override.
        if not (isinstance(old_value, dict) and isinstance(new_value, dict)):
            original[key] = new_value
            continue
        type_changed = (key in override_all_if_type_changes
                        and "type" in new_value
                        and "type" in old_value
                        and new_value["type"] != old_value["type"])
        if type_changed:
            # "type" differs: replace the whole nested dict.
            original[key] = new_value
        elif key in allow_new_subkey_list:
            # Whitelisted key: new subkeys are permitted below it.
            deep_update(old_value, new_value, True)
        else:
            deep_update(old_value, new_value, new_keys_allowed)
    return original
def queued_archive_jobs():
    """Fetch the info about jobs waiting in the archive queue.

    Returns
    -------
    jobs: dict
    """
    waiting = []
    for job in pbs_jobs():
        # Keep only queued ("Q") jobs sitting in the archivelong queue.
        if job["job_state"] == "Q" and job["queue"] == "archivelong":
            waiting.append(job)
    return waiting
def getCustomKernelSolutionObj(kernelName, directory=globalParameters["CustomKernelDirectory"]):
    """Creates the Solution object for a custom kernel"""
    config = getCustomKernelConfig(kernelName, directory)
    # Validate everything except the ProblemType section.
    for key, value in config.items():
        if key != "ProblemType":
            checkParametersAreValid((key, [value]), validParameters)
    config["KernelLanguage"] = "Assembly"
    config["CustomKernelName"] = kernelName
    return Solution(config)
def _format_breed_name(name):
"""
Format breed name for displaying
INPUT
name: raw breed name, str
OUTPUT
name : cleaned breed name, str
"""
return name.split('.')[1].replace('_', ' ') | 0c2680de9bd19e61d717fb84c1ce01e5095ddf35 | 8,101 |
def create_pid_redirected_error_handler():
    """Creates an error handler for `PIDRedirectedError` error.

    The returned callable turns a redirect error into an HTTP 301 JSON
    response pointing at the destination PID, or re-raises the original
    error when a redirect cannot be built.
    """
    def pid_redirected_error_handler(e):
        try:
            # Check that the source pid and the destination pid are of the same
            # pid_type
            assert e.pid.pid_type == e.destination_pid.pid_type
            # Redirection works only for the item route of the format
            # `/records/<pid_value>`
            location = url_for(
                request.url_rule.endpoint,
                pid_value=e.destination_pid.pid_value
            )
            data = dict(
                status=301,
                message='Moved Permanently.',
                location=location,
            )
            response = make_response(jsonify(data), data['status'])
            response.headers['Location'] = location
            return response
        except (AssertionError, BuildError, KeyError):
            # Mismatched pid types, an unbuildable URL, or missing data:
            # fall back to the original exception.
            raise e
    return pid_redirected_error_handler
def is_var_name_with_greater_than_len_n(var_name: str) -> bool:
    """
    Given a variable name, return if this is acceptable according to the
    filtering heuristics: it must be at least `min_var_name_len` characters
    long and not appear in the blocklist of unacceptable names.

    Here, we try to discard variable names like X, y, a, b etc.
    :param var_name: the variable name to check
    :return: True if the name passes the heuristics
    """
    # NOTE(review): the blocklist was originally declared as an empty dict
    # `{}`, so the membership check never rejected anything; it is kept
    # empty here (as a set, the intended type) to preserve behavior.
    # Populate it to actually filter specific names.
    unacceptable_names = set()
    return (len(var_name) >= min_var_name_len
            and var_name not in unacceptable_names)
# NOTE(review): this import binds the regex LOCALE flag to `L`, which would
# shadow the `L(Rd, Re)` helper called below (re.L is not callable with two
# matrices). It looks like a spurious auto-import — confirm where the real
# `L` helper is defined before removing it.
from re import L
def stacked_L(robot: RobotPlanar, q: list, q_goal: list):
    """
    Stack the per-end-effector rotation-error matrices L (and their
    inverses) into block-diagonal form for convenience.

    :param robot: planar robot model providing FK poses
    :param q: current joint configuration
    :param q_goal: goal joint configuration
    :return: (LL, LLinv) block-diagonal matrices
    """
    LL = []
    LLinv = []
    # Full poses at the current and goal configurations.
    Ts_ee = robot.get_full_pose_fast_lambdify(list_to_variable_dict(q))
    Ts_goal = robot.get_full_pose_fast_lambdify(list_to_variable_dict(q_goal))
    for ee in robot.end_effectors:
        # Lift the planar poses to SE(3) and take the rotation parts.
        T_0_ee = SE2_to_SE3(Ts_ee[ee[0]])
        Re = T_0_ee[0:3, 0:3]
        T_0_goal = SE2_to_SE3(Ts_goal[ee[0]])
        Rd = T_0_goal[0:3, 0:3]
        ll, llinv = L(Rd, Re)
        # Identity blocks interleave with the computed L blocks — presumably
        # the identity corresponds to the positional part; verify.
        LL.append(np.eye(3))
        LLinv.append(np.eye(3))
        LL.append(ll)
        LLinv.append(llinv)
    LL = block_diag(*LL)
    LLinv = block_diag(*LLinv)
    return LL, LLinv
from typing import Dict
async def help() -> Dict:
    """Shows this help message."""
    # Maps every API route to the docstring of its handler.  The docstrings
    # here are runtime data (served to the client), so they must not be
    # reworded without changing the API's visible help output.
    return {
        '/': help.__doc__,
        '/help': help.__doc__,
        '/registration/malaysia': format_docstring(get_latest_registration_data_malaysia.__doc__),
        '/registration/malaysia/latest': format_docstring(get_latest_registration_data_malaysia.__doc__),
        '/registration/malaysia/{date}': format_docstring(get_registration_data_malaysia.__doc__),
        '/vaccination/malaysia': format_docstring(get_latest_vax_data_malaysia.__doc__),
        '/vaccination/malaysia/latest': format_docstring(get_latest_vax_data_malaysia.__doc__),
        '/vaccination/malaysia/{date}': format_docstring(get_vax_data_malaysia.__doc__),
        '/registration/state': format_docstring(get_latest_registration_data_state.__doc__),
        '/registration/state/all/latest': format_docstring(get_latest_registration_data_state.__doc__),
        '/registration/state/all/{date}': format_docstring(get_registration_data_all_state.__doc__),
        '/registration/state/{state}/latest': format_docstring(get_latest_registration_data_for_state.__doc__),
        '/registration/state/{state}/{date}': format_docstring(get_registration_data_state.__doc__),
        '/vaccination/state': format_docstring(get_latest_vax_data_state.__doc__),
        '/vaccination/state/all/latest': format_docstring(get_latest_vax_data_state.__doc__),
        '/vaccination/state/all/{date}': format_docstring(get_vax_data_all_state.__doc__),
        '/vaccination/state/{state}/latest': format_docstring(get_latest_vax_data_for_state.__doc__),
        '/vaccination/state/{state}/{date}': format_docstring(get_vax_data_state.__doc__),
    }
def eliminate_arrays(clusters, template):
    """
    Eliminate redundant expressions stored in Arrays.

    For each dense cluster, Array writes whose RHS duplicates an earlier
    Array write are dropped, and reads of the dropped Array are redirected
    to the surviving one.

    NOTE(review): the `template` parameter is unused in this body — confirm
    whether callers rely on the signature before removing it.
    """
    mapper = {}
    processed = []
    for c in clusters:
        if not c.is_dense:
            # Only dense clusters are candidates; pass others through.
            processed.append(c)
            continue
        # Search for any redundant RHSs
        seen = {}
        for e in c.exprs:
            f = e.lhs.function
            if not f.is_Array:
                continue
            v = seen.get(e.rhs)
            if v is not None:
                # Found a redundant RHS
                mapper[f] = v
            else:
                seen[e.rhs] = f
        if not mapper:
            # Do not waste time
            processed.append(c)
            continue
        # Replace redundancies
        subs = {}
        for f, v in mapper.items():
            for i in filter_ordered(i.indexed for i in c.scope[f]):
                subs[i] = v[f.indices]
        exprs = []
        for e in c.exprs:
            if e.lhs.function in mapper:
                # Drop the write
                continue
            exprs.append(e.xreplace(subs))
        processed.append(c.rebuild(exprs))
    return processed
def data_generator(batch_size):
    """
    Build train/test tf.data pipelines for the IMDB sentiment dataset.

    Args:
        batch_size: Size of batch

    Returns:
        (train_dataset, test_dataset, vocab_size) where the datasets yield
        (ragged token-id sequences, label) batches and vocab_size is the
        word-index cutoff used when loading the data.
    """
    vocab_size = 20000
    (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=vocab_size)
    # Ragged tensors keep the variable review lengths; labels get a
    # trailing singleton axis.
    x_train, y_train, x_test, y_test = tf.ragged.constant(x_train), tf.constant(y_train[..., None]), \
                                       tf.ragged.constant(x_test), tf.constant(y_test[..., None])
    # Shuffle only train dataset
    train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)) \
        .shuffle(batch_size * 100).batch(batch_size)
    test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(batch_size)
    return train_dataset, test_dataset, vocab_size
from datetime import datetime
def now():
    """ Get current timestamp

    Returns:
        str: timestamp string formatted as 'DD Month YYYY, HH:MM:SS AM/PM'
    """
    return datetime.now().strftime("%d %B %Y, %I:%M:%S %p")
def open_mfdataset(
    fname,
    convert_to_ppb=True,
    mech="cb6r3_ae6_aq",
    var_list=None,
    fname_pm25=None,
    surf_only=False,
    **kwargs
):
    # Like WRF-chem add var list that just determines whether to calculate sums or not to speed this up.
    """Method to open RFFS-CMAQ dyn* netcdf files.

    Parameters
    ----------
    fname : string or list
        fname is the path to the file or files. It will accept hot keys in
        strings as well.
    convert_to_ppb : boolean
        If true the units of the gas species will be converted to ppbv
    mech: str
        Mechanism to be used for calculating sums. Mechanisms supported:
        "cb6r3_ae6_aq"
    var_list: list
        List of variables to include in output. MELODIES-MONET only reads in
        variables need to plot in order to save on memory and simulation cost
        especially for vertical data. If None, will read in all model data and
        calculate all sums.
    fname_pm25: string or list
        Optional path to the file or files for precalculated PM2.5 sums. It
        will accept hot keys in strings as well.
    surf_only: boolean
        Whether to save only surface data to save on memory and computational
        cost (True) or not (False).

    Returns
    -------
    xarray.DataSet
        RRFS-CMAQ model dataset in standard format for use in MELODIES-MONET
    """
    # Get dictionary of summed species for the mechanism of choice.
    dict_sum = dict_species_sums(mech=mech)
    if var_list is not None:
        # Read in only a subset of variables and only do calculations if needed.
        var_list_orig = var_list.copy()  # Keep track of the original list before changes.
        list_calc_sum = []
        list_remove_extra = []  # list of variables to remove after the sum to save in memory.
        for var_sum in [
            "PM25",
            "PM10",
            "noy_gas",
            "noy_aer",
            "nox",
            "pm25_cl",
            "pm25_ec",
            "pm25_ca",
            "pm25_na",
            "pm25_nh4",
            "pm25_no3",
            "pm25_so4",
            "pm25_om",
        ]:
            if var_sum in var_list:
                if var_sum == "PM25":
                    var_list.extend(dict_sum["aitken"])
                    var_list.extend(dict_sum["accumulation"])
                    var_list.extend(dict_sum["coarse"])
                    # Keep track to remove these later too
                    list_remove_extra.extend(dict_sum["aitken"])
                    list_remove_extra.extend(dict_sum["accumulation"])
                    list_remove_extra.extend(dict_sum["coarse"])
                elif var_sum == "PM10":
                    var_list.extend(dict_sum["aitken"])
                    var_list.extend(dict_sum["accumulation"])
                    var_list.extend(dict_sum["coarse"])
                    # Keep track to remove these later too
                    list_remove_extra.extend(dict_sum["aitken"])
                    list_remove_extra.extend(dict_sum["accumulation"])
                    list_remove_extra.extend(dict_sum["coarse"])
                else:
                    var_list.extend(dict_sum[var_sum])
                    # Keep track to remove these later too
                    list_remove_extra.extend(dict_sum[var_sum])
                var_list.remove(var_sum)
                list_calc_sum.append(var_sum)
        # append the other needed species.
        var_list.append("lat")
        var_list.append("lon")
        var_list.append("phalf")
        var_list.append("tmp")
        var_list.append("pressfc")
        var_list.append("dpres")
        var_list.append("hgtsfc")
        var_list.append("delz")
        # Remove duplicates just in case:
        var_list = list(dict.fromkeys(var_list))
        list_remove_extra = list(dict.fromkeys(list_remove_extra))
        # Select only those elements in list_remove_extra that are not in var_list_orig
        list_remove_extra_only = list(set(list_remove_extra) - set(var_list_orig))
        # If variables in pm25 files are included remove these as these are not in the main file
        # And will be added later.
        for pm25_var in [
            "PM25_TOT",
            "PM25_TOT_NSOM",
            "PM25_EC",
            "PM25_NH4",
            "PM25_NO3",
            "PM25_SO4",
            "PM25_OC",
            "PM25_OM",
        ]:
            if pm25_var in var_list:
                var_list.remove(pm25_var)
        # open the dataset using xarray
        dset = xr.open_mfdataset(fname, concat_dim="time", combine="nested", **kwargs)[var_list]
    else:
        # Read in all variables and do all calculations.
        dset = xr.open_mfdataset(fname, concat_dim="time", combine="nested", **kwargs)
        list_calc_sum = [
            "PM25",
            "PM10",
            "noy_gas",
            "noy_aer",
            "nox",
            "pm25_cl",
            "pm25_ec",
            "pm25_ca",
            "pm25_na",
            "pm25_nh4",
            "pm25_no3",
            "pm25_so4",
            "pm25_om",
        ]
    if fname_pm25 is not None:
        # Add the processed pm2.5 species.
        dset_pm25 = xr.open_mfdataset(fname_pm25, concat_dim="time", combine="nested", **kwargs)
        dset_pm25 = dset_pm25.drop(
            labels=["lat", "lon", "pfull"]
        )  # Drop duplicate variables so can merge.
        # Slight differences in pfull value between the files, but I assume that these still represent the
        # same pressure levels from the model dynf* files.
        # Attributes are formatted differently in pm25 file so remove attributes and use those from dynf* files.
        dset_pm25.attrs = {}
        dset = dset.merge(dset_pm25)
    # Standardize some variable names
    dset = dset.rename(
        {
            "grid_yt": "y",
            "grid_xt": "x",
            "pfull": "z",
            "phalf": "z_i",  # Interface pressure levels
            "lon": "longitude",
            "lat": "latitude",
            "tmp": "temperature_k",  # standard temperature (kelvin)
            "pressfc": "surfpres_pa",
            "dpres": "dp_pa",  # Change names so standard surfpres_pa and dp_pa
            "hgtsfc": "surfalt_m",
            "delz": "dz_m",
        }
    )  # Optional, but when available include altitude info
    # Calculate pressure. This has to go before sorting because ak and bk
    # are not sorted as they are in attributes
    dset["pres_pa_mid"] = _calc_pressure(dset)
    # Adjust pressure levels for all models such that the surface is first.
    dset = dset.sortby("z", ascending=False)
    dset = dset.sortby("z_i", ascending=False)
    # Note this altitude calcs needs to always go after resorting.
    # Altitude calculations are all optional, but for each model add values that are easy to calculate.
    dset["alt_msl_m_full"] = _calc_hgt(dset)
    dset["dz_m"] = dset["dz_m"] * -1.0  # Change to positive values.
    # Set coordinates
    dset = dset.reset_index(
        ["x", "y", "z", "z_i"], drop=True
    )  # For now drop z_i no variables use it.
    dset["latitude"] = dset["latitude"].isel(time=0)
    dset["longitude"] = dset["longitude"].isel(time=0)
    dset = dset.reset_coords()
    dset = dset.set_coords(["latitude", "longitude"])
    # These sums and units are quite expensive and memory intensive,
    # so add option to shrink dataset to just surface when needed
    if surf_only:
        dset = dset.isel(z=0).expand_dims("z", axis=1)
    # Need to adjust units before summing for aerosols
    # convert all gas species to ppbv
    if convert_to_ppb:
        for i in dset.variables:
            if "units" in dset[i].attrs:
                if "ppmv" in dset[i].attrs["units"]:
                    dset[i] *= 1000.0
                    dset[i].attrs["units"] = "ppbv"
    # convert "ug/kg to ug/m3"
    for i in dset.variables:
        if "units" in dset[i].attrs:
            if "ug/kg" in dset[i].attrs["units"]:
                # ug/kg -> ug/m3 using dry air density
                dset[i] = dset[i] * dset["pres_pa_mid"] / dset["temperature_k"] / 287.05535
                dset[i].attrs["units"] = r"$\mu g m^{-3}$"
    # add lazy diagnostic variables
    # Note that because there are so many species to sum. Summing the aerosols is slowing down the code.
    if "PM25" in list_calc_sum:
        dset = add_lazy_pm25(dset, dict_sum)
    if "PM10" in list_calc_sum:
        dset = add_lazy_pm10(dset, dict_sum)
    if "noy_gas" in list_calc_sum:
        dset = add_lazy_noy_g(dset, dict_sum)
    if "noy_aer" in list_calc_sum:
        dset = add_lazy_noy_a(dset, dict_sum)
    if "nox" in list_calc_sum:
        dset = add_lazy_nox(dset, dict_sum)
    if "pm25_cl" in list_calc_sum:
        dset = add_lazy_cl_pm25(dset, dict_sum)
    if "pm25_ec" in list_calc_sum:
        dset = add_lazy_ec_pm25(dset, dict_sum)
    if "pm25_ca" in list_calc_sum:
        dset = add_lazy_ca_pm25(dset, dict_sum)
    if "pm25_na" in list_calc_sum:
        dset = add_lazy_na_pm25(dset, dict_sum)
    if "pm25_nh4" in list_calc_sum:
        dset = add_lazy_nh4_pm25(dset, dict_sum)
    if "pm25_no3" in list_calc_sum:
        dset = add_lazy_no3_pm25(dset, dict_sum)
    if "pm25_so4" in list_calc_sum:
        dset = add_lazy_so4_pm25(dset, dict_sum)
    if "pm25_om" in list_calc_sum:
        dset = add_lazy_om_pm25(dset, dict_sum)
    # Change the times to pandas format
    dset["time"] = dset.indexes["time"].to_datetimeindex(unsafe=True)
    # Turn off warning for now. This is just because the model is in julian time
    # Drop extra variables that were part of sum, but are not in original var_list
    # to save memory and computational time.
    # This is only revevant if var_list is provided
    if var_list is not None:
        if bool(list_remove_extra_only):  # confirm list not empty
            dset = dset.drop_vars(list_remove_extra_only)
    return dset
def projection(projection_matrix: tf.Tensor,
               flattened_vector: tf.Tensor) -> tf.Tensor:
    """Projects `flattened_vector` using `projection_matrix`.

    Args:
      projection_matrix: A rank-2 Tensor that specifies the projection.
      flattened_vector: A flat Tensor to be projected

    Returns:
      A flat Tensor returned from projection.
    """
    column = tf.reshape(flattened_vector, [-1, 1])
    coefficients = tf.transpose(projection_matrix) @ column
    projected = projection_matrix @ coefficients
    return tf.reshape(projected, [-1])
def free_vars(e):
    """Get free variables from expression e.

    Thin wrapper delegating to the C++ pass via `_ir_pass`.

    Parameters
    ----------
    e: tvm.relay.Expr
        The input expression

    Returns
    -------
    free : List[tvm.relay.Var]
        The list of free variables
    """
    return _ir_pass.free_vars(e)
def image_as_uint(im, bitdepth=None):
    """ Convert the given image to uint (default: uint8)

    If the dtype already matches the desired format, it is returned
    as-is. If the image is float, and all values are between 0 and 1,
    the values are multiplied by np.power(2.0, bitdepth). In all other
    situations, the values are scaled such that the minimum value
    becomes 0 and the maximum value becomes np.power(2.0, bitdepth)-1
    (255 for 8-bit and 65535 for 16-bit).

    Raises ValueError for non-ndarray input, unsupported bitdepths, and
    non-finite min/max values.
    """
    if not bitdepth:
        bitdepth = 8
    if not isinstance(im, np.ndarray):
        raise ValueError("Image must be a numpy array")
    if bitdepth == 8:
        out_type = np.uint8
    elif bitdepth == 16:
        out_type = np.uint16
    else:
        raise ValueError("Bitdepth must be either 8 or 16")
    dtype_str1 = str(im.dtype)
    dtype_str2 = out_type.__name__
    if (im.dtype == np.uint8 and bitdepth == 8) or (
        im.dtype == np.uint16 and bitdepth == 16
    ):
        # Already the correct format? Return as-is
        return im
    if dtype_str1.startswith("float") and np.nanmin(im) >= 0 and np.nanmax(im) <= 1:
        # Float image in [0, 1]: scale up; + 0.499999999 rounds to nearest
        # while keeping 1.0 below the overflow boundary.
        _precision_warn(dtype_str1, dtype_str2, "Range [0, 1].")
        im = im.astype(np.float64) * (np.power(2.0, bitdepth) - 1) + 0.499999999
    elif im.dtype == np.uint16 and bitdepth == 8:
        # Down-conversions between uint widths use bit shifts, not scaling.
        _precision_warn(dtype_str1, dtype_str2, "Losing 8 bits of resolution.")
        im = np.right_shift(im, 8)
    elif im.dtype == np.uint32:
        _precision_warn(
            dtype_str1,
            dtype_str2,
            "Losing {} bits of resolution.".format(32 - bitdepth),
        )
        im = np.right_shift(im, 32 - bitdepth)
    elif im.dtype == np.uint64:
        _precision_warn(
            dtype_str1,
            dtype_str2,
            "Losing {} bits of resolution.".format(64 - bitdepth),
        )
        im = np.right_shift(im, 64 - bitdepth)
    else:
        # General case: min-max normalise into the full output range.
        mi = np.nanmin(im)
        ma = np.nanmax(im)
        if not np.isfinite(mi):
            raise ValueError("Minimum image value is not finite")
        if not np.isfinite(ma):
            raise ValueError("Maximum image value is not finite")
        if ma == mi:
            # Constant image: nothing to scale.
            return im.astype(out_type)
        _precision_warn(dtype_str1, dtype_str2, "Range [{}, {}].".format(mi, ma))
        # Now make float copy before we scale
        im = im.astype("float64")
        # Scale the values between 0 and 1 then multiply by the max value
        im = (im - mi) / (ma - mi) * (np.power(2.0, bitdepth) - 1) + 0.499999999
        assert np.nanmin(im) >= 0
        assert np.nanmax(im) < np.power(2.0, bitdepth)
    return im.astype(out_type)
def unary_operator(op):
    """
    Factory function for making unary operator methods for Factors.

    Returns a method suitable for installing on a Factor class; the method
    builds a NumExprFactor expressing `op` applied to the factor.
    """
    # Only negate is currently supported.
    valid_ops = {'-'}
    if op not in valid_ops:
        raise ValueError("Invalid unary operator %s." % op)

    @with_doc("Unary Operator: '%s'" % op)
    @with_name(unary_op_name(op))
    def unary_operator(self):
        # Unary ops are only defined for float64 factors.
        if self.dtype != float64_dtype:
            raise TypeError(
                "Can't apply unary operator {op!r} to instance of "
                "{typename!r} with dtype {dtypename!r}.\n"
                "{op!r} is only supported for Factors of dtype "
                "'float64'.".format(
                    op=op,
                    typename=type(self).__name__,
                    dtypename=self.dtype.name,
                )
            )
        # This can't be hoisted up a scope because the types returned by
        # unary_op_return_type aren't defined when the top-level function is
        # invoked.
        if isinstance(self, NumericalExpression):
            # Fold the op into the existing expression string.
            return NumExprFactor(
                "{op}({expr})".format(op=op, expr=self._expr),
                self.inputs,
                dtype=float64_dtype,
            )
        else:
            # Wrap this factor as the single input of a new expression.
            return NumExprFactor(
                "{op}x_0".format(op=op),
                (self,),
                dtype=float64_dtype,
            )
    return unary_operator
def compute_cost_with_regularization(A3, Y, parameters, lambd):
    """
    Compute the cross-entropy cost with an added L2 regularization term.

    Arguments:
    A3 -- post-activation, output of forward propagation, of shape (output size, number of examples)
    Y -- "true" labels vector, of shape (output size, number of examples)
    parameters -- python dictionary containing parameters of the model
    lambd -- L2 regularization hyperparameter

    Returns:
    cost -- value of the regularized loss function
    """
    m = Y.shape[1]
    # Sum of squared weights across the three layers.
    weight_square_sum = sum(
        np.sum(np.square(parameters[name])) for name in ("W1", "W2", "W3")
    )
    cross_entropy_cost = compute_cost(A3, Y)  # cross-entropy part of the cost
    L2_regularization_cost = (1. / m * lambd / 2) * weight_square_sum
    return cross_entropy_cost + L2_regularization_cost
from operator import and_
def user_page(num_page=1):
    """Page with list of users route.

    Renders a paginated user list; when valid search parameters are
    present in the query string, filters and orders the result set.
    """
    # CSRF is disabled because the form is populated from GET args.
    form = SearchUserForm(request.args, meta={'csrf': False})
    msg = False
    if form.validate():
        search_by = int(request.args.get('search_by'))
        order_by = int(request.args.get('order_by'))
        search_string = str(request.args.get('search'))
        if len(search_string) >= MIN_SEARCH_STR:
            condition = user_search(search_string, search_by)
        else:
            # NOTE(review): an empty-string condition inside and_() relies on
            # SQLAlchemy treating "" as a no-op clause — confirm intended.
            condition = ""
            if search_string != "":
                msg = True
        # order_by indexes into this fixed list of sortable columns.
        order_list = [User.id, User.role_id, User.delete_date]
        order = order_list[order_by]
        search_users = db.session.query(User, Role).filter(and_(
            User.role_id == Role.id, condition)).order_by(order).paginate(
            per_page=PAGINATE_PAGE, page=num_page, error_out=True)
        if msg:
            flash("Search string is too small", category="danger")
        return render_template('user_page.html', form=form, users=search_users,
                               get="?" + urlencode(request.args))
    else:
        # No/invalid search: plain paginated listing ordered by id.
        users = db.session.query(User, Role).filter(
            User.role_id == Role.id).order_by(User.id).paginate(
            per_page=PAGINATE_PAGE, page=num_page, error_out=True)
        return render_template('user_page.html', form=form, users=users,
                               get="?" + urlencode(request.args))
def validate_input(data: ConfigType) -> dict[str, str] | None:
    """Validate the input by the user.

    Returns a dict mapping "base" to an error key when validation fails,
    or delegates to `validate_zones` (which returns None on success).
    """
    try:
        # Account/key validation is done by the SIA library itself.
        SIAAccount.validate_account(data[CONF_ACCOUNT], data.get(CONF_ENCRYPTION_KEY))
    except InvalidKeyFormatError:
        return {"base": "invalid_key_format"}
    except InvalidKeyLengthError:
        return {"base": "invalid_key_length"}
    except InvalidAccountFormatError:
        return {"base": "invalid_account_format"}
    except InvalidAccountLengthError:
        return {"base": "invalid_account_length"}
    except Exception as exc:  # pylint: disable=broad-except
        # Unknown library failure: surface a generic error to the form.
        _LOGGER.exception("Unexpected exception from SIAAccount: %s", exc)
        return {"base": "unknown"}
    # Ping interval must be between 1 minute and 24 hours.
    if not 1 <= data[CONF_PING_INTERVAL] <= 1440:
        return {"base": "invalid_ping"}
    return validate_zones(data)
import importlib.util
def file_and_path_for_module(modulename):
    """Find the file and search path for `modulename`.

    Returns:
        filename: The filename of the module, or None.
        path: A list (possibly empty) of directories to find submodules in.

    Fix: import `importlib.util` explicitly — a bare `import importlib`
    does not guarantee the `util` submodule is bound, so
    `importlib.util.find_spec` could raise AttributeError.
    """
    filename = None
    path = []
    try:
        # find_spec returns None for a missing top-level module and can
        # raise ImportError for malformed names.
        spec = importlib.util.find_spec(modulename)
    except ImportError:
        pass
    else:
        if spec is not None:
            # Namespace packages have no single source file.
            if spec.origin != "namespace":
                filename = spec.origin
            path = list(spec.submodule_search_locations or ())
    return filename, path
def hello_page(request):
    """Simple view to say hello.

    It is used to check the authentication system: authenticated users
    are greeted by username, anonymous users generically.
    """
    if request.user.is_anonymous:
        text = "Welcome to test_project"
    else:
        text = "Welcome '%s' to test_project" % request.user.username
    return HttpResponse(text, content_type='text/plain')
from typing import Dict
def hash_all(bv: Binary_View) -> Dict[str, Function]:
    """
    Calculate the hash of every function in the binary.

    :param bv: binary view encapsulating the binary
    :return: a dictionary mapping hashes to functions
    """
    return {hash_function(func): func for func in bv.functions}
from typing import Union
from typing import List
def apply_deformation(
    deformation_indices: Union[List[bool], np.ndarray], bsf: np.ndarray
) -> np.ndarray:
    """Return Hadamard-deformed bsf at given indices.

    For each flagged index i, the X component (position i) and Z component
    (position i + n) of the binary symplectic form are swapped; unflagged
    indices are copied unchanged.  Works on a single bsf vector (1D) or a
    stack of them (2D, one per row).
    """
    n = len(deformation_indices)
    deformed = np.zeros_like(bsf)
    if bsf.ndim == 1:
        if bsf.shape[0] != 2*n:
            raise ValueError(
                f'Deformation index length {n} does not match '
                f'bsf shape {bsf.shape}, which should be {(2*n,)}'
            )
        for idx, flip in enumerate(deformation_indices):
            x_src, z_src = (idx + n, idx) if flip else (idx, idx + n)
            deformed[idx] = bsf[x_src]
            deformed[idx + n] = bsf[z_src]
    else:
        if bsf.shape[1] != 2*n:
            raise ValueError(
                f'Deformation index length {n} does not match '
                f'bsf shape {bsf.shape}, which should be '
                f'{(bsf.shape[0], 2*n)}.'
            )
        for idx, flip in enumerate(deformation_indices):
            x_src, z_src = (idx + n, idx) if flip else (idx, idx + n)
            deformed[:, idx] = bsf[:, x_src]
            deformed[:, idx + n] = bsf[:, z_src]
    return deformed
def index(request):
    """Serve the application's index page."""
    template_name = 'index.html'
    return render(request, template_name)
from typing import Optional
from typing import Iterable
from typing import Dict
from typing import Type
import click
def parse_custom_builders(builders: Optional[Iterable[str]]) -> Dict[str, Type[AbstractBuilder]]:
    """
    Parse the custom builders passed using the ``--builder NAME`` option on the command line.

    Each name is resolved against the registered ``whey.builder`` entry
    points; unknown names abort with a usage error.

    :param builders: builder names from the command line, or None.
    """
    custom_builders: Dict[str, Type[AbstractBuilder]] = {}
    if builders is None:
        return custom_builders
    entry_points = get_entry_points()
    for builder_name in builders:
        try:
            entry_point = entry_points[builder_name]
        except KeyError:
            raise click.BadArgumentUsage(
                f"Unknown builder {builder_name!r}. \n"
                f"Is it registered as an entry point under 'whey.builder'?"
            )
        custom_builders[builder_name] = entry_point.load()
    return custom_builders
def construct_user_rnn_inputs(document_feature_size=10,
                              creator_feature_size=None,
                              user_feature_size=None,
                              input_reward=False):
  """Builds the keras Input layers feeding the user RNN.

  Args:
    document_feature_size: Integer, length of document features.
    creator_feature_size: Integer or None, length of creator features. If None,
      no features about creators will be input.
    user_feature_size: Integer or None, length of user features. If None, no
      features about users will be input.
    input_reward: Boolean, whether to input previous reward to RNN layer.

  Returns:
    Tuple of (merged embedding tensor, list of keras Input layers).
  """
  # The previously consumed document's features are always present.
  doc_input = tf.keras.layers.Input(
      shape=(None, document_feature_size), name='user_consumed_doc_feature')
  merged = doc_input
  inputs = [doc_input]
  # Optionally concatenate the consumed document's creator features
  # (observable and/or hidden states inferred by the creator model).
  if creator_feature_size is not None:
    merged, inputs = _merge_inputs(
        (None, creator_feature_size), 'user_consumed_doc-creator_feature',
        merged, inputs)
  # Optionally concatenate the user's current context features.
  if user_feature_size is not None:
    merged, inputs = _merge_inputs(
        (None, user_feature_size), 'user_current_feature', merged,
        inputs)
  # Optionally feed back the previous step's reward.
  if input_reward:
    merged, inputs = _merge_inputs((None, 1), 'user_previous_reward',
                                   merged, inputs)
  return merged, inputs
def numpy_read(DATAFILE, BYTEOFFSET, NUM, PERMISSION, DTYPE):
    """
    Read NumPy-compatible binary data from a file.

    Modeled after MatSeis function read_file in util/waveread.m.

    :param DATAFILE: path of the binary file to read
    :param BYTEOFFSET: byte position at which to start reading
    :param NUM: number of items to read (-1 reads to end of file)
    :param PERMISSION: mode string passed to open(), e.g. 'rb'
    :param DTYPE: NumPy dtype name/spec of the stored items
    :return: 1-D ndarray of the items read
    """
    # Context manager guarantees the handle is closed even if seek or
    # fromfile raises (the original leaked the handle on error).
    with open(DATAFILE, PERMISSION) as f:
        f.seek(BYTEOFFSET, 0)
        data = np.fromfile(f, dtype=np.dtype(DTYPE), count=NUM)
    return data
def _process_pmid(s: str, sep: str = '|', prefix: str = 'pubmed:') -> str:
"""Filter for PubMed ids.
:param s: string of PubMed ids
:param sep: separator between PubMed ids
:return: PubMed id
"""
for identifier in s.split(sep):
identifier = identifier.strip()
if identifier.startswith(prefix):
return identifier | 9a1fc49bf570c81f10b6b5470620d7fc0b54275e | 8,128 |
import ast
def _get_import(name, module: ast.Module):
"""
get from import by name
"""
for stm in ast.walk(module):
if isinstance(stm, ast.ImportFrom):
for iname in stm.names:
if isinstance(iname, ast.alias):
if iname.name == name:
return 'from ' + str(stm.module) + ' import ' + name
if isinstance(stm, ast.Import):
pass
return None | bc33a882c65f7fe44d446376db3a71631629ff04 | 8,129 |
from typing import Optional
from typing import Tuple
def number_of_qucosa_metadata_in_elasticsearch(
    host: str = SLUB_ELASTICSEARCH_SERVER_URL,
    http_auth: Optional[Tuple[str, str]] = None,
    index_name: str = "fulltext_qucosa",
) -> int:
    """Return the number of qucosa documents currently available at the SLUB elastic search server.

    Parameters
    ----------
    host: str = SLUB_ELASTICSEARCH_SERVER_URL
        The hostname of the ElasticSearch server
    http_auth: Optional[Tuple[str, str]]
        Http basic auth parameters as tuple of username and password. If http_auth is None, but environment
        variables `SLUB_ELASTICSEARCH_SERVER_USER` and `SLUB_ELASTICSEARCH_SERVER_PASSWORD` are set, then
        these are used as username and password.
    index_name: str = "fulltext_qucosa"
        The name of the ElasticSearch index to be queried.

    Returns
    -------
    int
        the number of qucosa documents
    """
    client = _initialize_elasticsearch_connection(host, http_auth)
    # A match_all count query returns the total document count of the index.
    response = client.count(index=index_name, body={"query": {"match_all": {}}})
    return response["count"]
def _tuple_to_string(tup):
    """
    Convert a tuple of pitch classes to a compact string.

    Pitch classes 10 and 11 are rendered as 't' and 'e' respectively;
    all others as their decimal digit.

    Params:
        * tup (tuple): a tuple of pitch classes, like (11, 10, 5, 9, 3)

    Returns:
        * string: e.g., 'et593'
    """
    symbols = {10: 't', 11: 'e'}
    chars = []
    for raw in tup:
        pitch = mod_12(raw)
        if pitch not in (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11):
            # should never happen: mod_12 ought to map into 0..11
            raise ValueError('unexpected pitch found: %s' % pitch)
        chars.append(symbols.get(pitch, str(pitch)))
    return ''.join(chars)
def dashboard():
    """Render the dashboard for a logged-in user."""
    # Demonstrates storing a value in the server-side session.
    session["redis_test"] = "This is a session variable."
    context = dict(
        title="Flask-Session Tutorial.",
        template="dashboard-template",
        current_user=current_user,
        body="You are now logged in!",
    )
    return render_template("dashboard.jinja2", **context)
def choose_top_k(scores_flat, config):
    """Select the ``beam_width`` highest-scoring candidates as successors."""
    top_scores, top_indices = tf.nn.top_k(scores_flat, k=config.beam_width)
    return top_scores, top_indices
def train_valid_test_split(data, proportions='50:25:25'):
    """
    Split the data into 3 parts - training, validation and test sets.

    The three sets are contiguous, non-overlapping slices that together
    cover every row. (The previous implementation started each slice at
    ``previous_end + 1`` even though ``iloc`` slicing is half-open, so
    one row was silently dropped at each boundary.)

    :param proportions: proportions for the split, like 2:1:1 or 50:30:20
    :param data: preprocessed DataFrame with 'ret' and 'bin' columns plus features
    :return: X_train, Y_train, target_rtns_train, X_valid, Y_valid,
             target_rtns_valid, X_test, Y_test, target_rtns_test
    """
    features = [c for c in data.columns if c not in ('ret', 'bin')]
    n = len(data)
    borders = np.array([float(p) for p in proportions.split(':')])
    borders = borders / borders.sum()
    # Exclusive end positions; each slice starts exactly where the
    # previous one ends because iloc slicing excludes the stop index.
    train_end = int(np.floor(n * borders[0]))
    valid_end = int(np.floor(n * borders[:2].sum()))
    train_ids = (0, train_end)
    valid_ids = (train_end, valid_end)
    test_ids = (valid_end, n)
    X_train = data[features].iloc[train_ids[0]:train_ids[1], :]
    X_valid = data[features].iloc[valid_ids[0]:valid_ids[1], :]
    X_test = data[features].iloc[test_ids[0]:test_ids[1], :]
    Y_train = data.bin.iloc[train_ids[0]:train_ids[1]]
    Y_valid = data.bin.iloc[valid_ids[0]:valid_ids[1]]
    Y_test = data.bin.iloc[test_ids[0]:test_ids[1]]
    target_rtns_train = data.ret.iloc[train_ids[0]:train_ids[1]]
    target_rtns_valid = data.ret.iloc[valid_ids[0]:valid_ids[1]]
    target_rtns_test = data.ret.iloc[test_ids[0]:test_ids[1]]
    return X_train, Y_train, target_rtns_train, X_valid, Y_valid, target_rtns_valid, X_test, Y_test, target_rtns_test
def data(*args, **kwargs):
    """
    Build an HTML ``<data>`` element.

    The <data> element links its content with a machine-readable
    translation; for time- or date-related content use <time> instead.
    """
    return el('data', *args, **kwargs)
import copy
def get_crops(nodules, fmt='raw', nodule_shape=(32, 64, 64), batch_size=20, share=0.5, histo=None,
              variance=(36, 144, 144), hu_lims=(-1000, 400), **kwargs):
    """ Get pipeline that performs preprocessing and crops cancerous/non-cancerous nodules in
    a chosen proportion.

    Parameters
    ----------
    nodules : pd.DataFrame
        contains:
        - 'seriesuid': index of patient or series.
        - 'z','y','x': coordinates of nodules center.
        - 'diameter': diameter, in mm.
    fmt : str
        can be either 'raw', 'blosc' or 'dicom'.
    nodule_shape : tuple, list or ndarray of int
        crop shape along (z,y,x).
    batch_size : int
        number of nodules in batch generated by pipeline.
    share : float
        share of cancer crops in the batch.
    histo : tuple
        :func:`numpy.histogramdd` output.
        Used for sampling non-cancerous crops
    variance : tuple, list or ndarray of float
        variances of normally distributed random shifts of
        nodules' start positions
    hu_lims : tuple, list of float
        seq of len=2, representing limits of hu-trimming in normalize_hu-action.
    **kwargs
        spacing : tuple
            (z,y,x) spacing after resize.
        shape : tuple
            (z,y,x) shape after crop/pad.
        method : str
            interpolation method ('pil-simd' or 'resize').
            See :func:`~radio.CTImagesBatch.resize`.
        order : None or int
            order of scipy-interpolation (<=5), if used.
        padding : str
            mode of padding, any supported by :func:`numpy.pad`.

    Returns
    -------
    pipeline
    """
    # Shallow-copy the defaults before overriding with caller kwargs.
    # NOTE: the original called ``copy(kwargs_default)``, which with
    # ``import copy`` invokes the *module* object and raises TypeError;
    # ``copy.copy`` is the intended function.
    args_unify_spacing = copy.copy(kwargs_default)
    args_unify_spacing.update(kwargs)
    # set up args for nodule sampling
    args_sample_nodules = dict(nodule_size=nodule_shape, batch_size=batch_size, share=share,
                               histo=histo, variance=variance)
    # set up the pipeline: load -> annotate -> resample -> mask -> normalize -> crop
    pipeline = (Pipeline()
                .load(fmt=fmt)
                .fetch_nodules_info(nodules=nodules)
                .unify_spacing(**args_unify_spacing)
                .create_mask()
                .normalize_hu(min_hu=hu_lims[0], max_hu=hu_lims[1])
                .sample_nodules(**args_sample_nodules)
                .run(lazy=True, batch_size=RUN_BATCH_SIZE, shuffle=True)
                )
    return pipeline
def get_dict_from_args(args):
    """Extract a dict from a task argument string.

    :param args: comma-separated ``key=value`` pairs, e.g. ``"a=1, b=2"``;
        may be None or empty, in which case an empty dict is returned.
    :return: dict mapping each key to its (string) value
    """
    d = {}
    if args:
        for pair in args.split(','):
            # Split on the first '=' only so values may themselves
            # contain '=' (the original raised ValueError on those).
            k, v = pair.strip().split('=', 1)
            d[k] = v
    return d
import warnings
import cupy as cp
from netver.utils.cuda_code import cuda_code
def multi_area_propagation_gpu(input_domain, net_model, thread_number=32):
    """
    Propagation of the input domain through the network to obtain the OVERESTIMATION of the output bound.
    The process is performed applying the linear combination node-wise and the necessary activation functions.
    The process is on GPU, completely parallelized on NVIDIA CUDA GPUs and c++ code.
    Parameters
    ----------
    input_domain : list
        the input domain expressed as a 3-dim matrix. (a) a list of list for each splitted domain;
        (b) a list of bound for each input node and (c) a list of two element for the node, lower and upper
    net_model : tf.keras.Model
        tensorflow model to analyze, the model must be formatted in the 'tf.keras.Model(inputs, outputs)' format
    thread_number : int
        number of CUDA thread to use for each CUDA block, the choice is free and does not affect the results,
        it can however affect the performance
    Returns:
    --------
    reshaped_bound : list
        the propagated bound in the same format of the input domain (3-dim)
    """
    # Suppress CuPy's standard warnings so they do not clutter the output
    warnings.filterwarnings("ignore")
    # Flatten the network description for the CUDA kernel: per-layer sizes,
    # activation codes, and all weights/biases concatenated into flat arrays
    layer_sizes = []
    activations = []
    full_weights = np.array([])
    full_biases = np.array([])
    # Iterate on each layer of the network, excluding the input layer
    # (tf2 keeps the input as a separate first layer)
    for layer in net_model.layers[1:]:
        # Encode the activation as an integer the kernel understands:
        # 0=linear, 1=relu, 2=tanh, 3=sigmoid
        if layer.activation == tf.keras.activations.linear: activations.append(0)
        elif layer.activation == tf.keras.activations.relu: activations.append(1)
        elif layer.activation == tf.keras.activations.tanh: activations.append(2)
        elif layer.activation == tf.keras.activations.sigmoid: activations.append(3)
        # Record the layer's input width (network shape as a flat list)
        layer_sizes.append(layer.input_shape[1])
        # Append this layer's parameters; weights are transposed so each
        # row of the flattened array corresponds to one output neuron
        weight, bias = layer.get_weights()
        full_weights = np.concatenate((full_weights, weight.T.reshape(-1)))
        full_biases = np.concatenate((full_biases, bias.reshape(-1)))
    # Append the output layer size to complete the shape list
    layer_sizes.append( net_model.output.shape[1] )
    # Compile the raw CUDA kernel from the bundled C++ source
    my_kernel = cp.RawKernel(cuda_code, 'my_kernel')
    # Move all data to device (cupy arrays) before the kernel call;
    # results buffer holds [lower, upper] per output node per sub-domain
    max_layer_size = max(layer_sizes)
    results_cuda = cp.zeros(layer_sizes[-1] * 2 * len(input_domain), dtype=cp.float32)
    layer_sizes = cp.array(layer_sizes, dtype=cp.int32)
    activations = cp.array(activations, dtype=cp.int32)
    input_domain = cp.array(input_domain, dtype=cp.float32)
    full_weights = cp.array(full_weights, dtype=cp.float32)
    full_biases = cp.array(full_biases, dtype=cp.float32)
    # One thread per sub-domain: enough blocks to cover them all
    block_number = int(len(input_domain) / thread_number) + 1
    # Launch the kernel and wait for all threads to finish
    kernel_input = (input_domain, len(input_domain), layer_sizes, len(layer_sizes), full_weights, full_biases, results_cuda, max_layer_size, activations)
    my_kernel((block_number, ), (thread_number, ), kernel_input)
    cp.cuda.Stream.null.synchronize()
    # Copy results back to host and reshape to (n_domains, n_outputs, 2)
    reshaped_bound = cp.asnumpy(results_cuda).reshape((len(input_domain), net_model.layers[-1].output_shape[1], 2))
    # NOTE(review): `np` and `tf` are referenced but not imported in this
    # block — presumably imported at module level; verify.
    return reshaped_bound
def opensslCmsSignedDataCreate( conveyedInfoFile, cert, privateKey ):
    """Create a CMS SignedData object (DER) from a conveyed-info file and
    return it base64-encoded.

    The signature is attached (-nodetach) and produced with the given
    signer certificate and private key.
    """
    cmd = [
        "openssl", "cms", "-sign",
        "-in", conveyedInfoFile,
        "-signer", cert,
        "-inkey", privateKey,
        "-outform", "der", "-nodetach",
    ]
    # Pipe the DER output through base64 for transport-friendly encoding.
    return runOpensslCmd( cmd, [ "base64" ] )
def change_app_header(uri, headers, body):
    """Set the Accept header required by GitHub Apps API preview features."""
    preview_media_type = "application/vnd.github.machine-man-preview+json"
    headers["Accept"] = preview_media_type
    return uri, headers, body
def fib_fail(n: int) -> int:
    """Intentionally broken Fibonacci: the base case is missing, so every
    call recurses forever and raises RecursionError. Kept as a negative
    example — do not fix.
    """
    return fib_fail(n - 1) + fib_fail(n - 2)
def int2str(num, radix=10, alphabet=BASE85):
"""helper function for quick base conversions from integers to strings"""
return NumConv(radix, alphabet).int2str(num) | 6a7b6e7e090cccc20a0e0e3196e81f79cc5dabc5 | 8,148 |
def randomize_onesample(a, n_iter=10000, h_0=0, corrected=True,
                        random_seed=None, return_dist=False):
    """Nonparametric one-sample T test through randomization.

    On each iteration, randomly flip the signs of the values in ``a``
    and test the mean against 0.

    If ``a`` is two-dimensional, it is assumed to be shaped as
    (n_observations, n_tests), and a max-statistic based approach
    is used to correct the p values for multiple comparisons over tests.

    Parameters
    ----------
    a : array-like
        input data to test
    n_iter : int
        number of randomization iterations
    h_0 : float, broadcastable to tests in a
        null hypothesis for the group mean
    corrected : bool
        correct the p values in the case of multiple tests
    random_seed : int or None
        seed to use for random number generator
    return_dist : bool
        if True, return the null distribution of t statistics

    Returns
    -------
    obs_t : float or array of floats
        group mean T statistic(s) corresponding to tests in input
    obs_p : float or array of floats
        one-tailed p value that the population mean is greater than h_0
        (1 - percentile under the null)
    dist : ndarray, optional
        if return_dist is True, the null distribution of t statistics
    """
    # Force a float copy: np.float was removed in NumPy >= 1.20, and the
    # original np.asarray could alias the caller's array, so the in-place
    # `a -= h_0` below mutated the caller's data.
    a = np.array(a, dtype=float)
    if a.ndim < 2:
        a = a.reshape(-1, 1)
    n_samp, n_test = a.shape
    a -= h_0
    rs = np.random.RandomState(random_seed)
    # Random +/-1 sign flips for every (sample, iteration) pair, shared
    # across tests so the max-statistic correction is valid.
    flipper = (rs.uniform(size=(n_samp, n_iter)) > 0.5) * 2 - 1
    flipper = (flipper.reshape(n_samp, 1, n_iter) *
               np.ones((n_samp, n_test, n_iter), int))
    rand_dist = a[:, :, None] * flipper
    err_denom = np.sqrt(n_samp - 1)
    std_err = rand_dist.std(axis=0) / err_denom
    t_dist = rand_dist.mean(axis=0) / std_err
    obs_t = a.mean(axis=0) / (a.std(axis=0) / err_denom)
    if corrected:
        # Compare each observed t against the null of the max over tests.
        obs_p = 1 - percentile_score(t_dist.max(axis=0), obs_t) / 100
    else:
        obs_p = []
        for obs_i, null_i in zip(obs_t, t_dist):
            obs_p.append(1 - percentile_score(null_i, obs_i) / 100)
        obs_p = np.array(obs_p)
    if a.shape[1] == 1:
        # Single test: return scalars (np.asscalar was removed in NumPy
        # >= 1.23; ndarray.item() is the documented replacement).
        obs_t = obs_t.item()
        obs_p = obs_p.item()
        t_dist = t_dist.squeeze()
    if return_dist:
        return obs_t, obs_p, t_dist
    return obs_t, obs_p
from typing import Optional
from typing import Dict
def git_get_project(
    directory: str, token: Optional[str] = None, revisions: Optional[Dict[str, str]] = None
) -> BuiltInCommand:
    """
    Build the built-in Evergreen ``git.get_project`` command.

    The command clones the tracked project into *directory* and checks out
    the current revision; patches are applied when the task was created by
    a patch build.

    :param directory: Directory to clone into.
    :param token: Token to use for cloning instead of an ssh key.
    :param revisions: Map of module name to revision to use.
    """
    params: Dict[str, object] = {"directory": directory}
    # Optional parameters are only included when a value was supplied.
    add_if_exists(params, "token", token)
    add_if_exists(params, "revisions", revisions)
    return BuiltInCommand("git.get_project", params)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.