| content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
|---|---|---|
def mark(symbol):
"""Wrap the symbol's result in a tuple where the first element is `symbol`.
Used where the information about "which branch of the grammar was used"
must be propagated upwards for further checks.
"""
def mark_action(x):
return (symbol, x)
return mark_action << symbol
|
3180c96d4d2a68df2909f23a544879918016fb37
| 3,636,454
|
import numpy as np
def restore_dimensions(array, from_dims, result_like, result_attrs=None):
"""
Restores a numpy array to a DataArray with similar dimensions to a reference
Data Array. This is meant to be the reverse of get_numpy_array.
Parameters
----------
array : ndarray
The numpy array from which to create a DataArray
from_dims : list of str
The directions describing the numpy array. If being used to reverse
a call to get_numpy_array, this should be the same as the out_dims
argument used in the call to get_numpy_array.
'x', 'y', and 'z' indicate any axes
registered to those directions with
        :py:func:`~sympl.set_direction_names`. '*' indicates an axis
which is the flattened collection of all dimensions not explicitly
listed in out_dims, including any dimensions with unknown direction.
result_like : DataArray
A reference array with the desired output dimensions of the DataArray.
If being used to reverse a call to get_numpy_array, this should be
the same as the data_array argument used in the call to get_numpy_array.
result_attrs : dict, optional
A dictionary with the desired attributes of the output DataArray. If
not given, no attributes will be set.
Returns
-------
data_array : DataArray
The output DataArray with the same dimensions as the reference
DataArray.
See Also
--------
    :py:func:`~sympl.get_numpy_array`: Retrieves a numpy array with desired
        dimensions from a given DataArray.
"""
current_dim_names = {}
for dim in from_dims:
if dim != '*':
current_dim_names[dim] = [dim]
direction_to_names = get_input_array_dim_names(
result_like, from_dims, current_dim_names)
original_shape = []
original_dims = []
original_coords = []
for direction in from_dims:
if direction in direction_to_names.keys():
for name in direction_to_names[direction]:
original_shape.append(len(result_like.coords[name]))
original_dims.append(name)
original_coords.append(result_like.coords[name])
    if np.prod(array.shape) != np.prod(original_shape):
raise ShapeMismatchError
data_array = DataArray(
np.reshape(array, original_shape),
dims=original_dims,
coords=original_coords).transpose(
*list(result_like.dims))
if result_attrs is not None:
data_array.attrs = result_attrs
return data_array
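# Usage sketch: round-trip through sympl's get_numpy_array (assumes the sympl
# package is installed and exposes get_numpy_array/DataArray as the docstring
# above implies; the argument names follow that docstring, not a verified API).
from sympl import DataArray, get_numpy_array
temperature = DataArray(np.random.rand(10, 20), dims=('lat', 'lon'),
                        coords={'lat': np.arange(10), 'lon': np.arange(20)})
raw = get_numpy_array(temperature, out_dims=('*',))   # flatten everything onto '*'
doubled = restore_dimensions(raw * 2.0, from_dims=('*',), result_like=temperature,
                             result_attrs={'units': 'degK'})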
|
401015b3e33f17bb7e5be078270391efb0543bfa
| 3,636,455
|
import collections
def _combine_qc_samples(samples):
"""Combine split QC analyses into single samples based on BAM files.
"""
by_bam = collections.defaultdict(list)
for data in [utils.to_single_data(x) for x in samples]:
batch = dd.get_batch(data) or dd.get_sample_name(data)
if not isinstance(batch, (list, tuple)):
batch = [batch]
batch = tuple(batch)
by_bam[(dd.get_align_bam(data) or dd.get_work_bam(data), batch)].append(data)
out = []
for data_group in by_bam.values():
data = data_group[0]
alg_qc = []
qc = {}
metrics = {}
for d in data_group:
qc.update(dd.get_summary_qc(d))
metrics.update(dd.get_summary_metrics(d))
alg_qc.extend(dd.get_algorithm_qc(d))
data["config"]["algorithm"]["qc"] = alg_qc
data["summary"]["qc"] = qc
data["summary"]["metrics"] = metrics
out.append([data])
return out
|
b9fb88f7fae9c6dda8f2435b8c7fcfab5ab15ad2
| 3,636,456
|
import re
def doc(
package_name: str,
plugin_name: str,
long_doc: bool = True,
include_details: bool = False,
) -> str:
"""Document one plug-in
Documentation is taken from the module doc-string. If the plug-in is not part of the
package an UnknownPluginError is raised.
Args:
package_name: Name of package containing plug-ins.
plugin_name: Name of the plug-in (module).
long_doc: Use long doc-string or short one-line string.
include_details: Include development details like parameters and return values?
Returns:
Documentation of the plug-in.
"""
# Get Plugin-object and pick out doc-string
doc = load(package_name, plugin_name).doc
if long_doc:
# Strip short description and indentation
lines = [d.strip() for d in "\n\n".join(doc.split("\n\n")[1:]).split("\n")]
# Stop before Args:, Returns: etc if details should not be included
idx_args = len(lines)
if not include_details:
re_args = re.compile("(Args:|Returns:|Details:|Attributes:)$")
try:
idx_args = [re_args.match(l) is not None for l in lines].index(True)
except ValueError:
pass
return "\n".join(lines[:idx_args]).strip()
else:
# Return short description
return doc.split("\n\n")[0].replace("\n", " ").strip()
|
a6c3a1c03936262815299657c6264f70be1e92ba
| 3,636,458
|
def microsecond(dt):
""":yaql:property microsecond
Returns microseconds of given datetime.
:signature: datetime.microsecond
:returnType: integer
.. code::
yaql> datetime(2006, 11, 21, 16, 30, 2, 123).microsecond
123
"""
return dt.microsecond
|
31d195fa4ceb468bb5666751e56b836fbec8f822
| 3,636,459
|
import math
import numpy as np
def decompose_label_vector(label_vector, n_xgrids, n_ygrids, mean_lwh,
xlim=(0.0, 70.0), ylim=(-50.0,50.0), zlim=(-10.0,10.0),
conf_thres=0.5, nms=True, iou_thres=0.1):
""" Build the ground-truth label vector
given a set of poses, classes, and
number of grids.
Input:
label_vector: label vector outputted from the model
n_xgrids: number of grids in the x direction
n_ygrids: number of grids in the y direction
Output:
poses: list of object poses [x,y,z,l,w,h,yaw]
classes: list of object classes
"""
conf = []
poses = []
classes = []
label_dict_list = []
# obtain x index
xstop = (xlim[1] - xlim[0]) / float(n_xgrids)
# obtain y index
ystop = (ylim[1] - ylim[0]) / float(n_ygrids)
# length of each object label
obj_label_len = pose_vec_len + len(label_map) # 8 for poses, rest for object classes
# reshape the vector
label_vector_reshaped = np.reshape(label_vector, (-1, obj_label_len))
# get each element
obj_confidences = label_vector_reshaped[:, 0]
obj_poses = label_vector_reshaped[:, 1:pose_vec_len]
obj_class_one_hot = label_vector_reshaped[:, pose_vec_len:]
# iterate through each element
for i, obj_conf in enumerate(obj_confidences):
if obj_conf > conf_thres:
# pose vector
x_norm, y_norm, z_norm, l_norm, w_norm, h_norm, cos_yaw_norm, sin_yaw_norm = obj_poses[i]
cls_ = idx_to_label(np.argmax(obj_class_one_hot[i]))
mean_lwh_cls = mean_lwh[cls_]
# get indices
x_idx = math.floor(i / n_xgrids)
y_idx = i - (x_idx * n_xgrids)
# denormalize pose
x = (x_norm * xstop) + (x_idx * xstop) + xlim[0]
y = (y_norm * ystop) + (y_idx * ystop) + ylim[0]
z = (z_norm * (zlim[1] - zlim[0])) + zlim[0]
l = mean_lwh_cls[0]*math.exp(l_norm)
w = mean_lwh_cls[1]*math.exp(w_norm)
h = mean_lwh_cls[2]*math.exp(h_norm)
cos_yaw = (cos_yaw_norm * 2.0) - 1.0
sin_yaw = (sin_yaw_norm * 2.0) - 1.0
yaw = np.arctan2(sin_yaw, cos_yaw)
# add poses, classes, and conf
label_dict = {}
label_dict['conf'] = obj_conf
label_dict['x'] = x
label_dict['y'] = y
label_dict['z'] = z
label_dict['l'] = l
label_dict['w'] = w
label_dict['h'] = h
label_dict['yaw'] = yaw
label_dict['class'] = idx_to_label(np.argmax(obj_class_one_hot[i]))
# label_dict['conf'] = np.max(obj_class_one_hot[i])
label_dict_list.append(label_dict)
# non-max suppression
if nms == True:
label_dict_list = non_max_suppression(label_dict_list, iou_threshold=iou_thres)
# return label dictionary
return label_dict_list
|
0cf34bad28a5c8dc335110be95ace5e41d8fa534
| 3,636,460
|
import numpy as np
def gen_delay_phs(fqs, ants, dly_rng=(-20, 20)):
"""
Produce a set of mock complex phasors corresponding to cables delays.
Args:
fqs (array-like): shape=(NFREQS,), GHz
the spectral frequencies of the bandpasses
ants (iterable):
the indices/names of the antennas
        dly_rng (2-tuple): ns
the range of the delay
Returns:
g (dictionary):
a dictionary of ant:exp(2pi*i*tau*fqs) pairs where keys are elements
of ants and values are complex arrays with shape (NFREQS,)
See Also:
:meth:`~gen_gains`: uses this function to generate full gains.
"""
phs = {}
for ai in ants:
dly = np.random.uniform(dly_rng[0], dly_rng[1])
phs[ai] = np.exp(2j * np.pi * dly * fqs)
return phs
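# Minimal usage sketch: three antennas over a 1024-channel band (values illustrative).
fqs = np.linspace(0.1, 0.2, 1024)                 # GHz
phs = gen_delay_phs(fqs, ants=[0, 1, 2], dly_rng=(-20, 20))
print(phs[0].shape)                               # (1024,)
print(np.allclose(np.abs(phs[0]), 1.0))           # True: delays are pure phasors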
|
3e9d2b6bab886c8d6b7b3ef5869d74cf21689e06
| 3,636,461
|
def what_to_add(qtype, origword, newword, terminate):
"""Return a qtype that is needed to finish a partial word.
For example, given an origword of '\"frog' and a newword of '\"frogston',
returns either:
terminate=False: 'ston'
terminate=True: 'ston\"'
This is useful when calculating tab completion strings for readline.
Args:
qtype: the type of quoting to use (ie. the first character of origword)
origword: the original word that needs completion.
newword: the word we want it to be after completion. Must start with
origword.
terminate: true if we should add the actual quote character at the end.
Returns:
The string to append to origword to produce (quoted) newword.
"""
if not newword.startswith(origword):
return ''
else:
qold = quotify(qtype, origword, terminate=False)
return quotify(qtype, newword, terminate=terminate)[len(qold):]
|
c5b06aa1db322e0f6c6d041562ea3585482d789b
| 3,636,463
|
def intersect(start1, end1, start2, end2):
"""Return the intersection point of two lines, else return None.
Ideas:
    For parallel lines (equal slope) to intersect, they must have the same
    y-intercept, i.e. be overlapping segments of the same infinite line.
    Intersection point is given by solving line equation 1 = line equation 2,
    m1 * x + c1 = m2 * x + c2
    x = (c2 - c1) / (m1 - m2)
    Additionally, the intersection must exist within the x-y boundaries of the two lines.
    """
    # Order each segment's endpoints by x, then order the segments themselves
    # so that line 1 starts at the smaller x.
    if start1.x > end1.x:
        start1, end1 = end1, start1
    if start2.x > end2.x:
        start2, end2 = end2, start2
    if start1.x > start2.x:
        start1, start2 = start2, start1
        end1, end2 = end2, end1
l1 = Line(start1, end1)
l2 = Line(start2, end2)
if l1.slope == l2.slope:
if l1.intercept == l2.intercept and start2.is_between(start1, end1):
return start2
return None
x = (l2.intercept - l1.intercept) / (l1.slope - l2.slope)
y = x * l1.slope + l1.intercept
res = Point(x, y)
if res.is_between(start1, end1) and res.is_between(start2, end2):
return res
return None
|
cd5affbdc57d48783cf50f188b979ad24f117c37
| 3,636,464
|
def start(update, context):
"""Displays welcome message."""
# choose_lang = True
# If we're starting over we don't need do send a new message
if not context.user_data.get(START_OVER):
user = update.message.from_user
try:
context.user_data[LANG] = user.language_code
logger.info(
f'User language: {texts.LANGUAGE[context.user_data[LANG]]["name"]}')
# choose_lang = False
        except Exception:
            # Fall back to English if the user's locale is missing or unsupported
            context.user_data[LANG] = 'en'
update.message.reply_text(
texts.WELCOME[context.user_data[LANG]] + ' \U0001F5FA', parse_mode=ParseMode.HTML)
text = texts.COMMANDS[context.user_data[LANG]]
update.message.reply_text(
text=text, parse_mode=ParseMode.HTML,
# resize_keyboard=True, reply_markup=keyboard
)
# Clear user context
context.user_data.clear()
context.user_data[START_OVER] = True
return select_lang(update, context)
|
cbb8e0f49f35de1dbd0f47e71b114b2c22ed5ec0
| 3,636,465
|
import functools as ft
import itertools as it
import tensorflow as tf
def model_dir_str(model_dir, hidden_units, logits, processor=lambda: pc.IdentityProcessor(),
activation=tf.nn.relu, uuid=None):
"""Returns a string for the model directory describing the network.
Note that it only stores the information that describes the layout of the network - in particular it does not
describe any training hyperparameters (in particular dropout rate).
"""
layer_counter = [(k, sum(1 for _ in g)) for k, g in it.groupby(hidden_units)]
for layer_size, layer_repeat in layer_counter:
if layer_repeat == 1:
model_dir += '{}_'.format(layer_size)
else:
model_dir += '{}x{}_'.format(layer_size, layer_repeat)
model_dir += '{}__'.format(logits)
model_dir += processor().__class__.__name__
if isinstance(activation, ft.partial):
activation_fn = activation.func
alpha = str(activation.keywords['alpha']).replace('.', '')
else:
activation_fn = activation
alpha = '02'
model_dir += '_' + activation_fn.__name__.replace('_', '')
if activation_fn is tf.nn.leaky_relu:
model_dir += alpha
if uuid not in (None, ''):
model_dir += '_' + str(uuid)
return model_dir
|
00ee6a98dfc1f614f335a187f3f998edc908e25d
| 3,636,466
|
def validate_search_inputs(row_id, search_column, search_value):
"""Function that determines if row_id, search_column and search_value are defined correctly"""
return_value = {
"valid": True,
"msg": None
}
    a_search_var_defined = bool(search_column or search_value)
if row_id and a_search_var_defined:
return_value["valid"] = False
return_value["msg"] = "Only 'row_id' or the 'search_column and search_value' pair can be defined"
elif not row_id and not a_search_var_defined:
return_value["valid"] = False
return_value["msg"] = "You must define either 'row_id' or the 'search_column and search_value' pair"
return return_value
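# Usage sketch: the validator rejects conflicting or missing lookup criteria.
print(validate_search_inputs(7, "name", "frog"))
# {'valid': False, 'msg': "Only 'row_id' or the 'search_column and search_value' pair can be defined"}
print(validate_search_inputs(None, "name", "frog"))
# {'valid': True, 'msg': None}
print(validate_search_inputs(None, None, None))
# {'valid': False, 'msg': "You must define either 'row_id' or the 'search_column and search_value' pair"}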
|
ce85ce1b973beab6b0476dfc05edc594fac8c420
| 3,636,467
|
from scipy.special import binom  # binomial coefficient (assumed SciPy implementation)
def B1(i, n, t):
    """Return the Bernstein polynomial (i, n) evaluated at t,
    using the binomial definition."""
if i < 0 or i > n:
return 0
return binom(n,i)* t**i * (1-t)**(n-i)
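# Quick check of the partition-of-unity property: the Bernstein basis sums to 1.
t = 0.3
print(sum(B1(i, 4, t) for i in range(5)))   # 1.0 (up to floating point)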
|
ac97d943494e3b194d71de9ae1864633268499ec
| 3,636,468
|
def get_text_between(text, before_text, after_text):
"""Return the substring of text between before_text and after_text."""
pos1 = text.find(before_text)
if pos1 != -1:
pos1 += len(before_text)
pos2 = text.find(after_text, pos1)
if pos2 != -1:
return text[pos1:pos2].strip()
else:
error_message = f"Can't find '{after_text}' within a longer text."
raise VersionParsingError(error_message)
else:
error_message = f"Can't find '{before_text}' within a longer text."
raise VersionParsingError(error_message)
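# Usage sketch (success path only; VersionParsingError is raised when a marker is missing).
text = "version: 1.4.2 (stable)"
print(get_text_between(text, "version:", "(stable)"))   # '1.4.2'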
|
4ec7f1900881422599b05f64b1c8eec8c992452d
| 3,636,469
|
def _get_functional_form_section(input_string):
""" grabs the section of text containing all of the job keywords
for functional form of PIPs
"""
pattern = (escape('$functional_form') + LINE_FILL + NEWLINE +
capturing(one_or_more(WILDCARD, greedy=False)) +
escape('$end'))
section = first_capture(pattern, input_string)
assert section is not None
return section
|
d4f2061f355c6a09ec564b0d60b0cf6b82d022b8
| 3,636,471
|
def rfftn(a, s=None, axes=None):
"""Multi-dimensional discrete Fourier transform for real input.
Compute the multi-dimensional discrete Fourier transform for real input.
This function is a wrapper for :func:`pyfftw.interfaces.numpy_fft.rfftn`,
with an interface similar to that of :func:`numpy.fft.rfftn`.
Parameters
----------
a : array_like
Input array (taken to be real)
s : sequence of ints, optional (default None)
Shape of the output along each transformed axis (input is cropped
or zero-padded to match).
axes : sequence of ints, optional (default None)
Axes over which to compute the DFT.
Returns
-------
af : complex ndarray
DFT of input array
"""
return pyfftw.interfaces.numpy_fft.rfftn(
a, s=s, axes=axes, overwrite_input=False,
planner_effort='FFTW_MEASURE', threads=pyfftw_threads)
|
9df68b5655d624d6f095b8a33ce31bc706c7ac7a
| 3,636,472
|
import asyncio
import pathlib
import requests
from fastapi import HTTPException, Path, Response, status
from fastapi.responses import FileResponse
async def ChannelLogoAPI(
    channel_id: str = Path(..., description='Channel ID (e.g. gr011)'),
):
"""
    Return the logo for the specified channel.
"""
    # Fetch the channel information
channel = await Channels.filter(channel_id=channel_id).get_or_none()
    # The specified channel ID does not exist
if channel is None:
raise HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
detail='Specified channel_id was not found',
)
    # Set headers so the browser caches the response
    # ref: https://qiita.com/yuuuking/items/4f11ccfc822f4c198ab0
header = {
        'Cache-Control': 'public, max-age=2592000',  # 30 days
}
    # ***** Use the bundled logo (if one exists) *****
    # Logos captured from the broadcast stream are low quality anyway, and may not
    # have been captured at all, so prefer a bundled logo when one exists.
    # Bundled logos are 256x256 PNG files named like NID32736-SID1024.png
if pathlib.Path.exists(LOGO_DIR / f'{channel.id}.png'):
return FileResponse(LOGO_DIR / f'{channel.id}.png', headers=header)
    # ***** Logos shared nationwide: hard-coded by prefix match on the channel name *****
## NHK総合
if channel.channel_type == 'GR' and channel.channel_name.startswith('NHK総合'):
return FileResponse(LOGO_DIR / 'NID32736-SID1024.png', headers=header)
## NHKEテレ
if channel.channel_type == 'GR' and channel.channel_name.startswith('NHKEテレ'):
return FileResponse(LOGO_DIR / 'NID32737-SID1032.png', headers=header)
    # For cable TV operators broadcasting in multiple regions, the community channel's
    # NID and SID differ by region.
    # ref: https://youzaka.hatenablog.com/entry/2013/06/30/154243
# J:COMテレビ
if channel.channel_type == 'GR' and channel.channel_name.startswith('J:COMテレビ'):
return FileResponse(LOGO_DIR / 'NID32397-SID23656.png', headers=header)
# J:COMチャンネル
if channel.channel_type == 'GR' and channel.channel_name.startswith('J:COMチャンネル'):
return FileResponse(LOGO_DIR / 'NID32399-SID23672.png', headers=header)
# eo光チャンネル
if channel.channel_type == 'GR' and channel.channel_name.startswith('eo光チャンネル'):
return FileResponse(LOGO_DIR / 'NID32127-SID41080.png', headers=header)
# ZTV
if channel.channel_type == 'GR' and channel.channel_name.startswith('ZTV'):
return FileResponse(LOGO_DIR / 'NID32047-SID46200.png', headers=header)
    # ***** Use the parent channel's logo for subchannels *****
    # Terrestrial subchannels only: use the main channel's logo if it exists
if channel.channel_type == 'GR' and channel.is_subchannel is True:
        # Fetch the main channel's information:
        # the channel with the same network ID and the smallest service ID
main_channel = await Channels.filter(network_id=channel.network_id).order_by('service_id').first()
        # The main channel exists and its logo also exists
if main_channel is not None and pathlib.Path.exists(LOGO_DIR / f'{main_channel.id}.png'):
return FileResponse(LOGO_DIR / f'{main_channel.id}.png', headers=header)
    # BS subchannels only: use the main channel's logo if it exists
if channel.channel_type == 'BS' and channel.is_subchannel is True:
        # Work out the main channel's service ID:
        # NHKBS1 and NHKBSプレミアム are special-cased; otherwise the service ID ends in 1
if channel.service_id == 102:
main_service_id = 101
elif channel.service_id == 104:
main_service_id = 103
else:
main_service_id = int(channel.channel_number[0:2] + '1')
        # Fetch the main channel's information
main_channel = await Channels.filter(network_id=channel.network_id, service_id=main_service_id).first()
        # The main channel exists and its logo also exists
if main_channel is not None and pathlib.Path.exists(LOGO_DIR / f'{main_channel.id}.png'):
return FileResponse(LOGO_DIR / f'{main_channel.id}.png', headers=header)
    # ***** Get the logo from Mirakurun *****
if CONFIG['general']['backend'] == 'Mirakurun':
        # Mirakurun-style service ID:
        # zero-pad both NID and SID to 5 digits, concatenate, then convert to int
mirakurun_service_id = int(str(channel.network_id).zfill(5) + str(channel.service_id).zfill(5))
        # Fetch the logo from the Mirakurun API
        # (only reached when no bundled logo exists)
mirakurun_logo_api_url = f'{CONFIG["general"]["mirakurun_url"]}/api/services/{mirakurun_service_id}/logo'
mirakurun_logo_api_response:requests.Response = await asyncio.to_thread(requests.get, mirakurun_logo_api_url)
        # A 200 status code means a logo is available
        # (Mirakurun returns 503 when no logo data exists)
if mirakurun_logo_api_response.status_code == 200:
            # Return the fetched logo data
mirakurun_logo = mirakurun_logo_api_response.content
return Response(content=mirakurun_logo, media_type='image/png', headers=header)
    # ***** Get the logo from EDCB *****
if CONFIG['general']['backend'] == 'EDCB':
        # Initialize CtrlCmdUtil
edcb = CtrlCmdUtil()
        # Fetch the logo from EDCB's LogoData folder
logo = None
files = await edcb.sendFileCopy2(['LogoData.ini', 'LogoData\\*.*']) or []
if len(files) == 2:
logo_data_ini = EDCBUtil.convertBytesToString(files[0]['data'])
logo_dir_index = EDCBUtil.convertBytesToString(files[1]['data'])
logo_id = EDCBUtil.getLogoIDFromLogoDataIni(logo_data_ini, channel.network_id, channel.service_id)
if logo_id >= 0:
                # Prefer the logo types with the best image quality
for logo_type in [5, 2, 4, 1, 3, 0]:
logo_name = EDCBUtil.getLogoFileNameFromDirectoryIndex(logo_dir_index, channel.network_id, logo_id, logo_type)
if logo_name is not None:
files = await edcb.sendFileCopy2(['LogoData\\' + logo_name]) or []
if len(files) == 1:
logo = files[0]['data']
logo_media_type = 'image/bmp' if logo_name.upper().endswith('.BMP') else 'image/png'
break
        # Return the fetched logo data
if logo is not None and len(logo) > 0:
return Response(content=logo, media_type=logo_media_type, headers=header)
    # ***** Fall back to the default logo image *****
    # Only reached when there is no bundled logo and neither Mirakurun nor EDCB returned one
return FileResponse(LOGO_DIR / 'default.png', headers=header)
|
ab0e149141cd678b7890927b5b9e50bb9c34a91e
| 3,636,473
|
import pathlib
def ImportFromNpb(
db: bytecode_database.Database, cmake_build_root: pathlib.Path
) -> int:
"""Import the cmake files from the given build root."""
bytecodes_to_process = FindBitcodesToImport(cmake_build_root)
i = 0
with sqlutil.BufferedDatabaseWriter(db, max_buffer_length=10) as writer:
for i, bytecode in enumerate(
[ProcessBitcode(b) for b in (bytecodes_to_process)]
):
app.Log(1, "%s:%s", bytecode.source_name, bytecode.relpath)
writer.AddOne(bytecode)
return i
|
3f8635fe64c7bfcd306e847334badcc5a2c5b2e0
| 3,636,474
|
def _partial_ema_scov_init(n_dim=None, r:float=0.025, n_emp=None, target:float=None)->dict:
""" Initialize object to track partial moments
r: Importance of current data point
n_emp: Discouraged. Really only used for tests.
This is the number of samples for which empirical is used, rather
than running updates. By default n_emp ~ 1/r
"""
s = dict([ (q,_ema_scov_init(n_dim=n_dim,r=r,n_emp=n_emp)) for q in QUADRANTS ])
q = next(iter(s.keys())) # Choose any
s['n_dim'] = s[q]['n_dim']
s['n_emp'] = s[q]['n_emp']
s['rho'] = s[q]['rho']
s['target'] = target
s['sma'] = sma({},n_dim,r=r)
return s
|
5c73db5f3758781a7a47cc72ad85784ada6e57fa
| 3,636,475
|
def inner(thing):
""" one level """
if isinstance(thing, DataPackage):
return thing,
else:
return list(thing)
|
17eb8b2a272144b4a1732d8f6ce1f40c18f79b8a
| 3,636,476
|
def load_dataset(data_name):
"""Load dataset.
Args:
data_name (str): The name of dataset.
Returns:
dataset (pgl.dataset): Return the corresponding dataset, containing graph information, feature, etc.
data_mode (str): Currently we have 's' and 'm' mode, which mean small dataset and medium dataset respectively.
"""
data_name = data_name.lower()
if data_name == 'reddit':
data_mode = 'm'
dataset = pgl.dataset.RedditDataset()
y = np.zeros(dataset.graph.num_nodes, dtype="int64")
y[dataset.train_index] = dataset.train_label
y[dataset.val_index] = dataset.val_label
y[dataset.test_index] = dataset.test_label
dataset.y = y
elif data_name == 'arxiv':
data_mode = 'm'
dataset = pgl.dataset.OgbnArxivDataset()
dataset.graph = to_undirected(dataset.graph, copy_node_feat=False)
dataset.graph = add_self_loops(dataset.graph, copy_node_feat=False)
elif data_name == 'cora':
data_mode = 's'
dataset = pgl.dataset.CoraDataset()
elif data_name == 'pubmed':
data_mode = 's'
dataset = pgl.dataset.CitationDataset("pubmed", symmetry_edges=True)
elif data_name == 'citeseer':
data_mode = 's'
dataset = pgl.dataset.CitationDataset("citeseer", symmetry_edges=True)
else:
raise ValueError(data_name + " dataset doesn't exist currently.")
if data_mode == 's':
def normalize(feat):
return feat / np.maximum(np.sum(feat, -1, keepdims=True), 1)
indegree = dataset.graph.indegree()
dataset.graph.node_feat["words"] = normalize(dataset.graph.node_feat[
"words"])
dataset.feature = dataset.graph.node_feat["words"]
dataset.train_mask = generate_mask(dataset.graph.num_nodes,
dataset.train_index)
dataset.val_mask = generate_mask(dataset.graph.num_nodes,
dataset.val_index)
dataset.test_mask = generate_mask(dataset.graph.num_nodes,
dataset.test_index)
return dataset, data_mode
|
f99dcc9d64085ef545658d34deb4936f37305f11
| 3,636,477
|
def require_dataset(hdf5_data, path, shape, dtype, maxshape=(None)):
"""
Create or update a dataset, making sure that its shape is resized
if needed
Args:
hdf5_data: object, an already opened hdf5 file
path: string, the path to the dataset
shape: tuple of integers, the shape of the dataset
dtype: string or int, the type of the dataset
maxshape: tuple of integers, the maximum shape to which the dataset can be
resized to. (Unused currently)
Returns:
The dataset newly created or updated.
"""
    dset = hdf5_data.get(path, default=None)
    # Dataset does not exist yet: create it with every axis resizable
    if dset is None:
        maxshape = [None for _ in range(len(shape))]
        dset = hdf5_data.create_dataset(path, shape, dtype, maxshape=tuple(maxshape))
else:
# Dataset is already existing
dset.resize(shape)
return dset
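# Usage sketch, assuming the h5py package is available; the file name is illustrative.
import h5py
with h5py.File("scratch.h5", "w") as f:
    dset = require_dataset(f, "/results/values", shape=(10,), dtype="float64")
    dset = require_dataset(f, "/results/values", shape=(20,), dtype="float64")  # resized in place
    print(dset.shape)   # (20,)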
|
dc9b3b4db56854cc2c770875a754474bbc5f56a3
| 3,636,478
|
def findquote(lrrbot, conn, event, respond_to, query):
"""
Command: !findquote QUERY
Section: quotes
Search for a quote in the quote database.
"""
quotes = lrrbot.metadata.tables["quotes"]
with lrrbot.engine.begin() as pg_conn:
fts_column = sqlalchemy.func.to_tsvector('english', quotes.c.quote)
query = sqlalchemy.select([
quotes.c.id, quotes.c.quote, quotes.c.attrib_name, quotes.c.attrib_date, quotes.c.context
]).where(
(fts_column.op("@@")(sqlalchemy.func.plainto_tsquery('english', query))) & (~quotes.c.deleted)
)
row = common.utils.pick_random_elements(pg_conn.execute(query), 1)[0]
if row is None:
return conn.privmsg(respond_to, "Could not find any matching quotes.")
qid, quote, name, date, context = row
conn.privmsg(respond_to, format_quote("Quote", qid, quote, name, date, context))
|
1d61f7c416d51d6c362b212b0217d1717ef79aa4
| 3,636,479
|
import re
def find_first_in_register_stop(seq):
"""
Find first stop codon on lowercase seq that starts at an index
that is divisible by three
"""
# Compile regexes for stop codons
regex_stop = re.compile('(taa|tag|tga)')
# Stop codon iterator
stop_iterator = regex_stop.finditer(seq)
# Find next stop codon that is in register
for stop in stop_iterator:
if stop.end() % 3 == 0:
return stop.end()
# Return -1 if we failed to find a stop codon
return -1
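# Example: the first in-frame stop codon ('tga' starting at index 6) ends at index 9.
print(find_first_in_register_stop("atgaaatgaccctaa"))   # 9
print(find_first_in_register_stop("atggggccc"))         # -1 (no stop codon)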
|
56741828c42ecf0cb96044d03c8d1b6bc4994e01
| 3,636,480
|
def greet_person(person: Person) -> str:
"""Return a greeting message for the given person.
The message should have the form 'Hello, <given_name> <family_name>!'
>>> david = Person('David', 'Liu', 110, '110 St. George Street')
>>> greet_person(david)
'Hello, David Liu!'
"""
return f'Hello, {person.given_name} {person.family_name}!'
|
3050e78295dfeee2d80c4d17fa7acc4bbfcb4d41
| 3,636,482
|
def sw(s1, s2, pen, matrix):
"""
Takes as input two sequences, gap penalty, BLOSUM or PAM dictionary
and returns the scoring matrix(F) and traceback matrix(P)
"""
N = len(s1) + 1
M = len(s2) + 1
F = [] #initialize scoring matrix(F) and traceback matrix(P)
P = []
F = [[0] * (N) for i in range(M)] # fill F and P with 0, defining
P = [[0] * (N) for i in range(M)] # their dimensions
for i in range(1, M):
P[i][0] = 'u'
for j in range(1, N):
P[0][j] = 'l'
for i in range(1, M):
for j in range(1, N): # core of the function: for each i,j position
voc = {} # the best score is added to F matrix,
up = F[i - 1][j] + pen # adding the gap penalty when necessary
left = F[i][j - 1] + pen # and its direction to P matrix
diag = F[i - 1][j - 1] + int(matrix[s1[j - 1] + s2[i - 1]])
voc[up] = 'u'
voc[left] = 'l' # u = up, l = left, d = diagonal
voc[diag] = 'd'
max_score = max(up, left, diag)
if max_score < 0: # all negative values are excluded and
F[i][j] = 0 # recorded as 0
else:
F[i][j] = max_score
P[i][j] = voc.get(max_score)
return(F, P)
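# Usage sketch with a toy substitution "matrix"; real calls would pass a BLOSUM/PAM
# dictionary keyed by residue pairs (e.g. matrix['AG']).
toy_matrix = {a + b: (3 if a == b else -1) for a in 'ACGT' for b in 'ACGT'}
F, P = sw("ACGT", "ACG", pen=-2, matrix=toy_matrix)
print(F[-1])   # last row of the local-alignment scoring matrix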
|
c466997476259c4f2736ae0dec892f5f8e5f20e7
| 3,636,483
|
import networkx as nx
def random_multiplex_ER(n, l, p, directed=False):
""" random multilayer ER """
if directed:
G = nx.MultiDiGraph()
else:
G = nx.MultiGraph()
for lx in range(l):
network = nx.fast_gnp_random_graph(n, p, seed=None, directed=directed)
for edge in network.edges():
G.add_edge((edge[0],lx),(edge[1],lx),type="default")
## construct the ppx object
no = multi_layer_network(network_type="multiplex").load_network(G,input_type="nx",directed=directed)
return no
|
9a70997fb3de5db225b0282a3b217eb4e33f0a8c
| 3,636,485
|
def compare_structures(structure_a, structure_b):
"""Compare two StructureData objects A, B and return a delta (A - B) of the relevant properties."""
delta = AttributeDict()
delta.absolute = AttributeDict()
delta.relative = AttributeDict()
volume_a = structure_a.get_cell_volume()
volume_b = structure_b.get_cell_volume()
delta.absolute.volume = np.absolute(volume_a - volume_b)
delta.relative.volume = np.absolute(volume_a - volume_b) / volume_a
pos_a = np.array([site.position for site in structure_a.sites])
pos_b = np.array([site.position for site in structure_b.sites])
delta.absolute.pos = pos_a - pos_b
site_vectors = [delta.absolute.pos[i, :] for i in range(delta.absolute.pos.shape[0])]
a_lengths = np.linalg.norm(pos_a, axis=1)
delta.absolute.pos_lengths = np.array([np.linalg.norm(vector) for vector in site_vectors])
delta.relative.pos_lengths = np.array([np.linalg.norm(vector) for vector in site_vectors]) / a_lengths
cell_lengths_a = np.array(structure_a.cell_lengths)
delta.absolute.cell_lengths = np.absolute(cell_lengths_a - np.array(structure_b.cell_lengths))
delta.relative.cell_lengths = np.absolute(cell_lengths_a - np.array(structure_b.cell_lengths)) / cell_lengths_a
cell_angles_a = np.array(structure_a.cell_angles)
delta.absolute.cell_angles = np.absolute(cell_angles_a - np.array(structure_b.cell_angles))
delta.relative.cell_angles = np.absolute(cell_angles_a - np.array(structure_b.cell_angles)) / cell_angles_a
return delta
|
93a7b2a5d28abe844b9daabce840afe275ed851e
| 3,636,486
|
from functools import wraps
from typing import Callable
def parse_response(expected: str) -> Callable:
"""
Decorator for a function that returns a requests.Response object.
This decorator parses that response depending on the value of <expected>.
If the response indicates the request failed (status >= 400) a dictionary
containing the response status and message will be returned. Otherwise,
the content will be parsed and a dictionary or list will be returned if
expected == 'json', a string will be returned if expected == 'text' and
a binary string will be returned if expected == 'content'.
This also updates the return annotation for the wrapped function according
to the expected return value type.
"""
def _parser(f):
@wraps(f)
def _f(*args, **kwargs):
response = f(*args, **kwargs)
if not response.ok or expected == "json":
return response.json()
if expected == "content":
return response.content
if expected == "text":
return response.text
return response.json()
f.__annotations__["return"] = _get_expected_return(expected)
return _f
return _parser
|
2d50fb98553e1803ef86056a0455a864c17bb065
| 3,636,487
|
def find_parents(candidate, branches):
"""Find parents genre of a given genre, ordered from the closest to
the further parent.
"""
for branch in branches:
try:
idx = branch.index(candidate.lower())
return list(reversed(branch[:idx + 1]))
except ValueError:
continue
return [candidate]
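# Example: the closest parents come first; unknown genres fall back to themselves.
branches = [["rock", "alternative rock", "indie rock"]]
print(find_parents("Indie Rock", branches))   # ['indie rock', 'alternative rock', 'rock']
print(find_parents("jazz", branches))         # ['jazz']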
|
17934d9ee1d3098cc3d08f38d9e3c387df6b7c19
| 3,636,488
|
import faiss
import torch
def swig_ptr_from_FloatTensor(x):
""" gets a Faiss SWIG pointer from a pytorch tensor (on CPU or GPU) """
assert x.is_contiguous()
assert x.dtype == torch.float32
return faiss.cast_integer_to_float_ptr(
x.storage().data_ptr() + x.storage_offset() * 4)
|
d1cdf905fcd45053e9cf42306a68408fa68d1ddf
| 3,636,489
|
def generate_reference_user_status(user,references):
"""Generate reference user status instances for a given set of references.
WARNING: the new instances are not saved in the database!
"""
new_ref_status = []
for ref in references:
source_query = ref.sources.filter(userprofile=user.userprofile)\
.distinct().order_by("pub_date")
try:
s = source_query.get()
except MultipleObjectsReturned:
s = source_query.all()[0]
except ObjectDoesNotExist:
s = get_unknown_reference()
rust = ReferenceUserStatus()
rust.main_source = s
rust.owner = user
rust.reference = ref
rust.reference_pub_date = ref.pub_date
new_ref_status.append(rust)
return new_ref_status
|
a0d859d06ee4f4a8f47aaad4e6814ae232e6d751
| 3,636,490
|
def binned_bitsets_by_chrom( f, chrom, chrom_col=0, start_col=1, end_col=2):
"""Read a file by chrom name into a bitset"""
bitset = BinnedBitSet( MAX )
for line in f:
if line.startswith("#"): continue
fields = line.split()
if fields[chrom_col] == chrom:
start, end = int( fields[start_col] ), int( fields[end_col] )
bitset.set_range( start, end-start )
return bitset
|
4e45b58d56f0dcb290995814666db36fa0fca0c7
| 3,636,491
|
def timeperiod_contains(
timeperiod: spec.Timeperiod,
other_timeperiod: spec.Timeperiod,
) -> bool:
"""return bool of whether timeperiod contains other timeperiod"""
start, end = timeperiod_crud.compute_timeperiod_start_end(timeperiod)
other_start, other_end = timeperiod_crud.compute_timeperiod_start_end(
other_timeperiod
)
return (start <= other_start) and (end >= other_end)
|
62c0f48b30e550a6c223aa46f0e63bf7baac9f4d
| 3,636,492
|
import copy
from dataclasses import fields
def asdict(obj, dict_factory=dict, filter_field_type=None):
"""
Version of dataclasses.asdict that can use field type infomation.
"""
if _is_dataclass_instance(obj):
result = []
for f in fields(obj):
            field_type_from_metadata = f.metadata.get('type', None)
            # Keep every field when no filter is requested (mirroring dataclasses.asdict);
            # otherwise skip fields whose declared metadata type does not match the filter.
            if (filter_field_type is not None and field_type_from_metadata is not None
                    and field_type_from_metadata != filter_field_type):
                continue
value = asdict(getattr(obj, f.name), dict_factory, filter_field_type)
result.append((f.name, value))
return dict_factory(result)
elif isinstance(obj, tuple) and hasattr(obj, '_fields'):
return type(obj)(*[asdict(v, dict_factory, filter_field_type) for v in obj])
elif isinstance(obj, (list, tuple)):
return type(obj)(asdict(v, dict_factory, filter_field_type) for v in obj)
elif isinstance(obj, dict):
return type(obj)((asdict(k, dict_factory, filter_field_type),
asdict(v, dict_factory, filter_field_type))
for k, v in obj.items())
else:
return copy.deepcopy(obj)
|
2f5f60bbe7cef89cd13dbde1ffc1c3e11f8e2152
| 3,636,493
|
def process_pdb_file(pdb_file, atom_info_only=False):
"""
Reads pdb_file data and returns in a dictionary format
:param pdb_file: str, the location of the file to be read
:param atom_info_only: boolean, whether to read the atom coordinates only or all atom data
:return: pdb_data, dict organizing pdb data by section
"""
pdb_data = {NUM_ATOMS: 0, SEC_HEAD: [], SEC_ATOMS: [], SEC_TAIL: []}
if atom_info_only:
pdb_data[SEC_ATOMS] = {}
atom_id = 0
with open(pdb_file) as f:
for line in f:
line = line.strip()
if len(line) == 0:
continue
line_head = line[:PDB_LINE_TYPE_LAST_CHAR]
# head_content to contain Everything before 'Atoms' section
# also capture the number of atoms
# match 5 letters so don't need to set up regex for the ones that have numbers following the letters
# noinspection SpellCheckingInspection
if line_head[:-1] in ['HEADE', 'TITLE', 'REMAR', 'CRYST', 'MODEL', 'COMPN',
'NUMMD', 'ORIGX', 'SCALE', 'SOURC', 'AUTHO', 'CAVEA',
'EXPDT', 'MDLTY', 'KEYWD', 'OBSLT', 'SPLIT', 'SPRSD',
'REVDA', 'JRNL ', 'DBREF', 'SEQRE', 'HET ', 'HETNA',
'HETSY', 'FORMU', 'HELIX', 'SHEET', 'SSBON', 'LINK ',
'CISPE', 'SITE ', ]:
# noinspection PyTypeChecker
pdb_data[SEC_HEAD].append(line)
# atoms_content to contain everything but the xyz
elif line_head == 'ATOM ' or line_head == 'HETATM':
# By renumbering, handles the case when a PDB template has ***** after atom_id 99999.
# For renumbering, making sure prints in the correct format, including num of characters:
atom_id += 1
if atom_id > 99999:
atom_num = format(atom_id, 'x')
else:
atom_num = '{:5d}'.format(atom_id)
# Alternately, use this:
# atom_num = line[cfg[PDB_LINE_TYPE_LAST_CHAR]:cfg[PDB_ATOM_NUM_LAST_CHAR]]
atom_type = line[PDB_ATOM_NUM_LAST_CHAR:PDB_ATOM_TYPE_LAST_CHAR]
res_type = line[PDB_ATOM_TYPE_LAST_CHAR:PDB_RES_TYPE_LAST_CHAR]
mol_num = int(line[PDB_RES_TYPE_LAST_CHAR:PDB_MOL_NUM_LAST_CHAR])
pdb_x = float(line[PDB_MOL_NUM_LAST_CHAR:PDB_X_LAST_CHAR])
pdb_y = float(line[PDB_X_LAST_CHAR:PDB_Y_LAST_CHAR])
pdb_z = float(line[PDB_Y_LAST_CHAR:PDB_Z_LAST_CHAR])
last_cols = line[PDB_Z_LAST_CHAR:]
element_type = line[PDB_BEFORE_ELE_LAST_CHAR:PDB_ELE_LAST_CHAR]
if atom_info_only:
atom_xyz = np.array([pdb_x, pdb_y, pdb_z])
pdb_data[SEC_ATOMS][atom_id] = {ATOM_TYPE: element_type, ATOM_COORDS: atom_xyz}
else:
line_struct = [line_head, atom_num, atom_type, res_type, mol_num, pdb_x, pdb_y, pdb_z, last_cols]
# noinspection PyTypeChecker
pdb_data[SEC_ATOMS].append(line_struct)
elif line_head == 'END':
pdb_data[SEC_TAIL].append(line)
break
# tail_content to contain everything after the 'Atoms' section
else:
# noinspection PyTypeChecker
pdb_data[SEC_TAIL].append(line)
pdb_data[NUM_ATOMS] = len(pdb_data[SEC_ATOMS])
return pdb_data
|
c3328ec0123d49e2776aee84a1fdce56fb9dc84c
| 3,636,494
|
def get_insns(*, cls=None, variant: Variant = RV32I):
"""
    Get all instructions. This is based on all known subclasses of `cls`. If none
    is given, all instructions are returned. Only instructions that can actually be
    generated are returned, i.e. those that have a mnemonic, opcode, etc.; other
    classes in the hierarchy are not matched.
    :param cls: Base class to collect subclasses of
    :type cls: Instruction
    :param variant: Only return instructions available in this ISA variant
    :return: List of instruction classes
"""
insns = []
if cls is None:
cls = Instruction
# This filters out abstract classes
if cls.mnemonic:
if variant is None or cls.variant <= variant:
insns = [cls]
for subcls in cls.__subclasses__():
insns += get_insns(cls=subcls, variant=variant)
insns = list(dict.fromkeys(insns)) # Remove duplicates
return insns
|
8f0947ebd5750e19f557959f9ccbe6c9e0ee944e
| 3,636,495
|
def _assert_all_equal_and_return(tensors, name=None):
"""Asserts that all tensors are equal and returns the first one."""
with backend.name_scope(name or 'assert_all_equal'):
if len(tensors) == 1:
return tensors[0]
assert_equal_ops = []
for t in tensors[1:]:
assert_equal_ops.append(check_ops.assert_equal(tensors[0], t))
with ops.control_dependencies(assert_equal_ops):
return array_ops.identity(tensors[0])
|
2c3043aceebd3bf44a0c2aecb4ed188a4a3d6629
| 3,636,496
|
def _get_igraph(G, edge_weights=None, node_weights=None):
"""
Transforms a NetworkX graph into an iGraph graph.
Parameters
----------
G : NetworkX DiGraph or Graph
The graph to be converted.
edge_weights: list or string
weights stored in edges in the original graph to be kept in new graph.
If None, no weight will be carried. See get_full_igraph to get all
weights and attributes into the graph.
node_weights: list or string
weights stored in nodes in the original graph to be kept in new graph.
If None, no weight will be carried. See get_full_igraph to get all
weights and attributes into the graph.
Returns
-------
iGraph graph
"""
if type(edge_weights) == str:
edge_weights = [edge_weights]
if type(node_weights) == str:
node_weights = [node_weights]
G = G.copy()
G = nx.relabel.convert_node_labels_to_integers(G)
Gig = ig.Graph(directed=True)
Gig.add_vertices(list(G.nodes()))
Gig.add_edges(list(G.edges()))
    if 'kind' not in G.graph.keys():
        G.graph['kind'] = 'primal'  # if not specified, assume the graph is primal
if G.graph['kind']=='primal':
Gig.vs['osmid'] = list(nx.get_node_attributes(G, 'osmid').values())
elif G.graph['kind']=='dual':
Gig.vs['osmid'] = list(G.edges)
if edge_weights != None:
for weight in edge_weights:
Gig.es[weight] = [n for _,_,n in G.edges(data=weight)]
if node_weights != None:
for weight in node_weights:
Gig.vs[weight] = [n for _,n in G.nodes(data=weight)]
for v in Gig.vs:
v['name'] = v['osmid']
return Gig
|
f444eac372d11c289bf157a24e9fccb5583ce500
| 3,636,497
|
def rename(isamAppliance, instance_id, id, new_name, check_mode=False, force=False):
"""
    Renaming a file or directory in the administration pages root
    :param isamAppliance:
    :param instance_id:
    :param id:
    :param new_name:
:param check_mode:
:param force:
:return:
"""
dir_id = None
if force is False:
dir_id = _check(isamAppliance, instance_id, id, '')
if force is True or dir_id != None:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_put(
"Renaming a directory in the administration pages root",
"/wga/reverseproxy/{0}/management_root/{1}".format(instance_id, id),
{
'id': dir_id,
'new_name': new_name,
'type': 'directory'
})
return isamAppliance.create_return_object()
|
a9d645bdbdc4d5804b57fe93625eba558a9c9c14
| 3,636,498
|
def deepset_update_global_fn(feats: jnp.ndarray) -> jnp.ndarray:
"""Global update function for graph net."""
# we want to sum-pool all our encoded nodes
#feats = feats.sum(axis=-1) # sum-pool
net = hk.Sequential(
[hk.Linear(128), jax.nn.elu,
hk.Linear(30), jax.nn.elu,
hk.Linear(11)]) # number of variabilities
return net(feats)
|
34fd3038ed56a494d2a09fa829cfe48d583cea49
| 3,636,499
|
def max_sum_naive(arr: list, length: int, index: int, prev_max: int) -> int:
"""
We can either take or leave the current number depending on previous max number
"""
if index >= length:
return 0
cur_max = 0
if arr[index] > prev_max:
cur_max = arr[index] + max_sum_naive(arr, length, index + 1, arr[index])
return max(cur_max, max_sum_naive(arr, length, index + 1, prev_max))
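# Example: maximum sum over increasing subsequences of the array (4 + 6 + 8 = 18).
# prev_max=0 is a valid starting value here because every element is positive.
arr = [4, 6, 1, 3, 8, 4, 6]
print(max_sum_naive(arr, len(arr), 0, 0))   # 18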
|
644b5cb294e78a10add253cad96d3c3e2c3d67d7
| 3,636,500
|
import numpy as np
import torch
def accuracy(X, X_ref):
""" Compute classification accuracy.
Parameters
----------
X : torch.Tensor
The classification score tensor of shape [..., num_classes]
X_ref : torch.Tensor
The target integer labels of shape [...]
Returns
-------
    The average accuracy
"""
X_label = torch.argmax(X, dim=-1)
correct = (X_label == X_ref).sum()
return correct / np.prod(X.shape[:-1])
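# Usage sketch: two of the three argmax predictions match the reference labels.
scores = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
labels = torch.tensor([1, 0, 0])
print(accuracy(scores, labels))   # tensor(0.6667)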
|
a62adda146de6573cdc190b636b0269852604608
| 3,636,501
|
from datetime import datetime, timedelta
import uuid
from cryptography import x509
def create_cert_builder(subject, issuer_name, public_key, days=365, is_ca=False):
"""
The method to create a builder for all types of certificates.
:param subject: The subject of the certificate.
:param issuer_name: The name of the issuer.
:param public_key: The public key of the certificate.
:param days: The number of days for which the certificate is valid. The default is 1 year or 365 days.
:param is_ca: Boolean to indicate if a cert is ca or non ca.
:return: The certificate builder.
:rtype: :class `x509.CertificateBuilder`
"""
builder = x509.CertificateBuilder()
builder = builder.subject_name(subject)
builder = builder.issuer_name(issuer_name)
builder = builder.public_key(public_key)
builder = builder.not_valid_before(datetime.today())
builder = builder.not_valid_after(datetime.today() + timedelta(days=days))
builder = builder.serial_number(int(uuid.uuid4()))
builder = builder.add_extension(
x509.BasicConstraints(ca=is_ca, path_length=None), critical=True
)
return builder
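# Usage sketch, assuming a recent `cryptography` release: build and self-sign a CA cert.
from cryptography import x509
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, u'example-ca')])
builder = create_cert_builder(name, name, key.public_key(), days=30, is_ca=True)
cert = builder.sign(private_key=key, algorithm=hashes.SHA256())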
|
04739a7b81c3e4b6d70bba96e646e64c2d5fdbb7
| 3,636,503
|
def _table_row(line):
"""
Return all elements of a data line.
Return all elements of a data line. Simply splits it.
Parameters
----------
line: string
A stats line.
Returns
-------
list of strings
A list of strings, containing the data on the line, split at white space.
"""
return line.split()
|
dc5d76db80059b0da257b45f12513d75c2765d55
| 3,636,504
|
def ack_alert_alarm_definition(definition_id):
""" Acknowledge all alert(s) or an alarm(s) associated with the definition identified by definition_id.
"""
try:
# Get definition identified in request
definition = SystemEventDefinition.query.get(definition_id)
if definition is None:
message = 'Failed to retrieve SystemEventDefinition for id provided: %d' % definition_id
return bad_request(message)
# Verify definition is not in active state; otherwise error
if definition.active:
message = '%s definition must be disabled before clearing any associated instances.' % definition.event_type
return bad_request(message)
# Determine current user who is auto clearing alert or alarm instances (written to log)
assigned_user = User.query.get(g.current_user.id)
if assigned_user is not None:
name = assigned_user.first_name + ' ' + assigned_user.last_name
else:
name = 'Unknown/unassigned user with g.current_user.id: %s' % str(g.current_user.id)
# Identify default user and message for auto acknowledgment; log activity
ack_by = 1
ack_value = 'Log: Auto acknowledge (ooi-ui-services) OBO user \'%s\'; %s definition id: %d' % \
(name,definition.event_type, definition.id)
current_app.logger.info(ack_value)
# Get all active instances for this definition which have not been acknowledged.
instances = SystemEvent.query.filter_by(system_event_definition_id=definition.id,acknowledged=False).all()
for instance in instances:
if instance.event_type=='alarm':
if not (uframe_acknowledge_alert_alarm(instance.uframe_event_id, ack_value)):
message = 'Failed to acknowledge alarm (id:%d) in uframe, prior to clearing instance.' % instance.id
current_app.logger.info('[clear_alert_alarm] %s ' % message)
return bad_request(message)
# Update alert_alarm acknowledged, ack_by and ts_acknowledged
instance.acknowledged = True
instance.ack_by = ack_by
instance.ts_acknowledged = dt.datetime.strftime(dt.datetime.now(), "%Y-%m-%dT%H:%M:%S")
try:
db.session.add(instance)
db.session.commit()
except:
db.session.rollback()
return bad_request('IntegrityError during auto-acknowledgment of %s by %s.' %
(instance.event_type, str(ack_by)))
result = 'ok'
return jsonify( {'result' : result }), 200
except Exception as err:
        message = 'Insufficient data, or bad data format. %s' % str(err)
current_app.logger.info(message)
return conflict(message)
|
6b15f6019fad506937ed0bcc0c6eeb34ce21faf4
| 3,636,505
|
def range2d(range_x, range_y):
"""Creates a 2D range."""
range_x = list(range_x)
return [ (x, y) for y in range_y for x in range_x ]
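# Example: all (x, y) coordinates of a 3x2 grid, row by row.
print(range2d(range(3), range(2)))
# [(0, 0), (1, 0), (2, 0), (0, 1), (1, 1), (2, 1)]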
|
ca33799a277f0f72e99836e81a7ffc98b191fc37
| 3,636,506
|
import time
def segment(im, pad=0, caffemodel=None):
"""
Function which segments an input image. uses pyramidal method of scaling, performing
inference, upsampling results, and averaging results.
:param im: image to segment
:param pad: number of pixels of padding to add
:param caffemodel: path to caffemodel file
:return: The upsampled and averaged results of inference on input image at 3 scales.
"""
caffe.set_mode_gpu()
padded_image = add_padding(im, pad) # Add padding to original image
resized_images = resize_images(padded_image) # Resize original images
outputs = [classify(image, caffemodel=caffemodel) for image in resized_images] # Perform classification on images
upsample_start = time.time()
average_prob_maps = get_average_prob_maps(outputs, im.shape, pad)
print("Total segmenting time: {:.3f} ms".format((time.time() - upsample_start) * 1000))
return average_prob_maps
|
0e6f0d8cfd363c7b59007105178eaea3f0238261
| 3,636,509
|
def check_consistency( # pylint: disable=too-many-arguments
num_users=None,
num_items=None,
users_hat=None,
items_hat=None,
users=None,
items=None,
user_item_scores=None,
default_num_users=None,
default_num_items=None,
default_num_attributes=None,
num_attributes=None,
attributes_must_match=True,
):
"""Validate that the inputs to the recommender system are consistent
based on their dimensions. Furthermore, if all of the inputs
are consistent, we return the number of users and items that are inferred
from the inputs, or fall back to a provided default number.
Parameters
-----------
num_users: int, optional
An integer representing the number of users in the system
num_items: int, optional
An integer representing the number of items in the system
users_hat: :obj:`numpy.ndarray`, optional
A 2D matrix whose first dimension should be equal to the number of
users in the system. Typically this matrix refers to the system's
internal representation of user profiles, not the "true" underlying
user profiles, which are unknown to the system.
items_hat: :obj:`numpy.ndarray`, optional
A 2D matrix whose second dimension should be equal to the number of
items in the system. Typically this matrix refers to the system's
internal representation of item attributes, not the "true" underlying
item attributes, which are unknown to the system.
users: :obj:`numpy.ndarray`, optional
A 2D matrix whose first dimension should be equal to the number of
users in the system. This is the "true" underlying user profile
matrix.
items: :obj:`numpy.ndarray`, optional
A 2D matrix whose second dimension should be equal to the number of
items in the system. This is the "true" underlying item attribute
matrix.
user_item_scores: :obj:`numpy.ndarray`, optional
A 2D matrix whose first dimension is the number of users in the system
and whose second dimension is the number of items in the system.
default_num_users: int, optional
If the number of users is not specified anywhere in the inputs, we return
this value as the number of users to be returned.
default_num_items: int, optional
If the number of items is not specified anywhere in the inputs, we return
        this value as the number of items to be returned.
default_num_attributes: int, optional
If the number of attributes in the item/user representations is not
specified or cannot be inferred, this is the default number
of attributes that should be used. (This applies only to users_hat
and items_hat.)
num_attributes: int, optional
Check that the number of attributes per user & per item are equal to
this specified number. (This applies only to users_hat and items_hat.)
attributes_must_match: bool (optional, default: True)
Check that the user and item matrices match up on the attribute dimension.
If False, the number of columns in the user matrix and the number of
rows in the item matrix are allowed to be different.
Returns
--------
num_users: int
Number of users, inferred from the inputs (or provided default).
num_items: int
Number of items, inferred from the inputs (or provided default).
num_attributes: int (optional)
Number of attributes per item/user profile, inferred from inputs
(or provided default).
"""
if not is_array_valid_or_none(items_hat, ndim=2):
raise ValueError("items matrix must be a 2D matrix or None")
if not is_array_valid_or_none(users_hat, ndim=2):
raise ValueError("users matrix must be a 2D matrix or None")
if not is_valid_or_none(num_attributes, int):
raise TypeError("num_attributes must be an int")
num_items_vals = non_none_values(
getattr(items_hat, "shape", [None, None])[1],
getattr(items, "shape", [None, None])[1],
getattr(user_item_scores, "shape", [None, None])[1],
num_items,
)
num_users_vals = non_none_values(
getattr(users, "shape", [None])[0],
getattr(users_hat, "shape", [None])[0],
getattr(user_item_scores, "shape", [None])[0],
num_users,
)
num_users = resolve_set_to_value(
num_users_vals, default_num_users, "Number of users is not the same across inputs"
)
num_items = resolve_set_to_value(
num_items_vals, default_num_items, "Number of items is not the same across inputs"
)
if attributes_must_match:
# check attributes matching for users_hat and items_hat
num_attrs_vals = non_none_values(
getattr(users_hat, "shape", [None, None])[1],
getattr(items_hat, "shape", [None])[0],
num_attributes,
)
num_attrs = resolve_set_to_value(
num_attrs_vals,
default_num_attributes,
"User representation and item representation matrices are not "
"compatible with each other",
)
return num_users, num_items, num_attrs
else:
return num_users, num_items
|
4139a684751d25bef08d8f4806735be8769bb09e
| 3,636,510
|
import datetime
def verify_forgot_password(request):
"""
Check the forgot-password verification and possibly let the user
change their password because of it.
"""
# get form data variables, and specifically check for presence of token
formdata = _process_for_token(request)
if not formdata['has_userid_and_token']:
return render_404(request)
formdata_token = formdata['vars']['token']
formdata_userid = formdata['vars']['userid']
formdata_vars = formdata['vars']
# check if it's a valid user id
user = User.query.filter_by(id=formdata_userid).first()
if not user:
return render_404(request)
# check if we have a real user and correct token
if ((user and user.fp_verification_key and
user.fp_verification_key == unicode(formdata_token) and
datetime.datetime.now() < user.fp_token_expire
and user.email_verified and user.status == 'active')):
cp_form = auth_forms.ChangePassForm(formdata_vars)
if request.method == 'POST' and cp_form.validate():
user.pw_hash = auth_lib.bcrypt_gen_password_hash(
request.form['password'])
user.fp_verification_key = None
user.fp_token_expire = None
user.save()
messages.add_message(
request,
messages.INFO,
_("You can now log in using your new password."))
return redirect(request, 'mediagoblin.auth.login')
else:
return render_to_response(
request,
'mediagoblin/auth/change_fp.html',
{'cp_form': cp_form})
# in case there is a valid id but no user with that id in the db
# or the token expired
else:
return render_404(request)
|
7ff10e96701c2733702a717fe9bd4fd7103189d1
| 3,636,511
|
def extend_node(node, out_size, axis=-1, value=0):
"""Extend size of `node` array
For now, this function works same with `extend_array` method,
this is just an alias function.
Args:
node (numpy.ndarray): the array whose `axis` to be extended.
first axis is considered as "batch" axis.
out_size (int): target output size for specified `axis`.
axis (int): node feature axis to be extended.
Default is `axis=-1`, which extends only last axis.
value (int or float): value to be filled for extended place.
Returns (numpy.ndarray): extended `node` array, extended place is filled
with `value`
"""
return extend_arrays_to_size(
node, out_size=out_size, axis=axis, value=value)
|
08ef9f3f1cff5dce22ca8b4afacbc496e7d803ad
| 3,636,513
|
import numpy as np
def forward(X, weights, bias):
"""
Simulate the forward pass on one layer.
:param X: input matrix.
:param weights: weight matrix.
:param bias: bias vector.
    :return: binary predictions (0/1) from thresholding the sigmoid output at 0.5.
"""
a = np.matmul(weights, np.transpose(X))
b = np.reshape(np.repeat(bias, np.shape(X)[0], axis=0), np.shape(a))
output = sigmoid_activation(a + b)
y_pred = np.where(output < 0.5, 0, 1)
return y_pred
|
fb330d01a42965c003367997381ca8929200d57e
| 3,636,514
|
from typing import Any
def sanitize_for_params(x: Any) -> Any:
"""Sanitizes the input for a more flexible usage with AllenNLP's `.from_params()` machinery.
For now it is mainly used to transform numpy numbers to python types
Parameters
----------
x
The parameter passed on to `allennlp.common.FromParams.from_params()`
Returns
-------
sanitized_x
"""
# AllenNLP has a similar function (allennlp.common.util.sanitize) but it does not work for my purpose, since
# numpy types are checked only after the float type check, and:
# isinstance(numpy.float64(1), float) == True !!!
if isinstance(x, util.numpy.number):
return x.item()
elif isinstance(x, util.numpy.bool_):
# Numpy bool_ need to be converted to python bool.
return bool(x)
if isinstance(x, (str, float, int, bool)):
return x
elif isinstance(x, dict):
# Dicts need their values sanitized
return {key: sanitize_for_params(value) for key, value in x.items()}
# Lists and Tuples need their values sanitized
elif isinstance(x, list):
return [sanitize_for_params(x_i) for x_i in x]
elif isinstance(x, tuple):
return tuple(sanitize_for_params(x_i) for x_i in x)
# We include `to_json` function customize sanitization for user defined classes
elif hasattr(x, "to_json"):
return x.to_json()
return x
|
538e2268f15960683bfe85e03b96076e7f2241db
| 3,636,515
|
import dns.resolver
def host_ip():
"""Test fixture to resolve and return host_ip as a string."""
query = dns.resolver.query("scanme.nmap.org")
assert len(query) > 0, "could not resolve target host name"
return query[0].address
|
ee801bc2be6311fb1fe0805f5d3efb0a4fe589be
| 3,636,518
|
def get_subnet_from_list_by_id(subnet_id, subnets_list):
"""Get Neutron subnet by id from provided subnets list.
:param subnet_id: Neutron subnet ID
:param subnets_list: List of Neutron subnets, where target subnet should
be searched
"""
for subnet in subnets_list:
if subnet['id'] == subnet_id:
return subnet
LOG.warning("Cannot obtain subnet with id='%s' from provided subnets "
"list", subnet_id)
|
93e294131a96de321d18ce5a0e5d3b6fb5913b72
| 3,636,520
|
def grafana_logo(dispatcher):
"""Construct an image_element containing the locally hosted Grafana logo."""
return dispatcher.image_element(dispatcher.static_url(GRAFANA_LOGO_PATH), alt_text=GRAFANA_LOGO_ALT)
|
2311c77cf2b5054c3a103693a2d9b862a3e503af
| 3,636,521
|
import json
def is_json(payload):
"""Check if a payload is valid JSON."""
try:
json.loads(payload)
except (TypeError, ValueError):
return False
else:
return True
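# Example: valid and invalid payloads.
print(is_json('{"a": 1}'))   # True
print(is_json('not json'))   # False
print(is_json(None))         # False (the TypeError is caught)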
|
a02499ffd0a890fa4697f1002c5deb0fc894cac0
| 3,636,522
|
import numpy as np
def gram_schmidt(M):
    """
    @param M:
        An m x n matrix whose columns are to be orthogonalized
    @return:
        Matrix of the same shape whose columns are mutually orthogonal
    """
columns = M.T
res = []
res.append(columns[0])
for x in range(1, columns.shape[0]):
tmp = np.array([0 for x in range(M.shape[0])])
for vec in res:
y = (np.dot(vec, columns[x]) / np.dot(vec, vec)) * vec
tmp = tmp + y
res.append(columns[x] - tmp)
return np.array(res).T
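# Quick check: the orthogonalized columns have zero pairwise dot product.
M = np.array([[1.0, 1.0], [0.0, 1.0], [1.0, 2.0]])
Q = gram_schmidt(M)
print(Q[:, 0] @ Q[:, 1])   # 0.0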
|
e64b2ea4e36c3a5f8394887ba666b4c392d0284c
| 3,636,523
|
def horizon_main_nav(context):
""" Generates top-level dashboard navigation entries. """
if 'request' not in context:
return {}
current_dashboard = context['request'].horizon.get('dashboard', None)
dashboards = []
for dash in Horizon.get_dashboards():
if callable(dash.nav) and dash.nav(context):
dashboards.append(dash)
elif dash.nav:
dashboards.append(dash)
return {'components': dashboards,
'user': context['request'].user,
'current': getattr(current_dashboard, 'slug', None)}
|
40f2e5e5b8661d52d3688a04ac93b7c0d48b99f2
| 3,636,525
|
import warnings
import tensorflow as tf
def _filter_out_bad_segments(img1, seg1, img2, seg2):
"""
It's possible for shearing or scaling augmentation to sample
one segment completely out of the image- use this function
to filter out those cases
"""
minval = tf.reduce_min(tf.reduce_sum(seg1, [0,1])*tf.reduce_sum(seg2, [0,1]))
if minval < 0.5:
warnings.warn("filtering bad segment")
return False
else:
return True
|
fa74ae956c063c15b7fd1e8c21fb6e0788fc19e6
| 3,636,526
|
def _seconds_to_hours(time):
"""Convert time: seconds to hours"""
return time / 3600.0
|
d6abd9144882587833601e64d5c2226446f1bbdc
| 3,636,527
|
async def process_cmd_entry_erase(guild_id: int, txt_channel: str, bosses: list,
channel = None):
"""Processes boss `entry` `erase` subcommand.
Args:
guild_id (int): the id of the Discord guild of the originating message
txt_channel (str): the id of the channel of the originating message,
belonging to Discord guild of `guild_id`
bosses (list): a list of bosses to check
channel (int, optional): the channel for the record;
defaults to None
Returns:
str: an appropriate message for success or fail of command,
e.g. confirmation or list of entries
"""
if type(bosses) is str:
bosses = [bosses]
vdb = vaivora.db.Database(guild_id)
if channel and bosses in boss_conf['bosses']['world']:
records = await vdb.rm_entry_db_boss(bosses=bosses, channel=channel)
else:
records = await vdb.rm_entry_db_boss(bosses=bosses)
if records:
records = [f'**{record}**' for record in records]
return cleandoc(
f"""Your queried records ({len(records)}) have been """
f"""successfully erased.
- {bullet_point.join(records)}
"""
)
else:
return '*(But **nothing** happened...)*'
|
e385768fc34ebb419f51124e0a0f5a4e1577ad00
| 3,636,529
|
import warnings
import numpy as np
import scipy.optimize
def mle_iid_gamma(n):
    """Perform maximum likelihood estimates for the parameters of i.i.d.
    Gamma measurements, parametrized by alpha, b=1/beta"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = scipy.optimize.minimize(
fun=lambda log_params, n: -log_like_iid_gamma_log_params(log_params, n),
x0=np.array([2, 1/300]),
args=(n,),
method='L-BFGS-B',
)
if res.success:
return res.x
else:
raise RuntimeError('Convergence failed with message', res.message)
|
089d181a85a72d42457c7ea1eae3aaabb3d6dd60
| 3,636,530
|
import numpy as np
from pycocotools import mask as maskUtils  # assumed source of maskUtils
def build_tabnet_results(dataset, image_ids, rois, class_ids, scores, masks):
    """Arrange results to match COCO specs in http://cocodataset.org/#format
"""
# If no results, return an empty list
if rois is None:
return []
results = []
for image_id in image_ids:
# Loop through detections
for i in range(rois.shape[0]):
class_id = class_ids[i]
score = scores[i]
bbox = np.around(rois[i], 1)
mask = masks[:, :, i]
result = {
"image_id": image_id,
"category_id": dataset.get_source_class_id(class_id, "tab"),
"bbox":
[bbox[1], bbox[0], bbox[3] - bbox[1], bbox[2] - bbox[0]],
"score": score,
"segmentation": maskUtils.encode(np.asfortranarray(mask))
}
results.append(result)
return results
|
a564f28751436be9648b60a7badae651be3c4583
| 3,636,531
|
from typing import Callable
from typing import Dict
from typing import Any
import tensorflow as tf
def make_raw_serving_input_receiver_fn(
feature_spec: features_specs_type,
transform_input_tensor: Callable[[Dict[str, tf.Tensor]], None],
is_model_canned_estimator: bool = False,
batched_predictions: bool = True
) -> Callable[[], tf.estimator.export.ServingInputReceiver]:
"""
Build the serving_input_receiver_fn used for serving/inference.
transform_input_tensor: method that takes the input tensors and will mutate them so prediction
will have its correct input. For instance, it could be to generate feature transfo from
"raw dimensions" tensors.
is_model_canned_estimator: if the model you want to serve is a canned estimator, the serving
function has to be generated differently
"""
def serving_input_receiver_fn() -> Any:
# generate all tensor placeholders:
raw_tensors, prediction_input_tensors = featurespec_to_input_placeholders(
feature_spec, batched_predictions)
# Add transformations (for instance, feature transfos) to prediction_input_tensors
transform_input_tensor(prediction_input_tensors)
if is_model_canned_estimator:
return tf.estimator.export.ServingInputReceiver(
features=prediction_input_tensors, receiver_tensors={},
receiver_tensors_alternatives={"raw_input": raw_tensors})
else:
return tf.estimator.export.ServingInputReceiver(
features=prediction_input_tensors, receiver_tensors=raw_tensors)
return serving_input_receiver_fn
|
2780f36ae373b1bd4623b6988ec7b4d130fb21ff
| 3,636,532
|
from typing import List
import json
from typing import Set
import torchtext
def load_parentheses_dataset(path: str, depths: List[int]) -> torchtext.data.Dataset:
"""
Load equation verification data as a sequential torchtext Dataset, in infix
notation with parentheses.
The Dataset is additionally populated with `leaf_vocab`, `unary_vocab`, and
`binary_vocab` sets.
"""
with open(path, "r") as f:
data_by_depth = json.load(f)
leaf_vocab: Set[str] = set()
unary_vocab: Set[str] = set()
binary_vocab: Set[str] = set()
def make_example(serialized):
tree = ExpressionTree.from_serialized(serialized["equation"])
label = int(serialized["label"] == "1")
left_root_index = sequence_root_index(tree.left)
right_root_index = sequence_root_index(tree.right)
nonlocal leaf_vocab, unary_vocab, binary_vocab
leaf_vocab = leaf_vocab.union(tree.leaf_vocab())
unary_vocab = unary_vocab.union(tree.unary_vocab())
binary_vocab = binary_vocab.union(tree.binary_vocab())
return torchtext.data.Example.fromlist(
[str(tree.left), str(tree.right), label, left_root_index, right_root_index],
list(_PARENTHESES_FIELD_MAP.items()),
)
examples = []
for depth in depths:
examples.extend(list(map(make_example, data_by_depth[depth - 1])))
dataset = torchtext.data.Dataset(examples, _PARENTHESES_FIELD_MAP)
dataset.leaf_vocab = leaf_vocab
dataset.unary_vocab = unary_vocab
dataset.binary_vocab = binary_vocab
return dataset
|
048c5cc42660c21ab59d8dd2dc205aeeafb22bd3
| 3,636,533
|
from typing import List
import numpy as np
def get_angle(v1: List[int], v2: List[int]):
"""
:param v1: 2D vector
:param v2: 2D vector
:return: the angle of v1 and v2 in degree
"""
dot = np.dot(v1, v2)
norm = np.linalg.norm(v1) * np.linalg.norm(v2)
return np.degrees(np.arccos(dot / norm))
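
# Quick sanity checks on known angles:
assert abs(get_angle([1, 0], [0, 1]) - 90.0) < 1e-9
assert abs(get_angle([1, 0], [1, 1]) - 45.0) < 1e-9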
|
669a4119c1b6da1bcf0fb84f3d2ce0056acd8170
| 3,636,534
|
import logging
import numpy as np
def get_plasma_intersection(lon, lat, alt, plasma_alt=300., tx_lon=-75.552,
tx_lat=45.403, tx_alt=0.07):
"""
This function finds where a ray from a transmitter toward a satellite
intersects the peak plasma in the middle.
*** PARAMS ***
Satellite ephemeris point(s): lon, lat, alt (deg, deg, km)
Transmitter location [optionally]: tx_lon, tx_lat, tx_alt (deg, deg, km)
Altitude of peak plasma density: plasma_alt (km.)
***RETURNS***
plasma_lon (float): longitude of plasma intersection(s)
plasma_lat (float): latitude of plasma intersection(s)
"""
vec_inp = True if (type(lon)==list or type(lon)==np.ndarray) else False
#lon = (lon + 360.) % 360.
#tx_lon = (tx_lon + 360.) % 360.
dist = haversine(lon, lat, tx_lon, tx_lat)
if dist > 2500.:
logging.error("This approximation isn't valid for large distances")
logging.error("dist: {0}".format(dist))
return (-1,-1)
if plasma_alt > np.min(alt):
logging.error("Input altitudes are too low for the plasma")
logging.error('plasma_alt: {0}'.format(plasma_alt))
logging.error('alt: {0}'.format(alt))
return (-1,-1)
if vec_inp:
tx_lon = tx_lon*np.ones(len(lon))
tx_lat = tx_lat*np.ones(len(lat))
tx_alt = tx_alt*np.ones(len(alt))
x = (plasma_alt/alt)*dist
#only need initial bearing
bearing,__ = get_bearing(tx_lon, tx_lat, lon, lat)
delta_EW = x*np.sin(np.deg2rad(bearing))
delta_NS = x*np.cos(np.deg2rad(bearing))
# convert to longitude (deg):
delta_lon = delta_EW*360./(2*np.pi*6371.*np.sin(np.deg2rad(lat)))
delta_lat = delta_NS*360./(2*np.pi*6371.)
plasma_lon = tx_lon + delta_lon
plasma_lat = tx_lat + delta_lat
logging.info('delta_EW, delta_NS: {0},{1}'.format(delta_EW, delta_NS))
logging.info('delta_lon, delta_lat: {0},{1}'.format(delta_lon, delta_lat))
logging.info('plasma_lon, plasma_lat: {0},{1}'.format(plasma_lon, plasma_lat))
return (plasma_lon, plasma_lat)
|
8234bf61ef2b0d501a723ef9553c6b63d3c51998
| 3,636,535
|
import igraph as ig
import pandas as pd
from IPython.display import display
def plot_clusters(g, c):
"""
Draws a given graph g with vertex colours corresponding to clusters c and
displays the corresponding sizes of the clusters.
===========================================================================
Parameters
---------------------------------------------------------------------------
g : a graph
c : a list of vertex colours (clusters)
---------------------------------------------------------------------------
"""
if type(c) == dict:
c = list(c.values())
g.vs['color'] = c
g.vs['label'] = c
palette = ig.ClusterColoringPalette(len(g.vs))
df = pd.DataFrame(columns=['Frequency'])
df.index.name = 'Colour'
for i in set(c):
df.loc[int(i)] = [c.count(i)]
display(df)
visual_style = {}
visual_style['vertex_color'] = [palette[col] for col in g.vs['color']]
visual_style['vertex_label'] = [col for col in g.vs['color']]
visual_style['vertex_frame_width'] = 0
visual_style['bbox'] = (300, 300)
visual_style['margin'] = 10
return ig.plot(g, **visual_style)
|
dbeec2b421a23c7b503dc71e29cd7caca3300dc5
| 3,636,536
|
def make_cat_advanced(simple=True, yolo=False):
"""fill the categories manually"""
cat_list = get_cat_list(simple)
if simple:
if yolo:
cat_mapping = {
"benign": 0,
"malign": 1,
}
else:
cat_mapping = [0, 1]
return cat_list, cat_mapping
# The names from datainfo are used here!
cat_mapping = {
# malign
"Chondrosarkom": 1,
"Osteosarkom": 2,
"Ewing-Sarkom": 3,
"Plasmozytom / Multiples Myelom": 4,
"NHL vom B-Zell-Typ": 5,
# benign
"Osteochondrom": 6,
"Enchondrom": 7,
"Chondroblastom": 8,
"Osteoidosteom": 9,
"NOF": 10,
"Riesenzelltumor": 11,
"Chordom": 12,
"Hämangiom": 13,
"Knochenzyste, aneurysmatische": 14,
"Knochenzyste, solitär": 15,
"Dysplasie, fibröse": 16,
}
return cat_list, cat_mapping
|
5b4f0bac9126ce3a84ec5ea8d27203f7dfe41e10
| 3,636,537
|
import operator
def process_fuel(context):
"""
Reformats Fuel consumed
"""
fuel = {
0: 'Petrol',
        1: 'Diesel'
}
data = []
totals = []
for index, type in enumerate(context['Fuel']):
litresSold = operator.sub(type.closing_meter, type.opening_meter)
total = operator.mul(litresSold, type.unit_price)
totals.append(total)
data.append([
{'type': fuel[index],
'opening_meter': type.opening_meter,
'closing_meter': type.closing_meter,
'unit_price': type.unit_price,
'litresSold': litresSold,
'total': total}])
return {
'data': data,
'total': totals
}
|
fea31cb306417cf1dfcef8859ed2585c2903849b
| 3,636,538
|
from bs4 import BeautifulSoup
import requests
import pandas as pd
def prepare_df_financials(
ticker: str, statement: str, quarter: bool = False
) -> pd.DataFrame:
"""Builds a DataFrame with financial statements for a given company
Parameters
----------
ticker : str
Company's stock ticker
statement : str
Either income, balance or cashflow
quarter : bool, optional
Return quarterly financial statements instead of annual, by default False
Returns
-------
pd.DataFrame
A DataFrame with financial info
Raises
------
ValueError
If statement is not income, balance or cashflow
"""
financial_urls = {
"income": {
"quarter": "https://www.marketwatch.com/investing/stock/{}/financials/income/quarter",
"annual": "https://www.marketwatch.com/investing/stock/{}/financials/income",
},
"balance": {
"quarter": "https://www.marketwatch.com/investing/stock/{}/financials/balance-sheet/quarter",
"annual": "https://www.marketwatch.com/investing/stock/{}/financials/balance-sheet",
},
"cashflow": {
"quarter": "https://www.marketwatch.com/investing/stock/{}/financials/cash-flow/quarter",
"annual": "https://www.marketwatch.com/investing/stock/{}/financials/cash-flow",
},
}
if statement not in financial_urls:
raise ValueError(f"type {statement} is not in {financial_urls.keys()}")
period = "quarter" if quarter else "annual"
text_soup_financials = BeautifulSoup(
requests.get(
financial_urls[statement][period].format(ticker),
headers={"User-Agent": get_user_agent()},
).text,
"lxml",
)
# Define financials columns
a_financials_header = [
financials_header.text.strip("\n").split("\n")[0]
for financials_header in text_soup_financials.findAll(
"th", {"class": "overflow__heading"}
)
]
s_header_end_trend = ("5-year trend", "5- qtr trend")[quarter]
df_financials = pd.DataFrame(
columns=a_financials_header[0 : a_financials_header.index(s_header_end_trend)]
)
find_table = text_soup_financials.findAll(
"div", {"class": "element element--table table--fixed financials"}
)
if not find_table:
return df_financials
financials_rows = find_table[0].findAll(
"tr", {"class": ["table__row is-highlighted", "table__row"]}
)
for a_row in financials_rows:
constructed_row = []
financial_columns = a_row.findAll(
"td", {"class": ["overflow__cell", "overflow__cell fixed--column"]}
)
if not financial_columns:
continue
for a_column in financial_columns:
column_to_text = a_column.text.strip()
if "\n" in column_to_text:
column_to_text = column_to_text.split("\n")[0]
if column_to_text == "":
continue
constructed_row.append(column_to_text)
df_financials.loc[len(df_financials)] = constructed_row
return df_financials
|
1fdd3488c81bdf404764bba3b797f931ba77ad93
| 3,636,539
|
def build_si(cp, instruction):
"""
Build the integer representation of the source indices.
:param cp: CoreParameters instance for the target architecture
:param instruction: Instruction instance
:return: integer representation of si
"""
# Check sizing.
if len(instruction.source_indices) > 3:
exception_string = f"The {len(instruction.source_indices)} sources exceed the architecture's specified " \
+ f"maximum of 3."
raise AssemblyException(exception_string)
# Concatenate the values together.
si = 0
num_source_indices = len(instruction.source_indices)
if num_source_indices > 2:
if instruction.source_indices[2] != 0:
si |= instruction.source_indices[2]
si <<= cp.single_si_width
if num_source_indices > 1:
if instruction.source_indices[1] != 0:
si |= instruction.source_indices[1]
si <<= cp.single_si_width
if num_source_indices > 0:
if instruction.source_indices[0] != 0:
si |= instruction.source_indices[0]
# Check sizing, and return the integer.
si = int(si)
if si.bit_length() > cp.si_width:
raise AssemblyException("si exceeds its allotted bit width.")
return si
|
2d78d75486432c1e41847074ed819194b1f0e643
| 3,636,540
|
import numpy as np
def getRegSampledPrfFitsByOffset(prfArray, colOffset, rowOffset):
"""
The 13x13 pixel PRFs on at each grid location are sampled at a 9x9 intra-pixel grid, to
describe how the PRF changes as the star moves by a fraction of a pixel in row or column.
To extract out a single PRF, you need to address the 117x117 array in a funny way
(117 = 13x9). Essentially you need to pull out every 9th element in the array, i.e.
.. code-block:: python
img = array[ [colOffset, colOffset+9, colOffset+18, ...],
[rowOffset, rowOffset+9, ...] ]
Inputs
------
prfArray
117x117 interleaved PRF array
colOffset, rowOffset
The offset used to address the column and row in the interleaved PRF
Returns
------
prf
13x13 PRF image for the specified column and row offset
"""
gridSize = 9
assert colOffset < gridSize
assert rowOffset < gridSize
# Number of pixels in regularly sampled PRF. Should be 13x13
nColOut, nRowOut = prfArray.shape
nColOut /= float(gridSize)
nRowOut /= float(gridSize)
    iCol = colOffset + (np.arange(nColOut) * gridSize).astype(int)
    iRow = rowOffset + (np.arange(nRowOut) * gridSize).astype(int)
tmp = prfArray[iRow, :]
prf = tmp[:,iCol]
return prf
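
# Usage sketch with a synthetic 117x117 interleaved PRF (random values, shape check only):
_prf117 = np.random.rand(117, 117)
_prf13 = getRegSampledPrfFitsByOffset(_prf117, colOffset=3, rowOffset=5)
assert _prf13.shape == (13, 13)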
|
551ec8624c9c41bca850cf5110d59f65179d6505
| 3,636,541
|
import click
def generate_list_display(object, attrs):
"""Generate a display string for an object based on some attributes.
Args:
object: An object which has specific attributes.
        attrs: An iterable of strings containing attributes to get from
the above object.
Returns:
A string containing a list display of the object with respect to
the passed in attributes.
"""
return "\n".join(
click.style(attr, bold=True) + ": %s" % getattr(object, attr)
for attr in attrs
)
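
# Usage sketch with a throwaway object (hypothetical attributes):
from types import SimpleNamespace
_job = SimpleNamespace(id=42, status="running")
print(generate_list_display(_job, ["id", "status"]))
# -> "id: 42\nstatus: running", with the attribute names rendered bold by click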
|
17c876261bede0c38d91b4bd3e7b0048616f8cbf
| 3,636,542
|
import tempfile
from tomlkit import document, dumps  # assumed: document()/dumps() come from tomlkit
def create_temporary_config_file():
""" Create a minimal config file with some default values
"""
toml_config = document()
toml_config.add("name", "Test Suite")
tmp_config_file = tempfile.NamedTemporaryFile(delete=False)
with tmp_config_file:
content = dumps(toml_config).encode("utf-8")
tmp_config_file.write(content)
return tmp_config_file.name
|
ff7c226eb035aa6b5d8e79efa2acc8a92a925659
| 3,636,543
|
import numpy as np
from skimage import transform as skt
def shear(image, shear_factor, **kwargs):
"""
Shear image.
For details see:
http://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.AffineTransform
>>> image = np.eye(3, dtype='uint8')
    >>> sheared = shear(image, 0.2)
:param numpy array image: Numpy array with range [0,255] and dtype 'uint8'.
:param float shear_factor: Shear factor [0, 1]
:param kwargs kwargs: Keyword arguments for the underlying scikit-image
warp function, e.g. order=1 for linear interpolation.
:return: Sheared image
:rtype: numpy array with range [0,255] and dtype 'uint8'
"""
set_default_order(kwargs)
transform = skt.AffineTransform(shear=shear_factor)
return skt.warp(image, transform, preserve_range=True,
**kwargs).astype('uint8')
|
97f6cc57d1aa41569c84601470242350b2805ffc
| 3,636,544
|
def segments():
"""Yields all segments in the unnannotated training Qatar-Living dataset."""
return (segment for document in documents() for segment in document.segments)
|
eea12bb25ca3c143c5b867987444e4d141982e94
| 3,636,545
|
from datetime import datetime
def get_options_between_dates(
start_date,
end_date):
"""get_options_between_dates
:param start_date: start date
:param end_date: end date
"""
valid_options = []
for rec in historical_options():
        opt_date = datetime.strptime(
str(rec),
'%m-%d-%Y').date()
if start_date <= opt_date <= end_date:
valid_options.append(opt_date.strftime('%Y-%m-%d'))
return valid_options
|
c2528e85f5e1fce9f537639d0ec88ca20477b93d
| 3,636,546
|
from dateutil import parser as dateutil_parser  # assumed source of dateutil_parser
def format_timestamp(timestamp_str, datetime_formatter):
"""Parse and stringify a timestamp to a specified format.
Args:
timestamp_str (str): A timestamp.
datetime_formatter (str): A format string.
Returns:
str: The formatted, stringified timestamp.
"""
try:
if '"' in timestamp_str or '\'' in timestamp_str:
# Make sure the timestamp is not surrounded by any quotes
timestamp_str = timestamp_str.replace('"', '')
timestamp_str = timestamp_str.replace('\'', '')
formatted_timestamp = (
dateutil_parser.parse(timestamp_str).strftime(datetime_formatter))
except (TypeError, ValueError) as e:
LOGGER.warn('Unable to parse/format timestamp: %s\n,'
' datetime_formatter: %s\n%s',
timestamp_str, datetime_formatter, e)
formatted_timestamp = None
return formatted_timestamp
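
# Usage sketch (LOGGER is only touched when parsing fails):
assert format_timestamp('"2021-03-04T05:06:07"', '%Y-%m-%d') == '2021-03-04'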
|
528f1a5f7fd2a45de9d4ee77a8eaf29e06dcb310
| 3,636,547
|
import math
def dyn_stdev(val, prev_stdev, prev_mean, n):
"""Dynamic stdev: computes the standard deviation based on a previous stdev plus a new value. Useful when stdev
is built incrementally, it saves the usage of huge arrays.
Keyword arguments:
val -- new val to add to the mean
    prev_stdev -- previous stdev value
    prev_mean -- previous mean value
    n -- number of total elements in the mean including the new val
"""
if n < 1:
raise ValueError("n < 1, stdev only defined for a positive number of elements")
if n == 1:
return 0
curr_mean = dyn_mean(val, prev_mean, n)
return math.sqrt(((n-1)*prev_stdev*prev_stdev + (val - prev_mean)*(val - curr_mean)) / float(n))
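
# Worked example. `dyn_mean` is not shown above, so this sketch assumes the usual
# incremental-mean helper with the same signature, and checks the running result
# against statistics.pstdev (dyn_stdev tracks the *population* standard deviation).
import statistics

def dyn_mean(val, prev_mean, n):  # assumed helper, not part of the original snippet
    return prev_mean + (val - prev_mean) / n

_mean, _stdev = 0.0, 0.0
for _i, _v in enumerate([1.0, 2.0, 3.0], start=1):
    _stdev = dyn_stdev(_v, _stdev, _mean, _i)
    _mean = dyn_mean(_v, _mean, _i)
assert abs(_stdev - statistics.pstdev([1.0, 2.0, 3.0])) < 1e-9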
|
ccf58f769650b209128bc370fc67144f82e68850
| 3,636,548
|
import warnings
import numpy as np
import pandas as pd
def get_detail_backtest_results(input_df,
features,
return_col_name='returns',
equity_identifier='Equity Parent',
date_col_name='date',
n_bins=5,
bin_labels=None,
corr_method='spearman',
items_per_bin_deviation_threshold=1,
drop_months_outside_of_threshold=False):
"""
Description: This function generates the back testing results for a list of features.
This procedure does not handle subsetting for a specified date range.
Subset to a specified date range needs to be done prior to passing the input dataframe.
The procedure works on the assumption that the bin_labels are specified in descending order.
eg: ['Q1','Q2','Q3','Q4'] implies Q1 is the highest portfolio and Q4 is the lowest.
The generation of bins work ideally with a sufficient number of unique values in a feature.
The items_per_bin_deviation_threshold parameter can be used to decide how strict we want to be with the effect of non-unique values.
items_per_bin_deviation_threshold acts on the difference between the expected number of items in a bin
vs the actual number of items.
drop_months_outside_of_threshold can be set to True, if the months deviating from the
above threshold should be excluded from back testing.
:param input_df: Type pandas dataframe. long format dataframe.
    :param features: Type list. list of features for which backtesting needs to be performed. These should correspond
to the names of the columns in the df_long dataframe.
:param return_col_name: Type str. Name of the return column.
:param equity_identifier : Type str. Name of the equity identifier column.
:param date_col_name:Type str. Name of the date column.
:param n_bins:Type int. number of bins to split the equities into.
:param bin_labels:Type list. list of bin labels. It is assumed that the labels are in descending order.
eg: ['Q1','Q2','Q3'] implies Q1 is the highest portfolio and Q3 is the lowest.
:param corr_method:Type string. correlation method being used.
:param items_per_bin_deviation_threshold:Type int. Permissible deviation from the expected number of items per bin.
    :param drop_months_outside_of_threshold:Type boolean. Decision to drop months that deviate beyond the acceptable
items_per_bin_deviation_threshold.
:return:Type pandas dataframe. detail backtesting results for each period
"""
if bin_labels is None:
bin_labels = ['Q' + str(i + 1) for i in range(n_bins)]
df_long = input_df.copy()
long_cols = list(df_long.columns)
if date_col_name not in long_cols:
df_long = df_long.reset_index()
df_long.rename(columns={'index': 'date'}, inplace=True)
if return_col_name in features:
features.remove(return_col_name)
detail_results = []
features = sorted(features)
feature_cnt = 0
total_features = len(features)
print('Total features for processing: ' + str(total_features))
warnings.formatwarning = custom_formatwarning
for feature in features:
category = feature.split('_bshift')[0]
feature_cols = [equity_identifier, date_col_name, return_col_name, feature]
df_feature_detail = df_long[feature_cols].copy()
df_feature_detail = get_ranks(df_feature_detail,
date_col_name,
feature)
df_feature_detail = add_bins_col_to_rank_df(df_feature_detail,
n_bins)
df_bin_check = pd.DataFrame(df_feature_detail.groupby(date_col_name)['bin_no'].max())
bin_check_mask = df_bin_check['bin_no'] != n_bins
insufficient_bins_dates = [item.date().strftime("%Y-%m-%d") for item in df_bin_check[bin_check_mask].index.tolist()]
if len(insufficient_bins_dates) > 0:
warnings.warn('\nInsufficient bins warning:\nFeature: ' + feature+'\n'+'\n' +
'Months with insufficient bins:' + str(insufficient_bins_dates)+ '\n' + '\n' +
'These months are excluded from the back testing computation')
df_feature_detail = df_feature_detail[~df_feature_detail[date_col_name].isin(insufficient_bins_dates)]
print(df_feature_detail.shape)
total_no_of_items = df_feature_detail[equity_identifier].unique().shape[0]
expected_no_of_items_per_bin = total_no_of_items/n_bins
mask_bin_lowest = df_feature_detail['bin_no'] == 1
mask_bin_highest = df_feature_detail['bin_no'] == n_bins
df_bin_lowest = df_feature_detail[mask_bin_lowest].copy()
df_bin_highest = df_feature_detail[mask_bin_highest].copy()
bin_lowest_bad_dates = get_dates_deviating_from_threshold(df_bin_lowest,
date_col_name,
equity_identifier,
items_per_bin_deviation_threshold,
expected_no_of_items_per_bin)
bin_highest_bad_dates = get_dates_deviating_from_threshold(df_bin_highest,
date_col_name,
equity_identifier,
items_per_bin_deviation_threshold,
expected_no_of_items_per_bin)
if len(bin_lowest_bad_dates) > 0 or len(bin_highest_bad_dates) > 0:
warnings.warn('\nDeviation from threshold warning:\nFeature: ' + feature+'\n'+'\n' +
'Top Portfolio - Months which deviate from threshold: '+str(bin_highest_bad_dates)+'\n'+'\n' +
'Bottom Portfolio - Months which deviate from threshold: '+str(bin_lowest_bad_dates))
if drop_months_outside_of_threshold:
months_to_drop = bin_lowest_bad_dates + bin_highest_bad_dates
warnings.warn('\nMonths dropped warning:\nFeature: ' + feature + '\n'+'\n' +
'Months: '+str(months_to_drop) +' will be dropped from computation')
df_feature_detail = df_feature_detail[~df_feature_detail[date_col_name].isin(months_to_drop)]
df_feature_detail_agg = compute_date_level_metrics(df_feature_detail,
bin_labels,
date_col_name,
return_col_name,
feature,
corr_method)
df_feature_detail_agg['feature'] = feature
df_feature_detail_agg['category'] = category
detail_results.append(df_feature_detail_agg)
feature_cnt += 1
if feature_cnt % 100 == 0:
print(str(feature_cnt) + ' features completed')
detail_results_df = pd.concat(detail_results)
return detail_results_df
|
702f80d378d12e3570af6bf69a786ec913eed4e9
| 3,636,549
|
from collections import Counter
import numpy as np
def check_train_balance(df,idx_train,keys):
"""
check the balance of the training set.
    if one class has more than two extra instances compared to the other,
    we will randomly take out those 'extra instances' from the majority
    class
"""
Counts = dict(Counter(df.iloc[idx_train]['targets'].values))
if np.abs(Counts[keys[0]] - Counts[keys[1]]) > 2:
if Counts[keys[0]] > Counts[keys[1]]:
key_major = keys[0]
key_minor = keys[1]
else:
key_major = keys[1]
key_minor = keys[0]
ids_major = df.iloc[idx_train]['id'][df.iloc[idx_train]['targets'] == key_major]
idx_train_new = idx_train.copy()
for n in range(len(idx_train_new)):
random_pick = np.random.choice(np.unique(ids_major),size = 1)[0]
# print(random_pick,np.unique(ids_major))
idx_train_new = np.array([item for item,id_temp in zip(idx_train_new,df.iloc[idx_train_new]['id']) if (id_temp != random_pick)])
ids_major = np.array([item for item in ids_major if (item != random_pick)])
new_counts = dict(Counter(df.iloc[idx_train_new]['targets']))
if np.abs(new_counts[keys[0]] - new_counts[keys[1]]) > 3:
if new_counts[keys[0]] > new_counts[keys[1]]:
key_major = keys[0]
key_minor = keys[1]
else:
key_major = keys[1]
key_minor = keys[0]
ids_major = df.iloc[idx_train_new]['id'][df.iloc[idx_train_new]['targets'] == key_major]
elif np.abs(new_counts[keys[0]] - new_counts[keys[1]]) < 3:
break
return idx_train_new
else:
return idx_train
|
d99c9e1c4ae0d6124da576b91ce2b2786d53f07b
| 3,636,551
|
import numpy as np
def eye(w, n):
"""Create diagonal matrix with w on diagonal."""
return np.array([[w if i==j else 0.0*w for i in range(n)] for j in range(n)])
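
# Quick check: a 3x3 diagonal matrix with 2.5 on the diagonal.
_D = eye(2.5, 3)
assert _D.shape == (3, 3) and _D[1][1] == 2.5 and _D[0][2] == 0.0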
|
e12ff719981ff7311c21339ad651d7bd38f204f6
| 3,636,552
|
def Messaging():
"""
Messaging
Creates JMS resources.
Only to use in a resource block.
"""
if state().block and state().block != 'resources':
raise SyntaxError('Messaging can only be used in a resources block')
return subscope('messaging')
|
9017c8c0452cf75b05f422d6335b8ba5bcd7bc90
| 3,636,553
|
from datetime import datetime
def timestamp2WP(timestamp):
"""
Converts a Unix Epoch-based timestamp (seconds since Jan. 1st 1970 GMT)
timestamp to one acceptable by Wikipedia.
:Parameters:
timestamp : int
Unix timestamp to be converted
:Return:
string Wikipedia style timestamp
"""
    return datetime.utcfromtimestamp(timestamp).strftime('%Y%m%d%H%M%S')
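
# Quick check: the Unix epoch itself is Jan. 1st 1970 00:00:00 GMT.
assert timestamp2WP(0) == '19700101000000'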
|
c4b9bef9e555c178991569472f3962c7a17d996c
| 3,636,554
|
def get_available_node_types(nodenet_uid):
""" Return a dict of available built-in node types and native module types"""
return True, runtime.get_available_node_types(nodenet_uid)
|
509730edf1c3ea7958a7356e3c784893c2b4c769
| 3,636,555
|
import matplotlib.pyplot as plt
def dos_element_orbitals(
folder,
element_orbital_dict,
output='dos_element_orbitals.png',
fill=True,
alpha=0.3,
linewidth=1.5,
sigma=0.05,
energyaxis='x',
color_list=None,
legend=True,
total=True,
figsize=(4, 3),
erange=[-6, 6],
spin='up',
soc_axis=None,
combination_method='add',
fontsize=12,
save=True,
shift_efermi=0,
):
"""
This function plots the element projected density of states on specific orbitals.
Parameters:
folder (str): This is the folder that contains the VASP files
element_orbital_dict (dict[str:list]): A dictionary that contains the individual elements and the corresponding
orbitals to project onto. For example, if the user wants to project onto the s, py, pz, and px orbitals
            of In and the s orbital of As for an InAs structure then the dictionary would be {'In':[0,1,2,3], 'As':[0]}
output (str): File name of the resulting plot.
        fill (bool): Determines whether or not to fill underneath the plot
alpha (float): Alpha value for the fill
linewidth (float): Linewidth of lines
sigma (float): Standard deviation for gaussian filter
energyaxis (str): Determines the axis to plot the energy on ('x' or 'y')
color_list (list): List of colors that is the same length as the number of projections
in the plot.
legend (bool): Determines whether to draw the legend or not
        total (bool): Determines whether to draw the total density of states or not
spin (str): Which spin direction to parse ('up' or 'down')
figsize (list / tuple): Desired size of the image in inches (width, height)
erange (list): Energy range for the DOS plot ([lower bound, upper bound])
combination_method (str): If spin == 'both', this determines if the spin up and spin down
            densities are added or subtracted. ('add' or 'sub')
fontsize (float): Font size of the text in the figure.
save (bool): Determines whether to automatically save the figure or not. If not
            the figure and axis are returned for further manipulation.
Returns:
If save == True, this function will return nothing and directly save the image as
the output name. If save == False, the function will return the matplotlib figure
and axis for further editing.
"""
dos = Dos(shift_efermi=shift_efermi, folder=folder, spin=spin, combination_method=combination_method)
fig = plt.figure(figsize=figsize, dpi=400)
ax = fig.add_subplot(111)
_figure_setup_dos(ax=ax, fontsize=fontsize, energyaxis=energyaxis)
dos.plot_element_orbitals(
ax=ax,
element_orbital_dict=element_orbital_dict,
fill=fill,
alpha=alpha,
linewidth=linewidth,
sigma=sigma,
energyaxis=energyaxis,
color_list=color_list,
legend=legend,
total=total,
erange=erange,
)
plt.tight_layout(pad=0.4)
if save:
plt.savefig(output)
else:
return fig, ax
|
957e21298077ece088ef5f6c2c2c7ad5c3e599aa
| 3,636,556
|
def get_ceph_nodes():
"""Query named relation 'ceph' to determine current nodes."""
hosts = []
for r_id in relation_ids('ceph'):
for unit in related_units(r_id):
hosts.append(relation_get('private-address', unit=unit, rid=r_id))
return hosts
|
35ee1775c9e4d2636e8373cf0936e6e1a8cb0b76
| 3,636,557
|
from xml.etree import ElementTree
def metadataAbstractElementIllegalChildElementTest1():
"""
No child elements, child elements not allowed.
>>> doctestMetadataAbstractElementFunction(
... testMetadataAbstractElementIllegalChildElements,
... metadataAbstractElementIllegalChildElementTest1())
[]
"""
metadata = """<?xml version="1.0" encoding="UTF-8"?>
<test>
</test>
"""
return ElementTree.fromstring(metadata)
|
c8405bbe81db5d86941a68c62ba19a6576789e50
| 3,636,558
|
def discover_fields(layout):
"""Discover all fields defined in a layout object
This is used to avoid defining the field list in two places --
the layout object is instead inspected to determine the list
"""
fields = []
try:
comps = list(layout)
except TypeError:
return fields
for comp in comps:
if isinstance(comp, str):
fields.append(comp)
else:
fields.extend(discover_fields(comp))
return fields
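
# Usage sketch with plain nested lists standing in for a layout object:
assert discover_fields(["name", ["email", "phone"], "age"]) == [
    "name", "email", "phone", "age"]
assert discover_fields(42) == []  # non-iterable layouts yield no fields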
|
359a6ed1d23e1c56a699895e8c15a93bce353750
| 3,636,559
|
def replace(project_symbols):
"""
replace old source with non annotated signatures
:param project_symbols: symbols we will use to write out new source code
:return: bool
"""
for module_symbols in project_symbols:
if not write_new_source(module_symbols, access_attribute("get_non_annotated_source")):
return False
return True
|
b3a00199f336b6711fba2096bec9a0fc0c4976b8
| 3,636,560
|
def element_list_as_string(elements):
"""Flatten a list of elements into a space separated string."""
names = []
for element in elements:
if isinstance(element, AOVGroup):
names.append("@{}".format(element.name))
else:
names.append(element.variable)
return " ".join(names)
|
baa0c1aaa6bd11932f388756c807c9240abb3958
| 3,636,561
|
def encode_corpus(storage: LetterStorage, corpus: tuple) -> tuple:
"""
Encodes sentences by replacing letters with their ids
:param storage: an instance of the LetterStorage class
:param corpus: a tuple of sentences
:return: a tuple of the encoded sentences
"""
if not isinstance(storage, LetterStorage) or not isinstance(corpus, tuple):
return ()
encoded_corpus = []
for element in corpus:
list_element = []
for word in element:
list_element.append(tuple([storage.get_id_by_letter(letter) for letter in word]))
encoded_corpus.append(tuple(list_element))
return tuple(encoded_corpus)
|
0fa6b4c6b5dd4a33c9e9aee8b1c81fdb119625a7
| 3,636,563
|
from typing import Type
import numpy as np
import cudf
from numba import cuda
def aggregated_column_unique(chart: Type[BaseChart], data):
"""
description:
main function to calculate histograms
input:
- chart
- data
output:
list_of_unique_values
"""
a_range = cuda.to_device(np.array([chart.min_value, chart.max_value]))
temp_df = cudf.DataFrame()
temp_df.add_column(
chart.x,
get_binwise_reduced_column(
data[chart.x].copy().to_gpu_array(), chart.stride, a_range
),
)
return temp_df[chart.x].unique().to_pandas().tolist()
|
79f2f896e5a8ad06dba5589896eadfe224e42246
| 3,636,564
|
def collocations_table_exist(con):
"""Return True if the collocations table exist"""
query = con.query(
"select 1 from information_schema.tables "
"where table_name='collocations'")
return bool(list(query.dictresult()))
|
9ffa05f698056d9fab6bb9651427b6bc64f414ea
| 3,636,566
|
from bs4 import BeautifulSoup
import re
def ftp_profile(publish_settings):
"""Takes PublishSettings, extracts ftp user, password, and host"""
soup = BeautifulSoup(publish_settings, 'html.parser')
profiles = soup.find_all('publishprofile')
ftp_profile = [profile for profile in profiles if profile['publishmethod'] == 'FTP'][0]
matches = re.search('ftp://(.+)/site/wwwroot', ftp_profile['publishurl'])
host = matches.group(1) if matches else ''
username = ftp_profile['username'].replace("\\$", "%5C%24")
password = ftp_profile['userpwd']
return host, username, password, ftp_profile['publishurl']
|
003218e6d58d01afcbf062a14e68294d0033b8af
| 3,636,567
|
from typing import List
from sklearn.metrics import accuracy_score  # assumed source of accuracy_score
def train(name,train_data:List[tuple],test_data=None)->tuple:
"""
Train Naive Bayes Classifier for Multinomial Models
:param list train_data: list train data of tuple (text,tag)
:param list test_data: list test data of tuple (text,tag)
:return: tuple(model,accuracy)
"""
X_data=[]
y_data=[]
for sent in train_data:
X_data.append(sent[0])
y_data.append(sent[1])
nb.fit(X_data, y_data)
if test_data!=None:
X_test=[]
y_test=[]
for sent in test_data:
X_test.append(sent[0])
y_test.append(sent[1])
y_pred = nb.predict(X_test)
return (nb, accuracy_score(y_pred, y_test))
return (nb,)
|
32fce3f0c69bdb85878549f95c159a1277104f97
| 3,636,568
|
def validate_article(article_json):
"""
Validate the content of a raw article
"""
if article_json is None:
return False
try:
# Filter title
if not vstrlen(article_json['title'], 16):
return False
# Filter contents
if not vstrlen(article_json['contents'], 30):
return False
# Filter Outlet
if not vstrlen(article_json['feedlabel']):
return False
# Filter Article link
if not vstrlen(article_json['url'], 10):
return False
except KeyError:
return False
return True
|
493a539ec933d43980a7724afadc6a478b4a1a6a
| 3,636,569
|
import numpy as np
import keras  # assumed: Keras 2-style API (keras.backend.count_params, layer.output_shape)
def get_model_memory_usage(batch_size, model):
"""
Estimate how much memory the model will take, assuming all parameters is in float32
and float32 takes 4 bytes (32 bits) in memory.
:param batch_size:
:param model:
:return:
"""
# Calculate the total number of outputs from all layers
shapes_mem_count = 0
for l in model.layers:
single_layer_mem = 1
for s in l.output_shape:
if s is None:
continue
single_layer_mem *= s
shapes_mem_count += single_layer_mem
# Calculate the total number of trainable parameters
trainable_count = np.sum(
[keras.backend.count_params(p) for p in set(model.trainable_weights)]
)
# Calculate the total number of non trainable parameters
non_trainable_count = np.sum(
[keras.backend.count_params(p) for p in set(model.non_trainable_weights)]
)
# total memory = 4 bytes * total number of numbers in each run * number of images in each run
total_memory = 4.0 * batch_size * (shapes_mem_count + 2 * trainable_count + non_trainable_count)
# convert to GB
gbytes = np.round(total_memory / (1024.0 ** 3), 3)
return gbytes
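
# Usage sketch with a tiny Sequential model. This assumes the Keras 2-style API
# that the function above relies on (layer.output_shape, keras.backend.count_params);
# the printed number is only a rough float32 estimate.
_model = keras.Sequential([
    keras.layers.Dense(64, activation="relu", input_shape=(128,)),
    keras.layers.Dense(10, activation="softmax"),
])
print(get_model_memory_usage(256, _model), "GB")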
|
0452e9943ff2a0c9dbcb8c870237344740502fe4
| 3,636,570
|