Dataset schema (⌀ = nullable column):

| column | dtype | length / range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 239 |
| max_stars_repo_name | string | length 5 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 3 to 239 |
| max_issues_repo_name | string | length 5 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 3 to 239 |
| max_forks_repo_name | string | length 5 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |

In every record below, the stars, issues, and forks variants of repo path, repo name, head hexsha, and licenses carry identical values, so each record summary lists them once.
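The last three columns are derived from `content`. A minimal sketch of one plausible computation (the exact formulas are not stated in this dump, so the definitions below are assumptions):

```python
# Assumed definitions of the derived per-record statistics; the
# dataset's real recipe may differ (e.g. in newline handling).
def derived_stats(content: str) -> dict:
    lines = content.splitlines() or [""]
    return {
        "avg_line_length": sum(len(line) for line in lines) / len(lines),
        "max_line_length": max(len(line) for line in lines),
        "alphanum_fraction": sum(ch.isalnum() for ch in content) / max(len(content), 1),
    }
```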
**Record 1** · hexsha `4a1302788bd59603944e6bbc625a12406a2c44fa` · size 8,016 · ext `py` · lang Python
Path: `frappe/desk/notifications.py` · Repo: `kwiesmueller/frappe` @ `6a748661c2140b15fd43437477f2ea6eef6b5de0` · Licenses: ["MIT"]
Stars: null · Issues: 5 (2020-12-04T21:08:07.000Z to 2022-03-12T00:39:56.000Z) · Forks: null

```python
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.desk.doctype.notification_settings.notification_settings import get_subscribed_documents
from six import string_types
import json
@frappe.whitelist()
@frappe.read_only()
def get_notifications():
out = {
"open_count_doctype": {},
"targets": {},
}
if (frappe.flags.in_install or
not frappe.db.get_single_value('System Settings', 'setup_complete')):
return out
config = get_notification_config()
if not config:
return out
	groups = list(config.get("for_doctype") or {}) + list(config.get("for_module") or {})
cache = frappe.cache()
notification_count = {}
notification_percent = {}
for name in groups:
count = cache.hget("notification_count:" + name, frappe.session.user)
if count is not None:
notification_count[name] = count
out['open_count_doctype'] = get_notifications_for_doctypes(config, notification_count)
out['targets'] = get_notifications_for_targets(config, notification_percent)
return out
def get_notifications_for_doctypes(config, notification_count):
"""Notifications for DocTypes"""
can_read = frappe.get_user().get_can_read()
open_count_doctype = {}
for d in config.for_doctype:
if d in can_read:
condition = config.for_doctype[d]
if d in notification_count:
open_count_doctype[d] = notification_count[d]
else:
try:
if isinstance(condition, dict):
result = frappe.get_list(d, fields=["count(*) as count"], filters=condition, ignore_ifnull=True)[0].count
else:
result = frappe.get_attr(condition)()
				except frappe.PermissionError:
					frappe.clear_messages()
					# frappe.msgprint("Permission Error in notifications for {0}".format(d))
except Exception as e:
# OperationalError: (1412, 'Table definition has changed, please retry transaction')
# InternalError: (1684, 'Table definition is being modified by concurrent DDL statement')
if e.args and e.args[0] not in (1412, 1684):
raise
else:
open_count_doctype[d] = result
frappe.cache().hset("notification_count:" + d, frappe.session.user, result)
return open_count_doctype
def get_notifications_for_targets(config, notification_percent):
"""Notifications for doc targets"""
can_read = frappe.get_user().get_can_read()
doc_target_percents = {}
# doc_target_percents = {
# "Company": {
# "Acme": 87,
# "RobotsRUs": 50,
# }, {}...
# }
for doctype in config.targets:
if doctype in can_read:
if doctype in notification_percent:
doc_target_percents[doctype] = notification_percent[doctype]
else:
doc_target_percents[doctype] = {}
d = config.targets[doctype]
condition = d["filters"]
target_field = d["target_field"]
value_field = d["value_field"]
				try:
					if isinstance(condition, dict):
						doc_list = frappe.get_list(doctype, fields=["name", target_field, value_field],
							filters=condition, limit_page_length=100, ignore_ifnull=True)
					else:
						# a non-dict condition cannot be passed to get_list as filters;
						# skip it so doc_list is never left unbound for the else block
						doc_list = []
				except frappe.PermissionError:
					frappe.clear_messages()
except Exception as e:
if e.args[0] not in (1412, 1684):
raise
else:
for doc in doc_list:
value = doc[value_field]
target = doc[target_field]
doc_target_percents[doctype][doc.name] = (value/target * 100) if value < target else 100
return doc_target_percents
def clear_notifications(user=None):
if frappe.flags.in_install:
return
cache = frappe.cache()
config = get_notification_config()
if not config:
return
for_doctype = list(config.get('for_doctype')) if config.get('for_doctype') else []
for_module = list(config.get('for_module')) if config.get('for_module') else []
groups = for_doctype + for_module
for name in groups:
if user:
cache.hdel("notification_count:" + name, user)
else:
cache.delete_key("notification_count:" + name)
frappe.publish_realtime('clear_notifications')
def clear_notification_config(user):
frappe.cache().hdel('notification_config', user)
def delete_notification_count_for(doctype):
frappe.cache().delete_key("notification_count:" + doctype)
frappe.publish_realtime('clear_notifications')
def clear_doctype_notifications(doc, method=None, *args, **kwargs):
config = get_notification_config()
if not config:
return
if isinstance(doc, string_types):
doctype = doc # assuming doctype name was passed directly
else:
doctype = doc.doctype
if doctype in config.for_doctype:
delete_notification_count_for(doctype)
return
@frappe.whitelist()
def get_notification_info():
config = get_notification_config()
out = get_notifications()
can_read = frappe.get_user().get_can_read()
conditions = {}
module_doctypes = {}
doctype_info = dict(frappe.db.sql("""select name, module from tabDocType"""))
for d in list(set(can_read + list(config.for_doctype))):
if d in config.for_doctype:
conditions[d] = config.for_doctype[d]
if d in doctype_info:
module_doctypes.setdefault(doctype_info[d], []).append(d)
out.update({
"conditions": conditions,
"module_doctypes": module_doctypes,
})
return out
def get_notification_config():
user = frappe.session.user or 'Guest'
def _get():
subscribed_documents = get_subscribed_documents()
config = frappe._dict()
hooks = frappe.get_hooks()
if hooks:
for notification_config in hooks.notification_config:
nc = frappe.get_attr(notification_config)()
for key in ("for_doctype", "for_module", "for_other", "targets"):
config.setdefault(key, {})
if key == "for_doctype":
if len(subscribed_documents) > 0:
key_config = nc.get(key, {})
subscribed_docs_config = frappe._dict()
for document in subscribed_documents:
if key_config.get(document):
subscribed_docs_config[document] = key_config.get(document)
config[key].update(subscribed_docs_config)
else:
config[key].update(nc.get(key, {}))
else:
config[key].update(nc.get(key, {}))
return config
return frappe.cache().hget("notification_config", user, _get)
def get_filters_for(doctype):
'''get open filters for doctype'''
config = get_notification_config()
doctype_config = config.get("for_doctype").get(doctype, {})
filters = doctype_config if not isinstance(doctype_config, string_types) else None
return filters
@frappe.whitelist()
@frappe.read_only()
def get_open_count(doctype, name, items=None):
	'''Get open count for given transactions and filters

	:param doctype: Reference DocType
	:param name: Reference Name
	:param items: optional list of transaction names (json/list); defaults to
		all items from the doctype's dashboard transactions'''
if frappe.flags.in_migrate or frappe.flags.in_install:
return {
"count": []
}
frappe.has_permission(doc=frappe.get_doc(doctype, name), throw=True)
meta = frappe.get_meta(doctype)
links = meta.get_dashboard_data()
	# compile all items in a list (avoid mutating a shared default argument)
	if not items:
		items = []
		for group in links.transactions:
			items.extend(group.get("items"))

	if not isinstance(items, list):
		items = json.loads(items)
out = []
for d in items:
if d in links.get("internal_links", {}):
# internal link
continue
filters = get_filters_for(d)
fieldname = links.get("non_standard_fieldnames", {}).get(d, links.fieldname)
data = {"name": d}
if filters:
# get the fieldname for the current document
# we only need open documents related to the current document
filters[fieldname] = name
total = len(frappe.get_all(d, fields="name",
filters=filters, limit=100, distinct=True, ignore_ifnull=True))
data["open_count"] = total
total = len(frappe.get_all(d, fields="name",
filters={fieldname: name}, limit=100, distinct=True, ignore_ifnull=True))
data["count"] = total
out.append(data)
out = {
"count": out,
}
if not meta.custom:
module = frappe.get_meta_module(doctype)
if hasattr(module, "get_timeline_data"):
out["timeline_data"] = module.get_timeline_data(doctype, name)
return out
```

avg_line_length: 28.628571 · max_line_length: 111 · alphanum_fraction: 0.718189
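A minimal sketch of the `notification_count:<name>` cache pattern used throughout the file above, with a plain dict of dicts standing in for frappe's Redis-backed cache (the `hget`/`hset`/`hdel` names mirror the calls above; the stand-in itself is illustrative):

```python
cache = {}  # key -> {field: value}; one hash per DocType, one field per user

def hset(key, field, value):
    cache.setdefault(key, {})[field] = value

def hget(key, field):
    return cache.get(key, {}).get(field)

def hdel(key, field):
    cache.get(key, {}).pop(field, None)

hset("notification_count:ToDo", "user@example.com", 4)
assert hget("notification_count:ToDo", "user@example.com") == 4
hdel("notification_count:ToDo", "user@example.com")  # clear_notifications(user=...)
cache.pop("notification_count:ToDo", None)           # delete_notification_count_for("ToDo")
```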
**Record 2** · hexsha `4a1302f1784f954bb5d17dfb86beac11d1580002` · size 4,879 · ext `py` · lang Python
Path: `src/super_image/models/ddbpn/modeling_ddbpn.py` · Repo: `eugenesiow/super-image` @ `44099ee61cbed0d6f54e563ce55bc36cd2565868` · Licenses: ["Apache-2.0"]
Stars: 17 (2021-07-29T07:22:53.000Z to 2022-03-30T16:23:38.000Z) · Issues: 1 (2021-10-17T10:10:01.000Z to 2021-10-17T19:47:13.000Z) · Forks: 5 (2021-09-14T13:25:08.000Z to 2022-03-30T16:23:33.000Z)

```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from .configuration_ddbpn import DdbpnConfig
from ...modeling_utils import (
MeanShift,
PreTrainedModel
)
def projection_conv(in_channels, out_channels, scale, up=True):
kernel_size, stride, padding = {
2: (6, 2, 2),
4: (8, 4, 2),
8: (12, 8, 2)
}[scale]
if up:
conv_f = nn.ConvTranspose2d
else:
conv_f = nn.Conv2d
return conv_f(
in_channels, out_channels, kernel_size,
stride=stride, padding=padding
)
class DenseProjection(nn.Module):
def __init__(self, in_channels, nr, scale, up=True, bottleneck=True):
super(DenseProjection, self).__init__()
if bottleneck:
self.bottleneck = nn.Sequential(*[
nn.Conv2d(in_channels, nr, 1),
nn.PReLU(nr)
])
inter_channels = nr
else:
self.bottleneck = None
inter_channels = in_channels
self.conv_1 = nn.Sequential(*[
projection_conv(inter_channels, nr, scale, up),
nn.PReLU(nr)
])
self.conv_2 = nn.Sequential(*[
projection_conv(nr, inter_channels, scale, not up),
nn.PReLU(inter_channels)
])
self.conv_3 = nn.Sequential(*[
projection_conv(inter_channels, nr, scale, up),
nn.PReLU(nr)
])
def forward(self, x):
if self.bottleneck is not None:
x = self.bottleneck(x)
a_0 = self.conv_1(x)
b_0 = self.conv_2(a_0)
e = b_0.sub(x)
a_1 = self.conv_3(e)
out = a_0.add(a_1)
return out
class DdbpnModel(PreTrainedModel):
config_class = DdbpnConfig
def __init__(self, args):
super(DdbpnModel, self).__init__(args)
scale = args.scale
n0 = 128
nr = 32
self.depth = 6
rgb_mean = args.rgb_mean
rgb_std = args.rgb_std
self.sub_mean = MeanShift(args.rgb_range, rgb_mean, rgb_std)
initial = [
nn.Conv2d(args.n_colors, n0, 3, padding=1),
nn.PReLU(n0),
nn.Conv2d(n0, nr, 1),
nn.PReLU(nr)
]
self.initial = nn.Sequential(*initial)
self.upmodules = nn.ModuleList()
self.downmodules = nn.ModuleList()
channels = nr
for i in range(self.depth):
self.upmodules.append(
DenseProjection(channels, nr, scale, True, i > 1)
)
if i != 0:
channels += nr
channels = nr
for i in range(self.depth - 1):
self.downmodules.append(
DenseProjection(channels, nr, scale, False, i != 0)
)
channels += nr
reconstruction = [
nn.Conv2d(self.depth * nr, args.n_colors, 3, padding=1)
]
self.reconstruction = nn.Sequential(*reconstruction)
self.add_mean = MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
def forward(self, x):
x = self.sub_mean(x)
x = self.initial(x)
h_list = []
l_list = []
for i in range(self.depth - 1):
if i == 0:
l = x
else:
l = torch.cat(l_list, dim=1)
h_list.append(self.upmodules[i](l))
l_list.append(self.downmodules[i](torch.cat(h_list, dim=1)))
h_list.append(self.upmodules[-1](torch.cat(l_list, dim=1)))
out = self.reconstruction(torch.cat(h_list, dim=1))
out = self.add_mean(out)
return out
def load_state_dict(self, state_dict, strict=False):
own_state = self.state_dict()
for name, param in state_dict.items():
if name in own_state:
if isinstance(param, nn.Parameter):
param = param.data
try:
own_state[name].copy_(param)
except Exception:
if name.find('tail') >= 0:
print('Replace pre-trained upsampler to new one...')
else:
raise RuntimeError('While copying the parameter named {}, '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}.'
.format(name, own_state[name].size(), param.size()))
elif strict:
if name.find('tail') == -1:
raise KeyError('unexpected key "{}" in state_dict'
.format(name))
if strict:
missing = set(own_state.keys()) - set(state_dict.keys())
if len(missing) > 0:
raise KeyError('missing keys in state_dict: "{}"'.format(missing))
```

avg_line_length: 30.304348 · max_line_length: 95 · alphanum_fraction: 0.520189
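A short sketch (assuming torch is installed) checking the shape arithmetic behind `projection_conv` above: for scale 2, the (kernel, stride, padding) triple (6, 2, 2) makes the transposed convolution exactly double the spatial size and the plain convolution halve it.

```python
import torch
import torch.nn as nn

# ConvTranspose2d: out = (H - 1) * stride - 2 * padding + kernel = 2 * H
# Conv2d:          out = (H + 2 * padding - kernel) // stride + 1 = H // 2
x = torch.randn(1, 8, 16, 16)
up = nn.ConvTranspose2d(8, 8, 6, stride=2, padding=2)
down = nn.Conv2d(8, 8, 6, stride=2, padding=2)
assert up(x).shape[-2:] == (32, 32)
assert down(up(x)).shape[-2:] == (16, 16)
```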
**Record 3** · hexsha `4a130338a9cd03867e78cfb8f33ac3d030229248` · size 3,163 · ext `py` · lang Python
Path: `ptah/form/vocabulary.py` · Repo: `timgates42/ptah` @ `47594cef8e80397a545bdc9e166eafcac94c72d6` · Licenses: ["BSD-3-Clause"]
Stars: 13 (2015-03-18T16:06:50.000Z to 2021-04-27T19:14:35.000Z) · Issues: null · Forks: 6 (2015-01-07T11:17:32.000Z to 2020-04-02T11:35:03.000Z)

```python
from zope.interface import implementer
from pyramid.compat import string_types
from ptah.form.interfaces import ITerm, IVocabulary
@implementer(ITerm)
class Term(object):
"""Simple tokenized term used by Vocabulary."""
def __init__(self, value, token=None,
title=None, description=None, **kw):
"""Create a term for value and token. If token is omitted,
str(value) is used for the token.
"""
self.__dict__.update(kw)
self.value = value
if token is None:
token = value
if title is None:
title = str(value)
self.token = str(token)
self.title = title
self.description = description
def __str__(self):
        return 'Term<"%s:%s:%s">' % (self.value, self.token, self.title)
__repr__ = __str__
@implementer(IVocabulary)
class Vocabulary(object):
"""Vocabulary that works from a sequence of terms."""
def __init__(self, *items):
"""Initialize the vocabulary given a list of terms.
The vocabulary keeps a reference to the list of terms passed
in; it should never be modified while the vocabulary is used.
Also it is possible to initialize vocabulary with sequence of items,
in this case constructor automatically creates `Term` objects.
"""
terms = []
for rec in items:
if ITerm.providedBy(rec):
terms.append(rec)
continue
if isinstance(rec, string_types):
rec = (rec,)
if not hasattr(rec, '__iter__'):
rec = (rec,)
if len(rec) == 2:
terms.append(self.create_term(rec[0], rec[1], rec[1]))
else:
terms.append(self.create_term(*rec))
self.by_value = {}
self.by_token = {}
self._terms = terms
for term in self._terms:
if term.value in self.by_value:
raise ValueError(
'term values must be unique: %s' % repr(term.value))
if term.token in self.by_token:
raise ValueError(
'term tokens must be unique: %s' % repr(term.token))
self.by_value[term.value] = term
self.by_token[term.token] = term
@classmethod
def create_term(cls, *args):
"""Create a single term from data."""
return Term(*args)
    def __contains__(self, value):
        try:
            return value in self.by_value
        except TypeError:
            # unhashable values can never be stored in by_value
            return False

    def get_term(self, value):
        try:
            return self.by_value[value]
        except (KeyError, TypeError):
            raise LookupError(value)

    def get_term_bytoken(self, token):
        try:
            return self.by_token[token]
        except KeyError:
            raise LookupError(token)
def get_value(self, token):
return self.get_term_bytoken(token).value
def __iter__(self):
return iter(self._terms)
def __len__(self):
return len(self.by_value)
def __getitem__(self, index):
return self._terms[index]
```

avg_line_length: 29.560748 · max_line_length: 76 · alphanum_fraction: 0.574455
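A usage sketch for the `Vocabulary` class above (assuming ptah is installed): plain strings, `(value, token)` pairs, and full `(value, token, title)` triples are all accepted.

```python
from ptah.form.vocabulary import Vocabulary

vocab = Vocabulary(
    'red',                  # bare value: token and title derive from it
    ('green', 'Green'),     # (value, token): the token doubles as the title
    ('b', 'blue', 'Blue'),  # (value, token, title)
)
assert 'red' in vocab
assert vocab.get_term('b').title == 'Blue'
assert vocab.get_value('blue') == 'b'
assert len(vocab) == 3
```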
**Record 4** · hexsha `4a1304035c6b088c7bcfd2736382df23b87b9664` · size 4,738 · ext `py` · lang Python
Path: `EP1/Gabriel Frederico Takahashi Vargas EP1.py` · Repo: `GabrielTaka/Fatec-EP-PYTHON-language-Masanori` @ `c4d30b4dd375e495c99b770dcc5e28fc5be49f04` · Licenses: ["MIT"]
Stars: 1 (2018-01-14T21:56:07.000Z) · Issues: null · Forks: null

```python
txtB = '''clnr bcdktps fvxsmsbx kqj hvbncjw wsmngb xhcvvc nfkjfn fkvl ljhqlbhs ptqtwp
vtfbq szkpmn hxl hsd qmrr jzjbjgp tsssrk fmvx bstbzwsx njdm nrvfgs bdjzlg nwnlmbx
vjqqxsp vbxj gwtll xfrmgqqj mmttm xlbnkbw bkw frgwz twrkx nfgxqmb cvr tgmw nfw mghts
bsncq zfn vpkgwcd cdbdwsjf krrkl fst lmz vvwtkrf dqt fkm pfnpqh vdslwsdk kbmfjgs fkwkbd
nssd clbpzcpd qph ksfgcvw mcs nbjbkrz jbtcbqm lpj wvsscb tpqnm gswg gtd dpf shztl
brgkfqnf xgw fsjmrvcx qzd slt xhhg vxgcfc hxfq jhxpngr nmpx gjdn kgmq hpb klzsxz
bpcqrhq wrnn mgthn wzjcvj pgft vtksnpbb qtlmgsh nrzdkx lvd kpr fwrgdmjt jzknzgk vqmkgkb
mptrq rslljk trggnlpd fcffvg wshnn tvq skddwrtg srd gbwdh pvgkrhd qndpq phmhxkck vkgfc
mzpp gph nzn lcxc jpnx mxrg xpjkjxc fkhr fqfqcjtd bzmvqp wlsdx txlttnpb vdb mxnswm
dwqnsgj mxg xmszj bdttl xmwth nfchzb vtlxg lqmxbx sgs hnw zgdsp qcgpc xhk pwbfdmtc
ftkgv hqntlps dgbwpk jzsgkb kcsb xjnjhgh ckx pxzm tlpzlxj bjdd rjjp mxqxqdxc kfvg
mqpvxk cmg jgz fmzf bnr fvfgnzx crkxcs zszmrfjv qsjgzzzp lcgsgjvh znjntxpj hdqzjc
tmzxrrg nqlnsk mwtlm cvdk vqbhj wbpdssgm nsnv nhfptrsg vjgrlfs zkvkdxz brzbhlns pfs
cvrjxjq fbbkvdhr bhtlqhvv rhjtsvv gfbrqn mvclz ghm sgkk tcmfz dtrmkn dzcjppjm mlpkx
qbqbvpsm xxd vrqq sjwwc pchqk jnwp txk pvf dpff lqrdrz ncv mwgf chnz rjlfpch rnvdjpc
mfxs shdf nfpnlr rvqw zmllbxs sfbvz hbcv bgp jtg bgsfnz hgkkwd nnzbqwgs lktdlrlx qxrs
dpcj kwfj tjsh rnxhwgd dpmdnz xgnfggb pfrxglb plzxjqlk whts jgrt clvtxn mhnhb wndwxs
wlkdtjz ghhmdq bcw vvlhntpt jnzznp bksx mvssxl kjdkt pjjzqkvx vwtbh rkkxqk xbwknvmr
nmddl rnwqq frfbhk ctvfgxzv gplktxj lljfz lntr bndwhjwp tvr gbjz lbjrnmt hzjwqn wnxsmnx
mjxh hlsssh nczkp wtbfv ztbcph cnhgxsd qddzdv ktzb jwhvvrtr qlrhnww zld cdvr xvsbdw
lvsbxzv cslq cjvrwg sdvnw hcr stnhs nsxmr npqqm psr gcgg zpvrdnc qgzf qxp npfvr rzv
ptkp drrlpnr dpptqzc vbf tdhps crjnct gvc fxt hckr vjhlfgld rfkvbr fbbhqvg kgzcf gsxm
fvjkmj xmchrvwx dcxggwhc fmt jwvbhl ppldgl mfxmqjn tvcgnrb zssswpc ghhq dvsl vlj rqrsw
pbvtkm kdtxsj qlgpv xtm rncp smnrwmlv kbndtscg gtps wbdxk qbm rqprpvj stgwsc lqklxr
bxqhw hfhp lbrhbn klhtb wslrbz lpwqpn znsxfqs ldzzlkg hphjhns thc vddhjkq dpzrl mqt
lvmptxcd bfxlg hnss bgncx slw rpgtkzz jgngwzc vjxgbs npdd hpzmp xfmnsjc scqskgsl dzxpk
bwxr tbxwpk svtlgdl dzsvqbnx wjfktwv qtwhllw nndtsx hxmjpmnv cnxck cmh kccw hdfrtd
qzcsksd dljps xcfstz rvfqp hrwx pslzhzhc bkwfwfnj phcxvpf lpwl dzftkjr vpvwnjtg srhnz
hwlqvc dsrdq ntk vnxr vbmd kctx jhg ptfhnlc xxtctgsj pwj pxwt fzzx zzzfchrk qznnxl gdzj
rjd pkj jlmwtcs twgzh ttmgnwb hwjbnqwv zgblvhlj bgg sqhfcnlk slkbcmj nwmm hgjjksxf
drfrgjx hbjvpw rjrlrfk hcv frp rbsfrrcf jtkqhh kjtqpxhw hxhjznc jgxn dtlsp llw xvrvsdf
cgfwq wrmv kcnr mpqnqr dqtswd qnnxhm jsl njxtbh zcbpcbpt bdllcsdl qbxwpg mlgtjw zbgsxg
lbcxxgsf cbwjfldn rdp vjwsjpw srvc rkln bddvc gbgw nckdtf ckvmtbvf cwnf cnsmqxwn zvxhgq
mgj pxv jms hbjr fpvxzwmx srlml rzfmdp xtxsblgt gwbvj krnn lthk dcx vhpbdwd rtvmzn
bcclbzz hcdp qlw mxpwc lxccgcxh zhprp rsfpxl pmznqzh nbbkqjt cbmhp hbtn vlgfskcx cwh
jtxhfvr jjc ttjcc cqlphsk mtgnc bdr xvpztf sxpb fxh gpgpqtrc pvpcmx wvcsmb lkd qxpm
tdbxnwrg wvcpw hwswwqg bhkwfwxm cbvwhf bgnvwqln fgn ntnhcl ffq btzjd pbzqnc thfdcpxt
rzmjrbfb lcrdlc tqlqrf ffkbj kqt qnkfd jwbt rgc zdbsvmll pfvnpwj ppq kltqfx klmjxg
qpnnjwl xdvs qdvjskx dcnhhltm kqwxr xrzwj prmqclss xvhcb lfn hlcnqnw wbnxl nmjkkmpg gth
gbb xsnhn qgjs xlt nmzhrrqn bkrhjtsl rvqzhmm lzlbc lmpfkk xtkdp ckm vhbnd kmhj xzrz
gwdxkhr xnk jwnwz knbhrwgs dfcbw nfgkxsw fbg rntpnh mvkfdhh rcltszw lhld plr xhqdvhmp
xhxsqp zqmsnl wkfxqzkx rdzhzx znzppsg hmqhxfpb nnmr kfpqcpx zbfck sdqxsnw cvl vsdrhj
pbmlw gvp pwlhnpgf rrpzcwp pmcmrmvf fttbf zgtkjdm ddqmr twtksl vkdns rffn vkjnkkv
hslbhksz glzb grwpg szzw rwmbvt grtkzrwh nwkjt tkhnb wbswcvbh mzmlgpp gggck sdcptlln
gqz vpkpbsn nnw pnbqqbk mrgnflhr tchctgjn zmfkxvms kvqwc kgsh jfdjq mqndvm kckksgp
wrrdnmjz bjjcsvms dvfqjqsf wps ngrngr rspxz bnvkmhcl kxgdbxhh kbcbg dmzwnfgm qnmtrvx
kqwjtrcg'''.split()
# Question A - Prepositions: words that start and end with 'b', 'c' or 't'
listaNarutao = []
for i in range(len(txtB)):
    if txtB[i].startswith(tuple('bct')) and txtB[i].endswith(tuple('bct')):
        listaNarutao.append(txtB[i])
print(len(listaNarutao))

# Question B - Verbs: 7-letter words ending in any letter other than 'b', 'c' or 't'
listaverbo = []
for i in range(len(txtB)):
    if len(txtB[i]) == 7:
        if txtB[i].endswith(tuple('adefghijklçmnopqrsuvwxyz')):
            listaverbo.append(txtB[i])
print(len(listaverbo))

# Question C - First person: 7-letter words that neither start nor end with 'b', 'c' or 't'
lista1 = []
for i in range(len(txtB)):
    if txtB[i].startswith(tuple('adefghijklçmnopqrsuvwxyz')) and len(txtB[i]) == 7:
        if txtB[i].endswith(tuple('adefghijklçmnopqrsuvwxyz')):
            lista1.append(txtB[i])
print(len(lista1))
```

avg_line_length: 59.225 · max_line_length: 88 · alphanum_fraction: 0.795905
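An equivalent, more idiomatic sketch of the three counts above (same letter sets and length checks, reusing `txtB` from the file):

```python
bct = tuple('bct')
rest = tuple('adefghijklçmnopqrsuvwxyz')

count_a = sum(1 for w in txtB if w.startswith(bct) and w.endswith(bct))
count_b = sum(1 for w in txtB if len(w) == 7 and w.endswith(rest))
count_c = sum(1 for w in txtB if len(w) == 7
              and w.startswith(rest) and w.endswith(rest))
print(count_a, count_b, count_c)
```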
**Record 5** · hexsha `4a1304d71a3b9251fe528ae448edd98c8f69d082` · size 16,544 · ext `py` · lang Python
Path: `GUI.py` · Repo: `gamesun/MyTerm-for-WangH` @ `e3a1cc3f58ef1b17916e32debff6eb7d917cdbb1` · Licenses: ["BSD-3-Clause"]
Stars: 1 (2020-08-26T07:47:22.000Z) · Issues: null · Forks: null

```python
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# generated by wxGlade 0.6.8 (standalone edition) on Tue Jan 14 10:41:03 2014
#
import wx
import wx.grid
# begin wxGlade: dependencies
# end wxGlade
# begin wxGlade: extracode
# end wxGlade
class MyFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrame.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.statusbar = self.CreateStatusBar(5, wx.ST_SIZEGRIP)
self.SplitterWindow = wx.SplitterWindow(self, wx.ID_ANY, style=wx.SP_3D | wx.SP_BORDER)
self.window_1_pane_1 = wx.ScrolledWindow(self.SplitterWindow, wx.ID_ANY, style=wx.SIMPLE_BORDER | wx.TAB_TRAVERSAL)
self.pnlSettingBar = wx.Panel(self.window_1_pane_1, wx.ID_ANY)
self.btnHideBar = wx.Button(self.pnlSettingBar, wx.ID_ANY, "Hide")
self.btnEnumPorts = wx.Button(self.pnlSettingBar, wx.ID_ANY, "EnumPorts")
self.label_1 = wx.StaticText(self.pnlSettingBar, wx.ID_ANY, "Port")
self.cmbPort = wx.ComboBox(self.pnlSettingBar, wx.ID_ANY, choices=[], style=wx.CB_DROPDOWN)
self.label_2 = wx.StaticText(self.pnlSettingBar, wx.ID_ANY, "Baud Rate")
self.cmbBaudRate = wx.ComboBox(self.pnlSettingBar, wx.ID_ANY, choices=["300", "600", "1200", "1800", "2400", "4800", "9600", "19200", "38400", "57600", "115200", "230400", "460800", "500000", "576000", "921600", "1000000", "1152000", "1500000", "2000000", "2500000", "3000000", "3500000", "4000000"], style=wx.CB_DROPDOWN)
self.label_3 = wx.StaticText(self.pnlSettingBar, wx.ID_ANY, "Data Bits")
self.choiceDataBits = wx.Choice(self.pnlSettingBar, wx.ID_ANY, choices=["5", "6", "7", "8"])
self.label_4 = wx.StaticText(self.pnlSettingBar, wx.ID_ANY, "Parity")
self.choiceParity = wx.Choice(self.pnlSettingBar, wx.ID_ANY, choices=["None", "Even", "Odd", "Mark", "Space"])
self.label_5 = wx.StaticText(self.pnlSettingBar, wx.ID_ANY, "Stop Bits")
self.choiceStopBits = wx.Choice(self.pnlSettingBar, wx.ID_ANY, choices=["1", "1.5", "2"])
self.chkboxrtscts = wx.CheckBox(self.pnlSettingBar, wx.ID_ANY, "RTS/CTS")
self.chkboxxonxoff = wx.CheckBox(self.pnlSettingBar, wx.ID_ANY, "Xon/Xoff")
self.sizer_6_staticbox = wx.StaticBox(self.pnlSettingBar, wx.ID_ANY, "HandShake")
self.btnOpen = wx.Button(self.pnlSettingBar, wx.ID_ANY, "Open")
self.btnClear = wx.Button(self.pnlSettingBar, wx.ID_ANY, "Clear Screen")
self.window_1_pane_2 = wx.Panel(self.SplitterWindow, wx.ID_ANY)
self.pnlGrid = wx.ScrolledWindow(self.window_1_pane_2, wx.ID_ANY, style=wx.SIMPLE_BORDER | wx.TAB_TRAVERSAL)
self.grid_csv = wx.grid.Grid(self.pnlGrid, wx.ID_ANY, size=(1, 1))
self.button_1 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send1")
self.button_2 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send2")
self.button_3 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send3")
self.button_4 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send4")
self.button_5 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send5")
self.button_6 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send6")
self.button_7 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send7")
self.button_8 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send8")
self.button_9 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send9")
self.button_10 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send10")
self.button_11 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 11")
self.button_12 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 12")
self.button_13 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 13")
self.button_14 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 14")
self.button_15 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 15")
self.button_16 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 16")
self.button_17 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 17")
self.button_18 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 18")
self.button_19 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 19")
self.button_20 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 20")
self.button_21 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 21")
self.button_22 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 22")
self.button_23 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 23")
self.button_24 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 24")
self.button_25 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 25")
self.button_26 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 26")
self.button_27 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 27")
self.button_28 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 28")
self.button_29 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 29")
self.button_30 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 30")
self.button_31 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 31")
self.button_32 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 32")
self.button_33 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 33")
self.button_34 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 34")
self.button_35 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 35")
self.button_36 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 36")
self.button_37 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 37")
self.button_38 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 38")
self.button_39 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 39")
self.button_40 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 40")
self.button_41 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 41")
self.button_42 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 42")
self.button_43 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 43")
self.button_44 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 44")
self.button_45 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 45")
self.button_46 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 46")
self.button_47 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 47")
self.button_48 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 48")
self.button_49 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 49")
self.button_50 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 50")
self.txtctlMain = wx.TextCtrl(self.window_1_pane_2, wx.ID_ANY, "", style=wx.TE_MULTILINE | wx.TE_RICH | wx.TE_RICH2 | wx.TE_AUTO_URL | wx.TE_LINEWRAP | wx.TE_WORDWRAP)
self.pnlTransmitHex = wx.Panel(self.window_1_pane_2, wx.ID_ANY)
self.label_6 = wx.StaticText(self.pnlTransmitHex, wx.ID_ANY, "Transmit Hex")
self.btnTransmitHex = wx.Button(self.pnlTransmitHex, wx.ID_ANY, "Transmit")
self.txtTransmitHex = wx.TextCtrl(self.pnlTransmitHex, wx.ID_ANY, "", style=wx.TE_MULTILINE | wx.TE_RICH | wx.TE_RICH2 | wx.TE_AUTO_URL | wx.TE_LINEWRAP | wx.TE_WORDWRAP)
self.__set_properties()
self.__do_layout()
# end wxGlade
def __set_properties(self):
# begin wxGlade: MyFrame.__set_properties
self.SetTitle("MyTerm")
self.SetSize((834, 603))
self.statusbar.SetStatusWidths([-28, -10, -10, 55, 105])
# statusbar fields
statusbar_fields = ["", "Rx:0", "Tx:0", "Rx:Ascii", "Local echo:Off"]
for i in range(len(statusbar_fields)):
self.statusbar.SetStatusText(statusbar_fields[i], i)
self.cmbBaudRate.SetSelection(7)
self.choiceDataBits.SetSelection(3)
self.choiceParity.SetSelection(0)
self.choiceStopBits.SetSelection(0)
self.btnOpen.SetMinSize((-1, 30))
self.btnClear.SetMinSize((-1, 30))
self.pnlSettingBar.SetMinSize((158, -1))
self.window_1_pane_1.SetScrollRate(1, 1)
self.grid_csv.CreateGrid(50, 9)
self.grid_csv.SetRowLabelSize(25)
self.grid_csv.SetColLabelSize(21)
self.button_1.SetMinSize((-1, 20))
self.button_2.SetMinSize((-1, 20))
self.button_3.SetMinSize((-1, 20))
self.button_4.SetMinSize((-1, 20))
self.button_5.SetMinSize((-1, 20))
self.button_6.SetMinSize((-1, 20))
self.button_7.SetMinSize((-1, 20))
self.button_8.SetMinSize((-1, 20))
self.button_9.SetMinSize((-1, 20))
self.button_10.SetMinSize((-1, 20))
self.button_11.SetMinSize((-1, 20))
self.button_12.SetMinSize((-1, 20))
self.button_13.SetMinSize((-1, 20))
self.button_14.SetMinSize((-1, 20))
self.button_15.SetMinSize((-1, 20))
self.button_16.SetMinSize((-1, 20))
self.button_17.SetMinSize((-1, 20))
self.button_18.SetMinSize((-1, 20))
self.button_19.SetMinSize((-1, 20))
self.button_20.SetMinSize((-1, 20))
self.button_21.SetMinSize((-1, 20))
self.button_22.SetMinSize((-1, 20))
self.button_23.SetMinSize((-1, 20))
self.button_24.SetMinSize((-1, 20))
self.button_25.SetMinSize((-1, 20))
self.button_26.SetMinSize((-1, 20))
self.button_27.SetMinSize((-1, 20))
self.button_28.SetMinSize((-1, 20))
self.button_29.SetMinSize((-1, 20))
self.button_30.SetMinSize((-1, 20))
self.button_31.SetMinSize((-1, 20))
self.button_32.SetMinSize((-1, 20))
self.button_33.SetMinSize((-1, 20))
self.button_34.SetMinSize((-1, 20))
self.button_35.SetMinSize((-1, 20))
self.button_36.SetMinSize((-1, 20))
self.button_37.SetMinSize((-1, 20))
self.button_38.SetMinSize((-1, 20))
self.button_39.SetMinSize((-1, 20))
self.button_40.SetMinSize((-1, 20))
self.button_41.SetMinSize((-1, 20))
self.button_42.SetMinSize((-1, 20))
self.button_43.SetMinSize((-1, 20))
self.button_44.SetMinSize((-1, 20))
self.button_45.SetMinSize((-1, 20))
self.button_46.SetMinSize((-1, 20))
self.button_47.SetMinSize((-1, 20))
self.button_48.SetMinSize((-1, 20))
self.button_49.SetMinSize((-1, 20))
self.button_50.SetMinSize((-1, 20))
self.pnlGrid.SetMinSize((-1, 225))
self.pnlGrid.SetScrollRate(10, 20)
self.txtctlMain.SetFont(wx.Font(10, wx.MODERN, wx.NORMAL, wx.NORMAL, 0, "Consolas"))
self.pnlTransmitHex.SetMinSize((-1, 80))
# end wxGlade
def __do_layout(self):
# begin wxGlade: MyFrame.__do_layout
sizer_1 = wx.BoxSizer(wx.HORIZONTAL)
sizer_5 = wx.BoxSizer(wx.VERTICAL)
sizer_7 = wx.BoxSizer(wx.VERTICAL)
sizer_8 = wx.BoxSizer(wx.HORIZONTAL)
sizer_7_copy = wx.BoxSizer(wx.HORIZONTAL)
sizer_8_copy = wx.BoxSizer(wx.VERTICAL)
sizer_9 = wx.BoxSizer(wx.HORIZONTAL)
sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
sizer_3 = wx.BoxSizer(wx.VERTICAL)
self.sizer_6_staticbox.Lower()
sizer_6 = wx.StaticBoxSizer(self.sizer_6_staticbox, wx.HORIZONTAL)
grid_sizer_1 = wx.GridSizer(6, 2, 0, 0)
sizer_4 = wx.BoxSizer(wx.HORIZONTAL)
sizer_4.Add(self.btnHideBar, 1, wx.ALL | wx.EXPAND, 1)
sizer_4.Add(self.btnEnumPorts, 1, wx.ALL | wx.EXPAND, 1)
sizer_3.Add(sizer_4, 0, wx.EXPAND, 0)
grid_sizer_1.Add(self.label_1, 0, wx.ALL, 1)
grid_sizer_1.Add(self.cmbPort, 0, wx.ALL | wx.EXPAND, 1)
grid_sizer_1.Add(self.label_2, 0, wx.ALL, 1)
grid_sizer_1.Add(self.cmbBaudRate, 0, wx.ALL | wx.EXPAND, 1)
grid_sizer_1.Add(self.label_3, 0, wx.ALL, 1)
grid_sizer_1.Add(self.choiceDataBits, 0, wx.ALL | wx.EXPAND, 1)
grid_sizer_1.Add(self.label_4, 0, wx.ALL, 1)
grid_sizer_1.Add(self.choiceParity, 0, wx.ALL | wx.EXPAND, 1)
grid_sizer_1.Add(self.label_5, 0, wx.ALL, 1)
grid_sizer_1.Add(self.choiceStopBits, 0, wx.ALL | wx.EXPAND, 1)
sizer_3.Add(grid_sizer_1, 0, wx.ALL | wx.EXPAND, 1)
sizer_6.Add(self.chkboxrtscts, 1, wx.ALL | wx.EXPAND, 1)
sizer_6.Add(self.chkboxxonxoff, 1, wx.ALL | wx.EXPAND, 1)
sizer_3.Add(sizer_6, 0, wx.LEFT | wx.RIGHT | wx.EXPAND, 2)
sizer_3.Add(self.btnOpen, 0, wx.ALL | wx.EXPAND, 5)
sizer_3.Add(self.btnClear, 0, wx.ALL | wx.EXPAND, 5)
self.pnlSettingBar.SetSizer(sizer_3)
sizer_2.Add(self.pnlSettingBar, 1, wx.EXPAND, 0)
self.window_1_pane_1.SetSizer(sizer_2)
sizer_9.Add(self.grid_csv, 1, wx.EXPAND, 0)
sizer_7_copy.Add(sizer_9, 1, wx.EXPAND, 0)
sizer_8_copy.Add((20, 20), 0, 0, 0)
sizer_8_copy.Add(self.button_1, 0, 0, 0)
sizer_8_copy.Add(self.button_2, 0, 0, 0)
sizer_8_copy.Add(self.button_3, 0, 0, 0)
sizer_8_copy.Add(self.button_4, 0, 0, 0)
sizer_8_copy.Add(self.button_5, 0, 0, 0)
sizer_8_copy.Add(self.button_6, 0, 0, 0)
sizer_8_copy.Add(self.button_7, 0, 0, 0)
sizer_8_copy.Add(self.button_8, 0, 0, 0)
sizer_8_copy.Add(self.button_9, 0, 0, 0)
sizer_8_copy.Add(self.button_10, 0, 0, 0)
sizer_8_copy.Add(self.button_11, 0, 0, 0)
sizer_8_copy.Add(self.button_12, 0, 0, 0)
sizer_8_copy.Add(self.button_13, 0, 0, 0)
sizer_8_copy.Add(self.button_14, 0, 0, 0)
sizer_8_copy.Add(self.button_15, 0, 0, 0)
sizer_8_copy.Add(self.button_16, 0, 0, 0)
sizer_8_copy.Add(self.button_17, 0, 0, 0)
sizer_8_copy.Add(self.button_18, 0, 0, 0)
sizer_8_copy.Add(self.button_19, 0, 0, 0)
sizer_8_copy.Add(self.button_20, 0, 0, 0)
sizer_8_copy.Add(self.button_21, 0, 0, 0)
sizer_8_copy.Add(self.button_22, 0, 0, 0)
sizer_8_copy.Add(self.button_23, 0, 0, 0)
sizer_8_copy.Add(self.button_24, 0, 0, 0)
sizer_8_copy.Add(self.button_25, 0, 0, 0)
sizer_8_copy.Add(self.button_26, 0, 0, 0)
sizer_8_copy.Add(self.button_27, 0, 0, 0)
sizer_8_copy.Add(self.button_28, 0, 0, 0)
sizer_8_copy.Add(self.button_29, 0, 0, 0)
sizer_8_copy.Add(self.button_30, 0, 0, 0)
sizer_8_copy.Add(self.button_31, 0, 0, 0)
sizer_8_copy.Add(self.button_32, 0, 0, 0)
sizer_8_copy.Add(self.button_33, 0, 0, 0)
sizer_8_copy.Add(self.button_34, 0, 0, 0)
sizer_8_copy.Add(self.button_35, 0, 0, 0)
sizer_8_copy.Add(self.button_36, 0, 0, 0)
sizer_8_copy.Add(self.button_37, 0, 0, 0)
sizer_8_copy.Add(self.button_38, 0, 0, 0)
sizer_8_copy.Add(self.button_39, 0, 0, 0)
sizer_8_copy.Add(self.button_40, 0, 0, 0)
sizer_8_copy.Add(self.button_41, 0, 0, 0)
sizer_8_copy.Add(self.button_42, 0, 0, 0)
sizer_8_copy.Add(self.button_43, 0, 0, 0)
sizer_8_copy.Add(self.button_44, 0, 0, 0)
sizer_8_copy.Add(self.button_45, 0, 0, 0)
sizer_8_copy.Add(self.button_46, 0, 0, 0)
sizer_8_copy.Add(self.button_47, 0, 0, 0)
sizer_8_copy.Add(self.button_48, 0, 0, 0)
sizer_8_copy.Add(self.button_49, 0, 0, 0)
sizer_8_copy.Add(self.button_50, 0, 0, 0)
sizer_7_copy.Add(sizer_8_copy, 0, 0, 0)
self.pnlGrid.SetSizer(sizer_7_copy)
sizer_5.Add(self.pnlGrid, 0, wx.EXPAND, 0)
sizer_5.Add(self.txtctlMain, 1, wx.EXPAND, 0)
sizer_8.Add(self.label_6, 0, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_8.Add((50, 20), 1, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_8.Add(self.btnTransmitHex, 0, wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL, 2)
sizer_8.Add((10, 20), 0, 0, 0)
sizer_7.Add(sizer_8, 0, wx.EXPAND, 0)
sizer_7.Add(self.txtTransmitHex, 1, wx.EXPAND, 0)
self.pnlTransmitHex.SetSizer(sizer_7)
sizer_5.Add(self.pnlTransmitHex, 0, wx.EXPAND, 0)
self.window_1_pane_2.SetSizer(sizer_5)
self.SplitterWindow.SplitVertically(self.window_1_pane_1, self.window_1_pane_2, 16)
sizer_1.Add(self.SplitterWindow, 1, wx.EXPAND, 0)
self.SetSizer(sizer_1)
self.Layout()
self.Centre()
# end wxGlade
# end of class MyFrame
class MyApp(wx.App):
def OnInit(self):
wx.InitAllImageHandlers()
mainFrame = MyFrame(None, wx.ID_ANY, "")
self.SetTopWindow(mainFrame)
mainFrame.Show()
return 1
# end of class MyApp
if __name__ == "__main__":
app = MyApp(0)
app.MainLoop()
```

avg_line_length: 54.242623 · max_line_length: 331 · alphanum_fraction: 0.628868
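The fifty near-identical `Send` buttons above are wxGlade output. A hypothetical hand-written refactor (not part of the generated file) could build them in a loop inside `MyFrame.__init__`, using the same labels and sizer:

```python
# Sketch: would replace the button_1..button_50 blocks and their sizer adds.
# Labels Send1..Send10 have no space; "Send 11".."Send 50" do, as generated.
self.send_buttons = []
for n in range(1, 51):
    label = "Send%d" % n if n <= 10 else "Send %d" % n
    btn = wx.Button(self.pnlGrid, wx.ID_ANY, label)
    btn.SetMinSize((-1, 20))
    sizer_8_copy.Add(btn, 0, 0, 0)
    self.send_buttons.append(btn)
```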
**Record 6** · hexsha `4a1305219999dbe88334173058cadeed2b094098` · size 364 · ext `py` · lang Python
Path: `AdvancedPythonModules/collections_module_defaultdict.py` · Repo: `theprogrammingthinker/Python-practice` @ `fef11a7fbd5082a0614b01f88a13ea29d68860bf` · Licenses: ["Unlicense"]
Stars: 1 (2017-05-02T10:28:36.000Z) · Issues: null · Forks: null

```python
from collections import defaultdict
d = {"k1": 1}
print(d["k1"])
# d['k2']  # would raise:
# KeyError: 'k2'
d = defaultdict(object)
print(d['one'])
# <object object at 0x00000174A073A0A0>
for item in d:
print(item)
# one
d = defaultdict(lambda: 0)
print(d['one'])
print(d['two'])
print(d)
# defaultdict(<function <lambda> at 0x00000217022C3E18>, {'one': 0, 'two': 0})
```

avg_line_length: 15.826087 · max_line_length: 78 · alphanum_fraction: 0.645604
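One more common idiom from the same module, not shown in the snippet above: `defaultdict(list)` groups items without explicit key checks.

```python
from collections import defaultdict

groups = defaultdict(list)
for word in ["apple", "avocado", "banana"]:
    groups[word[0]].append(word)  # missing keys start as empty lists
print(dict(groups))
# {'a': ['apple', 'avocado'], 'b': ['banana']}
```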
**Record 7** · hexsha `4a1305a44cb22b0ff8c9aea59c06d7be304e0dfb` · size 445 · ext `py` · lang Python
Path: `server/model/user.py` · Repo: `dyf102/Gomoku-online` @ `889df373c9a9827a867d1d4559ec105f4358d4c6` · Licenses: ["Apache-2.0"]
Stars: null · Issues: null · Forks: null

```python
IDLE = 0
OFFLINE = 1
INGAME = 2
class User(object):
def __init__(self, username, uid, point=0, status=IDLE):
self.username = username
self.uid = uid
self.point = point
self.status = status
    def __str__(self):
        return '{} {} {} {}'.format(self.username, self.uid, self.point, self.status)
def __eq__(self, other):
return self.username == other.username and self.uid == other.uid
```

avg_line_length: 21.190476 · max_line_length: 86 · alphanum_fraction: 0.606742
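A short usage sketch for the model above (assuming the module is importable as `server.model.user`): equality is defined on `username` and `uid` only, so `point` and `status` differences do not affect it. Note that defining `__eq__` without `__hash__` makes instances unhashable under Python 3.

```python
from server.model.user import User, IDLE, INGAME  # module path assumed from the record

u1 = User("alice", 1, point=10, status=IDLE)
u2 = User("alice", 1, point=99, status=INGAME)
assert u1 == u2      # same identity, different point/status
print(u1)            # alice 1 10 0
```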
**Record 8** · hexsha `4a13065831fab61cb838c9567bd8066d374617a2` · size 1,877 · ext `py` · lang Python
Path: `software/python/simple_pendulum/controllers/energy_shaping/unit_test.py` · Repo: `alopezrivera/torque_limited_simple_pendulum` @ `2164a41d65c16743ba260a79a04a04cdd72c3903` · Licenses: ["BSD-3-Clause"]
Stars: 15 (2021-10-16T04:50:34.000Z to 2022-03-26T23:54:19.000Z) · Issues: 17 (2021-11-30T22:17:28.000Z to 2022-03-21T12:28:45.000Z) · Forks: 13 (2021-10-18T07:45:29.000Z to 2022-03-22T12:56:33.000Z)

```python
"""
Unit Tests
==========
"""
import unittest
import numpy as np
from simple_pendulum.model.pendulum_plant import PendulumPlant
from simple_pendulum.simulation.simulation import Simulator
from simple_pendulum.controllers.energy_shaping.energy_shaping_controller import EnergyShapingController
class Test(unittest.TestCase):
epsilon = 0.2
def test_0_energy_shaping_swingup(self):
mass = 0.57288
length = 0.5
damping = 0.05
gravity = 9.81
coulomb_fric = 0.0
torque_limit = 1.0
inertia = mass*length*length
pendulum = PendulumPlant(mass=mass,
length=length,
damping=damping,
gravity=gravity,
coulomb_fric=coulomb_fric,
inertia=inertia,
torque_limit=torque_limit)
controller = EnergyShapingController(mass, length, damping, gravity)
controller.set_goal([np.pi, 0])
sim = Simulator(plant=pendulum)
dt = 0.01
t_final = 5.0
T, X, U = sim.simulate(t0=0.0,
x0=[0.01, 0.0],
tf=t_final,
dt=dt,
controller=controller,
integrator="runge_kutta")
self.assertIsInstance(T, list)
self.assertIsInstance(X, list)
self.assertIsInstance(U, list)
swingup_success = True
if np.abs((X[-1][0] % (2*np.pi)) - np.pi) > self.epsilon:
if np.abs(X[-1][1]) > self.epsilon:
swingup_success = False
print("Energy Shaping Controller did not swing up",
"final state: ", X[-1])
self.assertTrue(swingup_success)
```

avg_line_length: 29.793651 · max_line_length: 104 · alphanum_fraction: 0.523175
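For context, a textbook energy-shaping swing-up law has the form below (a generic sketch with an assumed gain `k`; not necessarily the exact law inside the repository's `EnergyShapingController`):

```python
import numpy as np

# With theta measured from the hanging-down position:
#   E(theta, omega) = 0.5 * I * omega**2 - m * g * l * cos(theta)
# and the controller drives E toward the upright energy E_des = m * g * l:
#   u = -k * omega * (E - E_des)
m, l, g = 0.57288, 0.5, 9.81   # parameters from the test above
I, k = m * l * l, 1.0          # inertia as in the test; k is a free gain

def energy_shaping_torque(theta, omega):
    E = 0.5 * I * omega**2 - m * g * l * np.cos(theta)
    return -k * omega * (E - m * g * l)
```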
**Record 9** · hexsha `4a130676d09f041d7eb8e340dc1559192942c0c9` · size 1,286 · ext `py` · lang Python
Path: `src/python/pants/util/retry.py` · Repo: `revl/pants` @ `8ad83e4ca80c095d44efceafd8b41e575da39c65` · Licenses: ["Apache-2.0"]
Stars: 1 (2020-06-13T22:01:39.000Z) · Issues: null · Forks: 3 (2020-06-30T08:28:13.000Z to 2021-07-28T09:35:57.000Z)

```python
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
import time
logger = logging.getLogger(__name__)
def retry_on_exception(func, max_retries, exception_types, backoff_func=lambda n: 0):
"""Retry a callable against a set of exceptions, optionally sleeping between retries.
:param callable func: The callable to retry.
:param int max_retries: The maximum number of times to attempt running the function.
:param tuple exception_types: The types of exceptions to catch for retry.
:param callable backoff_func: A callable that will be called with the current attempt count to
determine the amount of time to sleep between retries. E.g. a
max_retries=4 with a backoff_func=lambda n: n * n will result in
sleeps of [1, 4, 9] between retries. Defaults to no backoff.
"""
for i in range(0, max_retries):
if i:
time.sleep(backoff_func(i))
try:
return func()
except exception_types as e:
logger.debug(f"encountered exception on retry #{i}: {e!r}")
if i == max_retries - 1:
raise
```

avg_line_length: 42.866667 · max_line_length: 98 · alphanum_fraction: 0.642302
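A usage sketch for `retry_on_exception` above (assuming the module is importable as `pants.util.retry`); the flaky callable is illustrative. The third attempt succeeds after sleeps of 0.01 s and 0.04 s:

```python
from pants.util.retry import retry_on_exception  # module path assumed from the record

attempts = {"n": 0}

def flaky():
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise IOError("transient")
    return "ok"

result = retry_on_exception(flaky, max_retries=3,
                            exception_types=(IOError,),
                            backoff_func=lambda n: 0.01 * n * n)
assert result == "ok" and attempts["n"] == 3
```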
**Record 10** · hexsha `4a1306d8e9ba9d6249e579516852f99bb8faa673` · size 2,474 · ext `py` · lang Python
Path: `examples/system/light_sleep/example_test.py` · Repo: `BU-EC444/esp-idf` @ `5963de1caf284b14ddfed11e52730a55e3783a3d` · Licenses: ["Apache-2.0"]
Stars: 4 (2022-03-15T22:43:28.000Z to 2022-03-28T01:25:08.000Z) · Issues: null · Forks: 1 (2022-03-28T03:15:38.000Z)

```python
from __future__ import print_function
import re
import time
import ttfw_idf
ENTERING_SLEEP_STR = 'Entering light sleep'
EXIT_SLEEP_REGEX = re.compile(r'Returned from light sleep, reason: (\w+), t=(\d+) ms, slept for (\d+) ms')
WAITING_FOR_GPIO_STR = re.compile(r'Waiting for GPIO\d to go high...')
WAKEUP_INTERVAL_MS = 2000
@ttfw_idf.idf_example_test(env_tag='Example_GENERIC', target=['esp32', 'esp32s2', 'esp32c3', 'esp32s3'])
def test_examples_system_light_sleep(env, extra_data):
dut = env.get_dut('light_sleep_example', 'examples/system/light_sleep')
dut.start_app()
# Ensure DTR and RTS are de-asserted for proper control of GPIO0
dut.port_inst.setDTR(False)
dut.port_inst.setRTS(False)
# enter sleep first time
dut.expect(ENTERING_SLEEP_STR, timeout=30)
# don't check timing here, might be cache dependent
dut.expect(EXIT_SLEEP_REGEX)
print('Got first sleep period')
# enter sleep second time
dut.expect(ENTERING_SLEEP_STR)
groups = dut.expect(EXIT_SLEEP_REGEX)
print('Got second sleep period, wakeup from {}, slept for {}'.format(groups[0], groups[2]))
# sleep time error should be less than 1ms
assert(groups[0] == 'timer' and int(groups[2]) >= WAKEUP_INTERVAL_MS - 1 and int(groups[2]) <= WAKEUP_INTERVAL_MS + 1)
# this time we'll test gpio wakeup
dut.expect(ENTERING_SLEEP_STR)
print('Pulling GPIO0 low using DTR')
dut.port_inst.setDTR(True)
time.sleep(1)
groups = dut.expect(EXIT_SLEEP_REGEX)
print('Got third sleep period, wakeup from {}, slept for {}'.format(groups[0], groups[2]))
assert(groups[0] == 'pin' and int(groups[2]) < WAKEUP_INTERVAL_MS)
dut.expect(WAITING_FOR_GPIO_STR)
print('Is waiting for GPIO...')
dut.port_inst.setDTR(False)
dut.expect(ENTERING_SLEEP_STR)
print('Went to sleep again')
# Write 'U' to uart, 'U' in ascii is 0x55 which contains 8 edges in total
dut.write('U')
time.sleep(1)
groups = dut.expect(EXIT_SLEEP_REGEX)
    print('Got fourth sleep period, wakeup from {}, slept for {}'.format(groups[0], groups[2]))
assert(groups[0] == 'uart' and int(groups[2]) < WAKEUP_INTERVAL_MS)
print('Went to sleep again')
groups = dut.expect(EXIT_SLEEP_REGEX)
assert(groups[0] == 'timer' and int(groups[2]) >= WAKEUP_INTERVAL_MS - 1 and int(groups[2]) <= WAKEUP_INTERVAL_MS + 1)
print('Woke up from timer again')
if __name__ == '__main__':
test_examples_system_light_sleep()
```

avg_line_length: 36.382353 · max_line_length: 122 · alphanum_fraction: 0.700889
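Why the test writes `'U'` for the UART wakeup: ASCII `'U'` is 0x55, an alternating bit pattern, so a single framed byte produces the line edges the wakeup detector counts. A two-line check:

```python
assert ord('U') == 0x55
print(format(ord('U'), '08b'))  # 01010101
```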
**Record 11** · hexsha `4a13080ba9fb7e4b45111e0f195dbce3a4cfb728` · size 20,124 · ext `py` · lang Python
Path: `content/test/gpu/gpu_tests/gpu_integration_test_unittest.py` · Repo: `zealoussnow/chromium` @ `fd8a8914ca0183f0add65ae55f04e287543c7d4a` · Licenses: ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"]
Stars: 14,668 (2015-01-01T01:57:10.000Z to 2022-03-31T23:33:32.000Z) · Issues: 86 (2015-10-21T13:02:42.000Z to 2022-03-14T07:50:50.000Z) · Forks: 5,941 (2015-01-02T11:32:21.000Z to 2022-03-31T16:35:46.000Z)

```python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# It's reasonable for unittests to be messing with protected members.
# pylint: disable=protected-access
from __future__ import print_function
import json
import os
import sys
import unittest
import tempfile
if sys.version_info[0] == 2:
import mock
else:
import unittest.mock as mock
import six
import gpu_project_config
import run_gpu_integration_test
from gpu_tests import context_lost_integration_test
from gpu_tests import gpu_helper
from gpu_tests import gpu_integration_test
from gpu_tests import path_util
from gpu_tests import webgl_conformance_integration_test
from py_utils import tempfile_ext
from telemetry.internal.util import binary_manager
from telemetry.internal.platform import system_info
from telemetry.testing import browser_test_runner
from telemetry.testing import fakes
from telemetry.testing import run_browser_tests
path_util.AddDirToPathIfNeeded(path_util.GetChromiumSrcDir(), 'tools', 'perf')
from chrome_telemetry_build import chromium_config
# Unittest test cases are defined as public methods, so ignore complaints about
# having too many.
# pylint: disable=too-many-public-methods
VENDOR_NVIDIA = 0x10DE
VENDOR_AMD = 0x1002
VENDOR_INTEL = 0x8086
VENDOR_STRING_IMAGINATION = 'Imagination Technologies'
DEVICE_STRING_SGX = 'PowerVR SGX 554'
def _GetSystemInfo( # pylint: disable=too-many-arguments
gpu='',
device='',
vendor_string='',
device_string='',
passthrough=False,
gl_renderer=''):
sys_info = {
'model_name': '',
'gpu': {
'devices': [
{
'vendor_id': gpu,
'device_id': device,
'vendor_string': vendor_string,
'device_string': device_string
},
],
'aux_attributes': {
'passthrough_cmd_decoder': passthrough
}
}
}
if gl_renderer:
sys_info['gpu']['aux_attributes']['gl_renderer'] = gl_renderer
return system_info.SystemInfo.FromDict(sys_info)
def _GetTagsToTest(browser, test_class=None):
test_class = test_class or gpu_integration_test.GpuIntegrationTest
tags = None
with mock.patch.object(
test_class, 'ExpectationsFiles', return_value=['exp.txt']):
tags = set(test_class.GetPlatformTags(browser))
return tags
def _GenerateNvidiaExampleTagsForTestClassAndArgs(test_class, args):
tags = None
with mock.patch.object(
test_class, 'ExpectationsFiles', return_value=['exp.txt']):
_ = [_ for _ in test_class.GenerateGpuTests(args)]
platform = fakes.FakePlatform('win', 'win10')
browser = fakes.FakeBrowser(platform, 'release')
browser._returned_system_info = _GetSystemInfo(
gpu=VENDOR_NVIDIA, device=0x1cb3, gl_renderer='ANGLE Direct3D9')
tags = _GetTagsToTest(browser, test_class)
return tags
class _IntegrationTestArgs(object):
"""Struct-like object for defining an integration test."""
def __init__(self, test_name):
self.test_name = test_name
self.failures = []
self.successes = []
self.skips = []
self.additional_args = []
class GpuIntegrationTestUnittest(unittest.TestCase):
def setUp(self):
self._test_state = {}
self._test_result = {}
def _RunGpuIntegrationTests(self, test_name, extra_args=None):
extra_args = extra_args or []
unittest_config = chromium_config.ChromiumConfig(
top_level_dir=path_util.GetGpuTestDir(),
benchmark_dirs=[
os.path.join(path_util.GetGpuTestDir(), 'unittest_data')
])
with binary_manager.TemporarilyReplaceBinaryManager(None), \
mock.patch.object(gpu_project_config, 'CONFIG', unittest_config):
# TODO(crbug.com/1103792): Using NamedTemporaryFile() as a generator is
# causing windows bots to fail. When the issue is fixed with
# tempfile_ext.NamedTemporaryFile(), put it in the list of generators
# starting this with block. Also remove the try finally statement
# below.
temp_file = tempfile.NamedTemporaryFile(delete=False)
temp_file.close()
try:
test_argv = [
test_name,
'--write-full-results-to=%s' % temp_file.name,
# We don't want the underlying typ-based tests to report their
# results to ResultDB.
'--disable-resultsink',
] + extra_args
processed_args = run_gpu_integration_test.ProcessArgs(test_argv)
telemetry_args = browser_test_runner.ProcessConfig(
unittest_config, processed_args)
run_browser_tests.RunTests(telemetry_args)
with open(temp_file.name) as f:
self._test_result = json.load(f)
finally:
temp_file.close()
def testOverrideDefaultRetryArgumentsinRunGpuIntegrationTests(self):
self._RunGpuIntegrationTests('run_tests_with_expectations_files',
['--retry-limit=1'])
self.assertEqual(
self._test_result['tests']['a']['b']['unexpected-fail.html']['actual'],
'FAIL FAIL')
def testDefaultRetryArgumentsinRunGpuIntegrationTests(self):
self._RunGpuIntegrationTests('run_tests_with_expectations_files')
self.assertEqual(
self._test_result['tests']['a']['b']['expected-flaky.html']['actual'],
'FAIL FAIL FAIL')
def testTestNamePrefixGenerationInRunGpuIntegrationTests(self):
self._RunGpuIntegrationTests('simple_integration_unittest')
self.assertIn('expected_failure', self._test_result['tests'])
def _TestTagGenerationForMockPlatform(self, test_class, args):
tag_set = _GenerateNvidiaExampleTagsForTestClassAndArgs(test_class, args)
self.assertTrue(
set([
'win', 'win10', 'angle-d3d9', 'release', 'nvidia', 'nvidia-0x1cb3',
'no-passthrough'
]).issubset(tag_set))
return tag_set
def testGenerateContextLostExampleTagsForAsan(self):
args = gpu_helper.GetMockArgs(is_asan=True)
tag_set = self._TestTagGenerationForMockPlatform(
context_lost_integration_test.ContextLostIntegrationTest, args)
self.assertIn('asan', tag_set)
self.assertNotIn('no-asan', tag_set)
def testGenerateContextLostExampleTagsForNoAsan(self):
args = gpu_helper.GetMockArgs()
tag_set = self._TestTagGenerationForMockPlatform(
context_lost_integration_test.ContextLostIntegrationTest, args)
self.assertIn('no-asan', tag_set)
self.assertNotIn('asan', tag_set)
def testGenerateWebglConformanceExampleTagsForWebglVersion1andAsan(self):
args = gpu_helper.GetMockArgs(is_asan=True, webgl_version='1.0.0')
tag_set = self._TestTagGenerationForMockPlatform(
webgl_conformance_integration_test.WebGLConformanceIntegrationTest,
args)
self.assertTrue(set(['asan', 'webgl-version-1']).issubset(tag_set))
self.assertFalse(set(['no-asan', 'webgl-version-2']) & tag_set)
def testGenerateWebglConformanceExampleTagsForWebglVersion2andNoAsan(self):
args = gpu_helper.GetMockArgs(is_asan=False, webgl_version='2.0.0')
tag_set = self._TestTagGenerationForMockPlatform(
webgl_conformance_integration_test.WebGLConformanceIntegrationTest,
args)
self.assertTrue(set(['no-asan', 'webgl-version-2']).issubset(tag_set))
self.assertFalse(set(['asan', 'webgl-version-1']) & tag_set)
@mock.patch('sys.platform', 'win32')
def testGenerateNvidiaExampleTags(self):
platform = fakes.FakePlatform('win', 'win10')
browser = fakes.FakeBrowser(platform, 'release')
browser._returned_system_info = _GetSystemInfo(
gpu=VENDOR_NVIDIA, device=0x1cb3, gl_renderer='ANGLE Direct3D9')
self.assertEqual(
_GetTagsToTest(browser),
set([
'win', 'win10', 'release', 'nvidia', 'nvidia-0x1cb3', 'angle-d3d9',
'no-passthrough', 'no-swiftshader-gl', 'skia-renderer-disabled',
'no-oop-c'
]))
@mock.patch('sys.platform', 'darwin')
def testGenerateVendorTagUsingVendorString(self):
platform = fakes.FakePlatform('mac', 'mojave')
browser = fakes.FakeBrowser(platform, 'release')
browser._returned_system_info = _GetSystemInfo(
vendor_string=VENDOR_STRING_IMAGINATION,
device_string=DEVICE_STRING_SGX,
passthrough=True,
gl_renderer='ANGLE OpenGL ES')
self.assertEqual(
_GetTagsToTest(browser),
set([
'mac', 'mojave', 'release', 'imagination',
'imagination-PowerVR-SGX-554', 'angle-opengles', 'passthrough',
'no-swiftshader-gl', 'skia-renderer-disabled', 'no-oop-c'
]))
@mock.patch('sys.platform', 'darwin')
def testGenerateVendorTagUsingDeviceString(self):
platform = fakes.FakePlatform('mac', 'mojave')
browser = fakes.FakeBrowser(platform, 'release')
browser._returned_system_info = _GetSystemInfo(
vendor_string='illegal vendor string',
device_string='ANGLE (Imagination, Triangle Monster 3000, 1.0)')
self.assertEqual(
_GetTagsToTest(browser),
set([
'mac', 'mojave', 'release', 'imagination',
'imagination-Triangle-Monster-3000', 'angle-disabled',
'no-passthrough', 'no-swiftshader-gl', 'skia-renderer-disabled',
'no-oop-c'
]))
@mock.patch.dict(os.environ, clear=True)
def testGenerateDisplayServer(self):
platform = fakes.FakePlatform('mac', 'mojave')
browser = fakes.FakeBrowser(platform, 'release')
with mock.patch('sys.platform', 'darwin'):
tags = gpu_integration_test.GpuIntegrationTest.GetPlatformTags(browser)
for t in tags:
self.assertFalse(t.startswith('display-server'))
# Python 2's return value.
with mock.patch('sys.platform', 'linux2'):
tags = gpu_integration_test.GpuIntegrationTest.GetPlatformTags(browser)
self.assertIn('display-server-x', tags)
os.environ['WAYLAND_DISPLAY'] = 'wayland-0'
tags = gpu_integration_test.GpuIntegrationTest.GetPlatformTags(browser)
self.assertIn('display-server-wayland', tags)
# Python 3's return value.
with mock.patch('sys.platform', 'linux'):
del os.environ['WAYLAND_DISPLAY']
tags = gpu_integration_test.GpuIntegrationTest.GetPlatformTags(browser)
self.assertIn('display-server-x', tags)
os.environ['WAYLAND_DISPLAY'] = 'wayland-0'
tags = gpu_integration_test.GpuIntegrationTest.GetPlatformTags(browser)
self.assertIn('display-server-wayland', tags)
def testSimpleIntegrationTest(self):
test_args = _IntegrationTestArgs('simple_integration_unittest')
test_args.failures = [
'unexpected_error',
'unexpected_failure',
]
test_args.successes = [
'expected_flaky',
'expected_failure',
]
test_args.skips = ['expected_skip']
test_args.additional_args = [
'--retry-only-retry-on-failure',
'--retry-limit=3',
'--test-name-prefix=unittest_data.integration_tests.SimpleTest.',
]
self._RunIntegrationTest(test_args)
    # The number of browser starts includes the one call to StartBrowser at
    # the beginning of the test suite run plus one for each RestartBrowser
    # call, which happens after every failure.
    self.assertEquals(self._test_state['num_browser_starts'], 6)

  def testIntegrationTestWithBrowserFailure(self):
test_args = _IntegrationTestArgs(
'browser_start_failure_integration_unittest')
test_args.successes = [
'unittest_data.integration_tests.BrowserStartFailureTest.restart'
]
self._RunIntegrationTest(test_args)
self.assertEquals(self._test_state['num_browser_crashes'], 2)
self.assertEquals(self._test_state['num_browser_starts'], 3)
def testIntegrationTestWithBrowserCrashUponStart(self):
test_args = _IntegrationTestArgs(
'browser_crash_after_start_integration_unittest')
test_args.successes = [
'unittest_data.integration_tests.BrowserCrashAfterStartTest.restart'
]
self._RunIntegrationTest(test_args)
self.assertEquals(self._test_state['num_browser_crashes'], 2)
self.assertEquals(self._test_state['num_browser_starts'], 3)
def testRetryLimit(self):
test_args = _IntegrationTestArgs('test_retry_limit')
test_args.failures = [
'unittest_data.integration_tests.TestRetryLimit.unexpected_failure'
]
test_args.additional_args = ['--retry-limit=2']
self._RunIntegrationTest(test_args)
# The number of attempted runs is 1 + the retry limit.
self.assertEquals(self._test_state['num_test_runs'], 3)
def _RunTestsWithExpectationsFiles(self):
test_args = _IntegrationTestArgs('run_tests_with_expectations_files')
test_args.failures = ['a/b/unexpected-fail.html']
test_args.successes = [
'a/b/expected-fail.html',
'a/b/expected-flaky.html',
]
test_args.skips = ['should_skip']
test_args.additional_args = [
'--retry-limit=3',
'--retry-only-retry-on-failure-tests',
('--test-name-prefix=unittest_data.integration_tests.'
'RunTestsWithExpectationsFiles.'),
]
self._RunIntegrationTest(test_args)
def testTestFilterCommandLineArg(self):
test_args = _IntegrationTestArgs('run_tests_with_expectations_files')
test_args.failures = ['a/b/unexpected-fail.html']
test_args.successes = ['a/b/expected-fail.html']
test_args.skips = ['should_skip']
test_args.additional_args = [
'--retry-limit=3',
'--retry-only-retry-on-failure-tests',
('--test-filter=a/b/unexpected-fail.html::a/b/expected-fail.html::'
'should_skip'),
('--test-name-prefix=unittest_data.integration_tests.'
'RunTestsWithExpectationsFiles.'),
]
self._RunIntegrationTest(test_args)
def testUseTestExpectationsFileToHandleExpectedSkip(self):
self._RunTestsWithExpectationsFiles()
results = self._test_result['tests']['should_skip']
self.assertEqual(results['expected'], 'SKIP')
self.assertEqual(results['actual'], 'SKIP')
self.assertNotIn('is_regression', results)
def testUseTestExpectationsFileToHandleUnexpectedTestFailure(self):
self._RunTestsWithExpectationsFiles()
results = self._test_result['tests']['a']['b']['unexpected-fail.html']
self.assertEqual(results['expected'], 'PASS')
self.assertEqual(results['actual'], 'FAIL')
self.assertIn('is_regression', results)
def testUseTestExpectationsFileToHandleExpectedFailure(self):
self._RunTestsWithExpectationsFiles()
results = self._test_result['tests']['a']['b']['expected-fail.html']
self.assertEqual(results['expected'], 'FAIL')
self.assertEqual(results['actual'], 'FAIL')
self.assertNotIn('is_regression', results)
def testUseTestExpectationsFileToHandleExpectedFlakyTest(self):
self._RunTestsWithExpectationsFiles()
results = self._test_result['tests']['a']['b']['expected-flaky.html']
self.assertEqual(results['expected'], 'PASS')
self.assertEqual(results['actual'], 'FAIL FAIL FAIL PASS')
self.assertNotIn('is_regression', results)
def testRepeat(self):
test_args = _IntegrationTestArgs('test_repeat')
test_args.successes = ['unittest_data.integration_tests.TestRepeat.success']
test_args.additional_args = ['--repeat=3']
self._RunIntegrationTest(test_args)
    self.assertEqual(self._test_state['num_test_runs'], 3)
def testAlsoRunDisabledTests(self):
test_args = _IntegrationTestArgs('test_also_run_disabled_tests')
test_args.failures = [
'skip',
'flaky',
]
# Tests that are expected to fail and do fail are treated as test passes
test_args.successes = ['expected_failure']
test_args.additional_args = [
'--all',
'--test-name-prefix',
'unittest_data.integration_tests.TestAlsoRunDisabledTests.',
'--retry-limit=3',
'--retry-only-retry-on-failure',
]
self._RunIntegrationTest(test_args)
    self.assertEqual(self._test_state['num_flaky_test_runs'], 4)
    self.assertEqual(self._test_state['num_test_runs'], 6)
def testStartBrowser_Retries(self):
class TestException(Exception):
pass
def SetBrowserAndRaiseTestException():
gpu_integration_test.GpuIntegrationTest.browser = (mock.MagicMock())
raise TestException
gpu_integration_test.GpuIntegrationTest.browser = None
gpu_integration_test.GpuIntegrationTest.platform = None
with mock.patch.object(
gpu_integration_test.serially_executed_browser_test_case.\
SeriallyExecutedBrowserTestCase,
'StartBrowser',
side_effect=SetBrowserAndRaiseTestException) as mock_start_browser:
with mock.patch.object(gpu_integration_test.GpuIntegrationTest,
'StopBrowser') as mock_stop_browser:
with self.assertRaises(TestException):
gpu_integration_test.GpuIntegrationTest.StartBrowser()
self.assertEqual(mock_start_browser.call_count,
gpu_integration_test._START_BROWSER_RETRIES)
self.assertEqual(mock_stop_browser.call_count,
gpu_integration_test._START_BROWSER_RETRIES)
def _RunIntegrationTest(self, test_args):
"""Runs an integration and asserts fail/success/skip expectations.
Args:
test_args: A _IntegrationTestArgs instance to use.
"""
config = chromium_config.ChromiumConfig(
top_level_dir=path_util.GetGpuTestDir(),
benchmark_dirs=[
os.path.join(path_util.GetGpuTestDir(), 'unittest_data')
])
with binary_manager.TemporarilyReplaceBinaryManager(None), \
tempfile_ext.NamedTemporaryDirectory() as temp_dir:
test_results_path = os.path.join(temp_dir, 'test_results.json')
test_state_path = os.path.join(temp_dir, 'test_state.json')
      # We process the ChromiumConfig instance to get the argument list, then
      # pass it directly to run_browser_tests.RunTests. Calling
      # browser_test_runner.Run instead would spawn another subprocess, which
      # is less efficient.
args = browser_test_runner.ProcessConfig(
config,
[
test_args.test_name,
'--write-full-results-to=%s' % test_results_path,
'--test-state-json-path=%s' % test_state_path,
# We don't want the underlying typ-based tests to report their
# results to ResultDB.
'--disable-resultsink',
] + test_args.additional_args)
run_browser_tests.RunTests(args)
with open(test_results_path) as f:
self._test_result = json.load(f)
with open(test_state_path) as f:
self._test_state = json.load(f)
actual_successes, actual_failures, actual_skips = (_ExtractTestResults(
self._test_result))
    self.assertEqual(set(actual_failures), set(test_args.failures))
    self.assertEqual(set(actual_successes), set(test_args.successes))
    self.assertEqual(set(actual_skips), set(test_args.skips))
def _ExtractTestResults(test_result):
delimiter = test_result['path_delimiter']
failures = []
successes = []
skips = []
def _IsLeafNode(node):
test_dict = node[1]
return ('expected' in test_dict
and isinstance(test_dict['expected'], six.string_types))
node_queues = []
for t in test_result['tests']:
node_queues.append((t, test_result['tests'][t]))
while node_queues:
node = node_queues.pop()
full_test_name, test_dict = node
if _IsLeafNode(node):
if all(res not in test_dict['expected'].split()
for res in test_dict['actual'].split()):
failures.append(full_test_name)
elif test_dict['expected'] == test_dict['actual'] == 'SKIP':
skips.append(full_test_name)
else:
successes.append(full_test_name)
else:
for k in test_dict:
node_queues.append(
('%s%s%s' % (full_test_name, delimiter, k), test_dict[k]))
return successes, failures, skips
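# Worked example (illustrative): with path_delimiter '/', a tree such as
# {'a': {'b': {'t.html': {'expected': 'PASS', 'actual': 'FAIL FAIL'}}}} walks
# down to the leaf 'a/b/t.html'; since no actual result appears among the
# expected ones, the test is classified as a failure. A leaf with
# expected == actual == 'SKIP' is a skip, and anything else is a success.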
if __name__ == '__main__':
unittest.main(verbosity=2)
| 37.898305
| 80
| 0.701302
|
4a13087d393ed421a90d0e1a57d491a43b69c806
| 840
|
py
|
Python
|
jam_sesh/spotify/migrations/0003_vote.py
|
jaybrt/JamSesh
|
3a44e45ebd01acc90388a8d69eb48f1a91940507
|
[
"MIT"
] | null | null | null |
jam_sesh/spotify/migrations/0003_vote.py
|
jaybrt/JamSesh
|
3a44e45ebd01acc90388a8d69eb48f1a91940507
|
[
"MIT"
] | null | null | null |
jam_sesh/spotify/migrations/0003_vote.py
|
jaybrt/JamSesh
|
3a44e45ebd01acc90388a8d69eb48f1a91940507
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-03-20 07:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0003_room_current_song'),
('spotify', '0002_auto_20210312_0133'),
]
operations = [
migrations.CreateModel(
name='Vote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user', models.CharField(max_length=50, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('song_id', models.CharField(max_length=50)),
('room', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.room')),
],
),
]
| 32.307692
| 114
| 0.6
|
4a1308ce823e3010a4c22c7d801b835db386e18f
| 641
|
py
|
Python
|
mct_zoom_tool/src/mct_zoom_tool/zoom_tool_master.py
|
iorodeo/mct
|
fa8b85f36533c9b1486ca4f6b0c40c3daa6f4e11
|
[
"Apache-2.0"
] | null | null | null |
mct_zoom_tool/src/mct_zoom_tool/zoom_tool_master.py
|
iorodeo/mct
|
fa8b85f36533c9b1486ca4f6b0c40c3daa6f4e11
|
[
"Apache-2.0"
] | null | null | null |
mct_zoom_tool/src/mct_zoom_tool/zoom_tool_master.py
|
iorodeo/mct
|
fa8b85f36533c9b1486ca4f6b0c40c3daa6f4e11
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import roslib
roslib.load_manifest('mct_zoom_tool')
import rospy
from mct_msg_and_srv.srv import CommandString
def zoom_tool_master_srv(cmd):
proxy = rospy.ServiceProxy('/zoom_tool_master', CommandString)
resp = proxy(cmd)
return resp.flag, resp.message
def start():
return zoom_tool_master_srv('start')
def stop():
return zoom_tool_master_srv('stop')
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    import sys
    if len(sys.argv) < 2 or sys.argv[1] not in ('start', 'stop'):
        sys.exit('usage: zoom_tool_master.py start|stop')
    cmd = sys.argv[1]
if cmd == 'start':
start()
elif cmd == 'stop':
stop()
| 21.366667
| 79
| 0.605304
|
4a1309920b76ca164ee9888602910effdb8f9719
| 964
|
py
|
Python
|
backend/app/consumers.py
|
schajee/boilerplate
|
30b30519d837b8c1dac4c480eff1e5635c285951
|
[
"MIT"
] | 4
|
2021-11-30T11:08:08.000Z
|
2022-01-14T12:51:39.000Z
|
backend/app/consumers.py
|
schajee/boilerplate
|
30b30519d837b8c1dac4c480eff1e5635c285951
|
[
"MIT"
] | null | null | null |
backend/app/consumers.py
|
schajee/boilerplate
|
30b30519d837b8c1dac4c480eff1e5635c285951
|
[
"MIT"
] | null | null | null |
# rest/consumers.py
from channels.generic.websocket import AsyncWebsocketConsumer
import json
class NotifyConsumer(AsyncWebsocketConsumer):
async def connect(self):
self.room_name = self.scope['url_route']['kwargs']['username']
self.room_group_name = 'notify_%s' % self.room_name
# Join room group
await self.channel_layer.group_add(
self.room_group_name,
self.channel_name
)
await self.accept()
async def disconnect(self, close_code):
# Leave room group
await self.channel_layer.group_discard(
self.room_group_name,
self.channel_name
)
# Receive message from room group
async def message(self, event):
# Send message to WebSocket
await self.send(text_data=json.dumps(event))
async def progress(self, event):
# Send message to WebSocket
await self.send(text_data=json.dumps(event))
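# For reference, a producer elsewhere in the app reaches the handlers above
# through the channel layer; the 'type' key in the payload selects the handler
# method ('message' or 'progress'). A minimal sketch, assuming a configured
# channel layer and a user named 'alice':
#
#     from asgiref.sync import async_to_sync
#     from channels.layers import get_channel_layer
#
#     async_to_sync(get_channel_layer().group_send)(
#         'notify_alice',
#         {'type': 'message', 'text': 'Hello'},
#     )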
| 29.212121
| 70
| 0.651452
|
4a130a12e4e24c82aa0f5bdaf8c7077196e45a6b
| 2,731
|
py
|
Python
|
profile_collection/startup/csx1/startup/settings.py
|
NSLS-II-CSX/xf23id1_profiles
|
88b24b0bae222f4d69c278c5e3da8a9560c846d0
|
[
"BSD-3-Clause"
] | null | null | null |
profile_collection/startup/csx1/startup/settings.py
|
NSLS-II-CSX/xf23id1_profiles
|
88b24b0bae222f4d69c278c5e3da8a9560c846d0
|
[
"BSD-3-Clause"
] | 57
|
2016-03-05T16:37:55.000Z
|
2022-02-16T18:43:33.000Z
|
profile_collection/startup/csx1/startup/settings.py
|
NSLS-II-CSX/xf23id1_profiles
|
88b24b0bae222f4d69c278c5e3da8a9560c846d0
|
[
"BSD-3-Clause"
] | 4
|
2016-03-06T19:40:18.000Z
|
2019-01-24T22:49:30.000Z
|
from bluesky.magics import BlueskyMagics
from .startup import sd
from .detectors import *
from .endstation import *
from .accelerator import *
from .optics import *
from .tardis import *
#
# Setup of sup. data for plans
#
sd.monitors = []
sd.flyers = []
sd.baseline = [theta, delta, gamma, muR,
sx, say, saz,
cryoangle, sy, sz,
epu1, epu2,
slt1, slt2, slt3,
m1a, m3a,
#nanop, tardis,
tardis,
stemp, pgm,
inout, es_diag1_y, diag6_pid]
#bec.disable_baseline() #no print to CLI, just save to datastore
sclr.names.read_attrs = ['name1', 'name2', 'name3', 'name4', 'name5', 'name6']  # TODO WHAT IS THIS??? - Dan Allan
sclr.channels.read_attrs = ['chan1', 'chan2', 'chan3', 'chan4', 'chan5', 'chan6']
# Old-style hints config is replaced by the new 'kind' feature
# sclr.hints = {'fields': ['sclr_ch2', 'sclr_ch3', 'sclr_ch6']}
for i in [2, 3, 4, 5]:
getattr(sclr.channels, f'chan{i}').kind = 'hinted'
# getattr(sclr.channels, f'chan{i}').kind = 'normal' will remove the
# hinted fields from LivePlot and LiveTable.
def relabel_fig(fig, new_label):
fig.set_label(new_label)
fig.canvas.manager.set_window_title(fig.get_label())
# fccd.hints = {'fields': ['fccd_stats1_total']}
for i in [1, 2, 3, 4, 5]:
getattr(fccd, f'stats{i}').total.kind = 'hinted'
# dif_beam.hints = {'fields' : ['dif_beam_stats3_total','dif_beam_stats1_total']}
for i in [1, 3]:
getattr(dif_beam, f'stats{i}').total.kind = 'hinted'
## 20180726 needed to comment due to IOC1 problems
#cube_beam.hints = {'fields': ['cube_beam_stats2_total', 'cube_beam_stats1_total']}
#for i in [1, 2]:
# getattr(cube_beam, f'stats{i}').total.kind = 'hinted'
# BlueskyMagics was imported in 00-startup.py; the list below was generated
# with: [thing.name for thing in get_all_positioners()]
"""
BlueskyMagics.positioners = [
cryoangle,
delta,
diag2_y,
diag3_y,
diag5_y,
diag6_pid,
diag6_y,
epu1.gap,
epu1.phase,
epu2.gap,
epu2.phase,
es_diag1_y,
eta,
gamma,
m1a.z,
m1a.y,
m1a.x,
m1a.pit,
m1a.yaw,
m1a.rol,
m3a.x,
m3a.pit,
m3a.bdr,
# muR, # TODO turn this back on when safe
# muT, # TODO turn this back on when safe
#nanop.tx,
#nanop.ty,
#nanop.tz,
#nanop.bx,
#nanop.by,
#nanop.bz,
say,
saz,
slt1.xg,
slt1.xc,
slt1.yg,
slt1.yc,
slt2.xg,
slt2.xc,
slt2.yg,
slt2.yc,
slt3.x,
slt3.y,
sx,
sy,
sz,
tardis.h,
tardis.k,
tardis.l,
tardis.theta,
tardis.mu,
tardis.chi,
tardis.phi,
tardis.delta,
tardis.gamma,
theta,
]
"""
| 23.747826
| 114
| 0.599048
|
4a130a13c94dddec86b54f6f146e9e48056f8169
| 131
|
py
|
Python
|
distributions/__init__.py
|
iris-theof/distributions
|
4d7189c599b491dab313804dea6338bad06b478d
|
[
"CNRI-Python",
"Xnet",
"X11"
] | 2
|
2021-01-19T19:00:14.000Z
|
2021-01-21T10:24:32.000Z
|
distributions/__init__.py
|
iris-theof/distributions_package
|
4d7189c599b491dab313804dea6338bad06b478d
|
[
"CNRI-Python",
"Xnet",
"X11"
] | null | null | null |
distributions/__init__.py
|
iris-theof/distributions_package
|
4d7189c599b491dab313804dea6338bad06b478d
|
[
"CNRI-Python",
"Xnet",
"X11"
] | null | null | null |
from .Gaussiandistribution import Gaussian
from .Binomialdistribution import Binomial
from .Bernoullidistribution import Bernoulli
| 32.75
| 44
| 0.885496
|
4a130a81ba3d3fb695a634a4f065f212d9f7de38
| 1,611
|
py
|
Python
|
nailgun/nailgun/api/v1/handlers/plugin.py
|
Zipfer/fuel-web
|
c6c4032eb6e29474e2be0318349265bdb566454c
|
[
"Apache-2.0"
] | null | null | null |
nailgun/nailgun/api/v1/handlers/plugin.py
|
Zipfer/fuel-web
|
c6c4032eb6e29474e2be0318349265bdb566454c
|
[
"Apache-2.0"
] | null | null | null |
nailgun/nailgun/api/v1/handlers/plugin.py
|
Zipfer/fuel-web
|
c6c4032eb6e29474e2be0318349265bdb566454c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.api.v1.handlers import base
from nailgun.api.v1.handlers.base import content
from nailgun.api.v1.validators import plugin
from nailgun import objects
class PluginHandler(base.SingleHandler):
validator = plugin.PluginValidator
single = objects.Plugin
class PluginCollectionHandler(base.CollectionHandler):
collection = objects.PluginCollection
validator = plugin.PluginValidator
@content
def POST(self):
""":returns: JSONized REST object.
:http: * 201 (object successfully created)
* 400 (invalid object data specified)
* 409 (object with such parameters already exists)
"""
data = self.checked_data(self.validator.validate)
obj = self.collection.single.get_by_name_version(
data['name'], data['version'])
if obj:
raise self.http(409, self.collection.single.to_json(obj))
return super(PluginCollectionHandler, self).POST()
| 34.276596
| 78
| 0.696462
|
4a130ac762c07f687c837f149e8b3b27cd0e56ec
| 13,187
|
py
|
Python
|
tests/gcp/operators/test_dataflow.py
|
rolanddb/incubator-airflow
|
e090744787458b50d7eb35bd3c66f15fba7322c2
|
[
"Apache-2.0"
] | 2
|
2020-10-12T05:21:27.000Z
|
2021-07-07T09:23:47.000Z
|
tests/gcp/operators/test_dataflow.py
|
sb2nov/airflow
|
e405be0141a996c5bb3659e3f19cab0e1ac8dc8d
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 3
|
2021-03-11T06:46:16.000Z
|
2021-09-29T17:48:20.000Z
|
tests/gcp/operators/test_dataflow.py
|
sb2nov/airflow
|
e405be0141a996c5bb3659e3f19cab0e1ac8dc8d
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from airflow.gcp.operators.dataflow import \
DataFlowPythonOperator, DataFlowJavaOperator, \
DataflowTemplateOperator, GoogleCloudBucketHelper, CheckJobRunning
from airflow.version import version
from tests.compat import mock
TASK_ID = 'test-dataflow-operator'
JOB_NAME = 'test-dataflow-pipeline'
TEMPLATE = 'gs://dataflow-templates/wordcount/template_file'
PARAMETERS = {
'inputFile': 'gs://dataflow-samples/shakespeare/kinglear.txt',
'output': 'gs://test/output/my_output'
}
PY_FILE = 'gs://my-bucket/my-object.py'
JAR_FILE = 'example/test.jar'
JOB_CLASS = 'com.test.NotMain'
PY_OPTIONS = ['-m']
DEFAULT_OPTIONS_PYTHON = DEFAULT_OPTIONS_JAVA = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
}
DEFAULT_OPTIONS_TEMPLATE = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
'tempLocation': 'gs://test/temp',
'zone': 'us-central1-f'
}
ADDITIONAL_OPTIONS = {
'output': 'gs://test/output',
'labels': {'foo': 'bar'}
}
TEST_VERSION = 'v{}'.format(version.replace('.', '-').replace('+', '-'))
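# e.g. an Airflow version of '1.10.5' yields TEST_VERSION 'v1-10-5'.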
EXPECTED_ADDITIONAL_OPTIONS = {
'output': 'gs://test/output',
'labels': {'foo': 'bar', 'airflow-version': TEST_VERSION}
}
POLL_SLEEP = 30
GCS_HOOK_STRING = 'airflow.gcp.operators.dataflow.{}'
class DataFlowPythonOperatorTest(unittest.TestCase):
def setUp(self):
self.dataflow = DataFlowPythonOperator(
task_id=TASK_ID,
py_file=PY_FILE,
job_name=JOB_NAME,
py_options=PY_OPTIONS,
dataflow_default_options=DEFAULT_OPTIONS_PYTHON,
options=ADDITIONAL_OPTIONS,
poll_sleep=POLL_SLEEP)
def test_init(self):
"""Test DataFlowPythonOperator instance is properly initialized."""
self.assertEqual(self.dataflow.task_id, TASK_ID)
self.assertEqual(self.dataflow.job_name, JOB_NAME)
self.assertEqual(self.dataflow.py_file, PY_FILE)
self.assertEqual(self.dataflow.py_options, PY_OPTIONS)
self.assertEqual(self.dataflow.poll_sleep, POLL_SLEEP)
self.assertEqual(self.dataflow.dataflow_default_options,
DEFAULT_OPTIONS_PYTHON)
self.assertEqual(self.dataflow.options,
EXPECTED_ADDITIONAL_OPTIONS)
@mock.patch('airflow.gcp.operators.dataflow.DataFlowHook')
@mock.patch(GCS_HOOK_STRING.format('GoogleCloudBucketHelper'))
def test_exec(self, gcs_hook, dataflow_mock):
"""Test DataFlowHook is created and the right args are passed to
start_python_workflow.
"""
start_python_hook = dataflow_mock.return_value.start_python_dataflow
gcs_download_hook = gcs_hook.return_value.google_cloud_to_local
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
expected_options = {
'project': 'test',
'staging_location': 'gs://test/staging',
'output': 'gs://test/output',
'labels': {'foo': 'bar', 'airflow-version': TEST_VERSION}
}
gcs_download_hook.assert_called_once_with(PY_FILE)
start_python_hook.assert_called_once_with(JOB_NAME, expected_options, mock.ANY,
PY_OPTIONS)
self.assertTrue(self.dataflow.py_file.startswith('/tmp/dataflow'))
class DataFlowJavaOperatorTest(unittest.TestCase):
def setUp(self):
self.dataflow = DataFlowJavaOperator(
task_id=TASK_ID,
jar=JAR_FILE,
job_name=JOB_NAME,
job_class=JOB_CLASS,
dataflow_default_options=DEFAULT_OPTIONS_JAVA,
options=ADDITIONAL_OPTIONS,
poll_sleep=POLL_SLEEP)
def test_init(self):
"""Test DataflowTemplateOperator instance is properly initialized."""
self.assertEqual(self.dataflow.task_id, TASK_ID)
self.assertEqual(self.dataflow.job_name, JOB_NAME)
self.assertEqual(self.dataflow.poll_sleep, POLL_SLEEP)
self.assertEqual(self.dataflow.dataflow_default_options,
DEFAULT_OPTIONS_JAVA)
self.assertEqual(self.dataflow.job_class, JOB_CLASS)
self.assertEqual(self.dataflow.jar, JAR_FILE)
self.assertEqual(self.dataflow.options,
EXPECTED_ADDITIONAL_OPTIONS)
self.assertEqual(self.dataflow.check_if_running, CheckJobRunning.WaitForRun)
@mock.patch('airflow.gcp.operators.dataflow.DataFlowHook')
@mock.patch(GCS_HOOK_STRING.format('GoogleCloudBucketHelper'))
def test_exec(self, gcs_hook, dataflow_mock):
"""Test DataFlowHook is created and the right args are passed to
start_java_workflow.
"""
start_java_hook = dataflow_mock.return_value.start_java_dataflow
gcs_download_hook = gcs_hook.return_value.google_cloud_to_local
self.dataflow.check_if_running = CheckJobRunning.IgnoreJob
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
gcs_download_hook.assert_called_once_with(JAR_FILE)
start_java_hook.assert_called_once_with(JOB_NAME, mock.ANY,
mock.ANY, JOB_CLASS, True, None)
@mock.patch('airflow.gcp.operators.dataflow.DataFlowHook')
@mock.patch(GCS_HOOK_STRING.format('GoogleCloudBucketHelper'))
def test_check_job_running_exec(self, gcs_hook, dataflow_mock):
"""Test DataFlowHook is created and the right args are passed to
start_java_workflow.
"""
dataflow_running = dataflow_mock.return_value.is_job_dataflow_running
dataflow_running.return_value = True
start_java_hook = dataflow_mock.return_value.start_java_dataflow
gcs_download_hook = gcs_hook.return_value.google_cloud_to_local
self.dataflow.check_if_running = True
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
gcs_download_hook.assert_not_called()
start_java_hook.assert_not_called()
dataflow_running.assert_called_once_with(JOB_NAME, mock.ANY)
@mock.patch('airflow.gcp.operators.dataflow.DataFlowHook')
@mock.patch(GCS_HOOK_STRING.format('GoogleCloudBucketHelper'))
def test_check_job_not_running_exec(self, gcs_hook, dataflow_mock):
"""Test DataFlowHook is created and the right args are passed to
start_java_workflow with option to check if job is running
"""
dataflow_running = dataflow_mock.return_value.is_job_dataflow_running
dataflow_running.return_value = False
start_java_hook = dataflow_mock.return_value.start_java_dataflow
gcs_download_hook = gcs_hook.return_value.google_cloud_to_local
self.dataflow.check_if_running = True
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
gcs_download_hook.assert_called_once_with(JAR_FILE)
start_java_hook.assert_called_once_with(JOB_NAME, mock.ANY,
mock.ANY, JOB_CLASS, True, None)
dataflow_running.assert_called_once_with(JOB_NAME, mock.ANY)
@mock.patch('airflow.gcp.operators.dataflow.DataFlowHook')
@mock.patch(GCS_HOOK_STRING.format('GoogleCloudBucketHelper'))
def test_check_multiple_job_exec(self, gcs_hook, dataflow_mock):
"""Test DataFlowHook is created and the right args are passed to
start_java_workflow with option to check multiple jobs
"""
dataflow_running = dataflow_mock.return_value.is_job_dataflow_running
dataflow_running.return_value = False
start_java_hook = dataflow_mock.return_value.start_java_dataflow
gcs_download_hook = gcs_hook.return_value.google_cloud_to_local
self.dataflow.multiple_jobs = True
self.dataflow.check_if_running = True
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
gcs_download_hook.assert_called_once_with(JAR_FILE)
start_java_hook.assert_called_once_with(JOB_NAME, mock.ANY,
mock.ANY, JOB_CLASS, True, True)
dataflow_running.assert_called_once_with(JOB_NAME, mock.ANY)
class DataFlowTemplateOperatorTest(unittest.TestCase):
def setUp(self):
self.dataflow = DataflowTemplateOperator(
task_id=TASK_ID,
template=TEMPLATE,
job_name=JOB_NAME,
parameters=PARAMETERS,
dataflow_default_options=DEFAULT_OPTIONS_TEMPLATE,
poll_sleep=POLL_SLEEP)
def test_init(self):
"""Test DataflowTemplateOperator instance is properly initialized."""
self.assertEqual(self.dataflow.task_id, TASK_ID)
self.assertEqual(self.dataflow.job_name, JOB_NAME)
self.assertEqual(self.dataflow.template, TEMPLATE)
self.assertEqual(self.dataflow.parameters, PARAMETERS)
self.assertEqual(self.dataflow.poll_sleep, POLL_SLEEP)
self.assertEqual(self.dataflow.dataflow_default_options,
DEFAULT_OPTIONS_TEMPLATE)
@mock.patch('airflow.gcp.operators.dataflow.DataFlowHook')
def test_exec(self, dataflow_mock):
"""Test DataFlowHook is created and the right args are passed to
start_template_workflow.
"""
start_template_hook = dataflow_mock.return_value.start_template_dataflow
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
expected_options = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
'tempLocation': 'gs://test/temp',
'zone': 'us-central1-f'
}
start_template_hook.assert_called_once_with(JOB_NAME, expected_options,
PARAMETERS, TEMPLATE)
class GoogleCloudBucketHelperTest(unittest.TestCase):
@mock.patch(
'airflow.gcp.operators.dataflow.GoogleCloudBucketHelper.__init__'
)
def test_invalid_object_path(self, mock_parent_init):
# This is just the path of a bucket hence invalid filename
file_name = 'gs://test-bucket'
mock_parent_init.return_value = None
gcs_bucket_helper = GoogleCloudBucketHelper()
gcs_bucket_helper._gcs_hook = mock.Mock()
with self.assertRaises(Exception) as context:
gcs_bucket_helper.google_cloud_to_local(file_name)
self.assertEqual(
'Invalid Google Cloud Storage (GCS) object path: {}'.format(file_name),
str(context.exception))
@mock.patch(
'airflow.gcp.operators.dataflow.GoogleCloudBucketHelper.__init__'
)
def test_valid_object(self, mock_parent_init):
file_name = 'gs://test-bucket/path/to/obj.jar'
mock_parent_init.return_value = None
gcs_bucket_helper = GoogleCloudBucketHelper()
gcs_bucket_helper._gcs_hook = mock.Mock()
# pylint:disable=redefined-builtin,unused-argument
def _mock_download(bucket, object, filename=None):
text_file_contents = 'text file contents'
with open(filename, 'w') as text_file:
text_file.write(text_file_contents)
return text_file_contents
gcs_bucket_helper._gcs_hook.download.side_effect = _mock_download
local_file = gcs_bucket_helper.google_cloud_to_local(file_name)
self.assertIn('obj.jar', local_file)
@mock.patch(
'airflow.gcp.operators.dataflow.GoogleCloudBucketHelper.__init__'
)
def test_empty_object(self, mock_parent_init):
file_name = 'gs://test-bucket/path/to/obj.jar'
mock_parent_init.return_value = None
gcs_bucket_helper = GoogleCloudBucketHelper()
gcs_bucket_helper._gcs_hook = mock.Mock()
# pylint:disable=redefined-builtin,unused-argument
def _mock_download(bucket, object, filename=None):
text_file_contents = ''
with open(filename, 'w') as text_file:
text_file.write(text_file_contents)
return text_file_contents
gcs_bucket_helper._gcs_hook.download.side_effect = _mock_download
with self.assertRaises(Exception) as context:
gcs_bucket_helper.google_cloud_to_local(file_name)
self.assertEqual(
'Failed to download Google Cloud Storage (GCS) object: {}'.format(file_name),
str(context.exception))
| 41.468553
| 89
| 0.691059
|
4a130aeea945ca532e4bb34bc3c4867af151c42d
| 3,832
|
py
|
Python
|
.history/First_Wish/settings_20210906110055.py
|
Sahil1515/First-Wish-Website
|
de973f2a5c682b142c6faba4b127e4d83291dac5
|
[
"MIT"
] | null | null | null |
.history/First_Wish/settings_20210906110055.py
|
Sahil1515/First-Wish-Website
|
de973f2a5c682b142c6faba4b127e4d83291dac5
|
[
"MIT"
] | null | null | null |
.history/First_Wish/settings_20210906110055.py
|
Sahil1515/First-Wish-Website
|
de973f2a5c682b142c6faba4b127e4d83291dac5
|
[
"MIT"
] | null | null | null |
"""
Django settings for First_Wish project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
import environ
import threading
import schedule
from First_Wish_Main_App.views import decrease_day_count_and_send_bday_mails
env_path = os.path.join(os.path.dirname(__file__), '../.env')
environ.Env.read_env(env_path)
def func():
print("\n\nWorking\n\n")
schedule.every().day.at("11:00").do(func)
# schedule.every().day.at("11:00").do(decrease_day_count_and_send_bday_mails)
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
templates_path = os.path.join(BASE_DIR, 'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'First_Wish_Main_App',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'First_Wish.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [templates_path],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'First_Wish.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
| 24.253165
| 91
| 0.709029
|
4a130c49121da4b40b17cfd9e14a076b6686dcae
| 1,989
|
py
|
Python
|
ExamplePCB.py
|
lrh2999/PCBpy
|
95e4d373a24238ed56989a67ac78e991878facc9
|
[
"MIT"
] | 1
|
2020-05-24T06:17:57.000Z
|
2020-05-24T06:17:57.000Z
|
ExamplePCB.py
|
lrh2999/PCBpy
|
95e4d373a24238ed56989a67ac78e991878facc9
|
[
"MIT"
] | null | null | null |
ExamplePCB.py
|
lrh2999/PCBpy
|
95e4d373a24238ed56989a67ac78e991878facc9
|
[
"MIT"
] | null | null | null |
from PCBpy import *
schem_data = {
'cad_filename': 'pstxref.dat',
'loop_a_net': ('XCVU160', 'XCKU095', 'CON38P'),
    'loop_b_pin': ('AFBR',)  # trailing comma makes this a 1-tuple, like loop_a_net
}
part_specific_data = [
{
'xlx_filename': 'xcvu9pflgc2104pkg.csv',
'cad_part_num': 'XCVU160-H1FLGC2104',
'cad_instance': 'IC4',
'gt_types': ('MGTH', 'MGTY'),
'first_gtquad': 19,
'gtquad_initial': [1, 2],
'ibert_path':'localhost:3121/xilinx_tcf/*/0_1_0_*'
},
{
'xlx_filename': 'xcvu9pflgc2104pkg.csv',
'cad_part_num': 'XCVU160-H1FLGC2104',
'cad_instance': 'IC15',
'gt_types': ('MGTH', 'MGTY'),
'first_gtquad': 19,
'gtquad_initial': [1, 2],
'ibert_path':'localhost:3121/xilinx_tcf/*/1_1_0_*'
},
{
'xlx_filename': 'xcku095ffvb2104pkg.csv',
'cad_part_num': 'XCKU095-1FFVB2104C',
'cad_instance': 'IC39',
'gt_types': ('MGTH', 'MGTY'),
'first_gtquad': 24,
'gtquad_initial': [1, 2],
'ibert_path':'localhost:3121/xilinx_tcf/*/2_1_0_*'
},
{
'xlx_filename': 'xczu3egsfvc784pkg.csv',
'cad_part_num': 'XCZU3EG-1SFVC784E',
'cad_instance': 'IC84',
'gt_types': ('PS_MGTRRX', 'PS_MGTRTX'),
'first_gtquad': 5,
'gtquad_initial': [5],
'ibert_path':'localhost:3121/xilinx_tcf/*/3_1_0_*'
}
]
for part in part_specific_data:
PCBpy(part, schem_data)
# checking cross references
rfname = 'in/cad/basenets.txt'
start_skip_conditions = ['%','Title:','Design:','Date:','Base','\n']
with open(rfname) as rf:
content = rf.readlines()
conditions = [
[endswith, 'P'],
[endswith, 'N'],
[endswith, '*'],
[anywhere, 'SCL'],
[anywhere, 'SDA'],
[anywhere, 'TCK'],
[anywhere, 'TDI'],
[anywhere, 'TDO'],
[anywhere, 'TMS'],
[anywhere, '12V'],
]
for c in conditions:
print "-----------------------------"
check_file(content, c[0], c[1], start_skip_conditions)
| 26.52
| 68
| 0.557064
|
4a130cc69dd231c938d774cab9de4435737f5a62
| 10,485
|
py
|
Python
|
src/cmds/farm_funcs.py
|
Flofie/chia-blockchain
|
d3013f1a392fc1761d975581a7b1d0770f92cb14
|
[
"Apache-2.0"
] | null | null | null |
src/cmds/farm_funcs.py
|
Flofie/chia-blockchain
|
d3013f1a392fc1761d975581a7b1d0770f92cb14
|
[
"Apache-2.0"
] | null | null | null |
src/cmds/farm_funcs.py
|
Flofie/chia-blockchain
|
d3013f1a392fc1761d975581a7b1d0770f92cb14
|
[
"Apache-2.0"
] | null | null | null |
import math
from typing import Any, Dict, List, Optional
import aiohttp
from src.cmds.units import units
from src.consensus.block_record import BlockRecord
from src.rpc.farmer_rpc_client import FarmerRpcClient
from src.rpc.full_node_rpc_client import FullNodeRpcClient
from src.rpc.harvester_rpc_client import HarvesterRpcClient
from src.rpc.wallet_rpc_client import WalletRpcClient
from src.util.config import load_config
from src.util.default_root import DEFAULT_ROOT_PATH
from src.util.ints import uint16
SECONDS_PER_BLOCK = (24 * 3600) / 4608
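# Chia targets 4608 blocks per day, i.e. one block every 18.75 seconds on average.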
async def get_plots(harvester_rpc_port: int) -> Optional[Dict[str, Any]]:
    plots = None
    # The client may never be created if config loading or the connection
    # fails, so initialize it for the cleanup below.
    harvester_client = None
    try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if harvester_rpc_port is None:
harvester_rpc_port = config["harvester"]["rpc_port"]
harvester_client = await HarvesterRpcClient.create(
self_hostname, uint16(harvester_rpc_port), DEFAULT_ROOT_PATH, config
)
plots = await harvester_client.get_plots()
except Exception as e:
if isinstance(e, aiohttp.client_exceptions.ClientConnectorError):
print(f"Connection error. Check if harvester is running at {harvester_rpc_port}")
else:
print(f"Exception from 'harvester' {e}")
    if harvester_client is not None:
        harvester_client.close()
        await harvester_client.await_closed()
return plots
async def get_blockchain_state(rpc_port: int) -> Optional[Dict[str, Any]]:
    blockchain_state = None
    client = None
    try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if rpc_port is None:
rpc_port = config["full_node"]["rpc_port"]
client = await FullNodeRpcClient.create(self_hostname, uint16(rpc_port), DEFAULT_ROOT_PATH, config)
blockchain_state = await client.get_blockchain_state()
except Exception as e:
if isinstance(e, aiohttp.client_exceptions.ClientConnectorError):
print(f"Connection error. Check if full node is running at {rpc_port}")
else:
print(f"Exception from 'full node' {e}")
    if client is not None:
        client.close()
        await client.await_closed()
return blockchain_state
async def get_average_block_time(rpc_port: int) -> float:
    client = None
    try:
blocks_to_compare = 500
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if rpc_port is None:
rpc_port = config["full_node"]["rpc_port"]
client = await FullNodeRpcClient.create(self_hostname, uint16(rpc_port), DEFAULT_ROOT_PATH, config)
blockchain_state = await client.get_blockchain_state()
curr: Optional[BlockRecord] = blockchain_state["peak"]
if curr is None or curr.height < (blocks_to_compare + 100):
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
while curr is not None and curr.height > 0 and not curr.is_transaction_block:
curr = await client.get_block_record(curr.prev_hash)
if curr is None:
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
past_curr = await client.get_block_record_by_height(curr.height - blocks_to_compare)
while past_curr is not None and past_curr.height > 0 and not past_curr.is_transaction_block:
past_curr = await client.get_block_record(past_curr.prev_hash)
if past_curr is None:
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
client.close()
await client.await_closed()
return (curr.timestamp - past_curr.timestamp) / (curr.height - past_curr.height)
except Exception as e:
if isinstance(e, aiohttp.client_exceptions.ClientConnectorError):
print(f"Connection error. Check if full node is running at {rpc_port}")
else:
print(f"Exception from 'full node' {e}")
        if client is not None:
            client.close()
            await client.await_closed()
return SECONDS_PER_BLOCK
async def get_wallets_stats(wallet_rpc_port: int) -> Optional[Dict[str, Any]]:
    amounts = None
    wallet_client = None
    try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if wallet_rpc_port is None:
wallet_rpc_port = config["wallet"]["rpc_port"]
wallet_client = await WalletRpcClient.create(self_hostname, uint16(wallet_rpc_port), DEFAULT_ROOT_PATH, config)
amounts = await wallet_client.get_farmed_amount()
except Exception as e:
if isinstance(e, aiohttp.client_exceptions.ClientConnectorError):
print(f"Connection error. Check if wallet is running at {wallet_rpc_port}")
else:
print(f"Exception from 'wallet' {e}")
    if wallet_client is not None:
        wallet_client.close()
        await wallet_client.await_closed()
return amounts
async def is_farmer_running(farmer_rpc_port: int) -> bool:
    is_running = False
    farmer_client = None
    try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if farmer_rpc_port is None:
farmer_rpc_port = config["farmer"]["rpc_port"]
farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config)
await farmer_client.get_connections()
is_running = True
except Exception as e:
if isinstance(e, aiohttp.client_exceptions.ClientConnectorError):
print(f"Connection error. Check if farmer is running at {farmer_rpc_port}")
else:
print(f"Exception from 'farmer' {e}")
    if farmer_client is not None:
        farmer_client.close()
        await farmer_client.await_closed()
return is_running
async def get_challenges(farmer_rpc_port: int) -> Optional[List[Dict[str, Any]]]:
    signage_points = None
    farmer_client = None
    try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if farmer_rpc_port is None:
farmer_rpc_port = config["farmer"]["rpc_port"]
farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config)
signage_points = await farmer_client.get_signage_points()
except Exception as e:
if isinstance(e, aiohttp.client_exceptions.ClientConnectorError):
print(f"Connection error. Check if farmer is running at {farmer_rpc_port}")
else:
print(f"Exception from 'farmer' {e}")
    if farmer_client is not None:
        farmer_client.close()
        await farmer_client.await_closed()
return signage_points
async def challenges(farmer_rpc_port: int, limit: int) -> None:
signage_points = await get_challenges(farmer_rpc_port)
if signage_points is None:
return
signage_points.reverse()
if limit != 0:
signage_points = signage_points[:limit]
for signage_point in signage_points:
        print(
            f"Hash: {signage_point['signage_point']['challenge_hash']} "
            f"Index: {signage_point['signage_point']['signage_point_index']}"
        )
async def summary(rpc_port: int, wallet_rpc_port: int, harvester_rpc_port: int, farmer_rpc_port: int) -> None:
amounts = await get_wallets_stats(wallet_rpc_port)
plots = await get_plots(harvester_rpc_port)
blockchain_state = await get_blockchain_state(rpc_port)
farmer_running = await is_farmer_running(farmer_rpc_port)
print("Farming status: ", end="")
if blockchain_state is None:
print("Not available")
elif blockchain_state["sync"]["sync_mode"]:
print("Syncing")
elif not blockchain_state["sync"]["synced"]:
print("Not synced or not connected to peers")
elif not farmer_running:
print("Not running")
else:
print("Farming")
if amounts is not None:
print(f"Total chia farmed: {amounts['farmed_amount'] / units['chia']}")
print(f"User transaction fees: {amounts['fee_amount'] / units['chia']}")
print(f"Block rewards: {(amounts['farmer_reward_amount'] + amounts['pool_reward_amount']) / units['chia']}")
print(f"Last height farmed: {amounts['last_height_farmed']}")
else:
print("Total chia farmed: Unknown")
print("User transaction fees: Unknown")
print("Block rewards: Unknown")
print("Last height farmed: Unknown")
total_plot_size = 0
if plots is not None:
total_plot_size = sum(map(lambda x: x["file_size"], plots["plots"]))
print(f"Plot count: {len(plots['plots'])}")
print("Total size of plots: ", end="")
plots_space_human_readable = total_plot_size / 1024 ** 3
if plots_space_human_readable >= 1024 ** 2:
plots_space_human_readable = plots_space_human_readable / (1024 ** 2)
print(f"{plots_space_human_readable:.3f} PiB")
elif plots_space_human_readable >= 1024:
plots_space_human_readable = plots_space_human_readable / 1024
print(f"{plots_space_human_readable:.3f} TiB")
else:
print(f"{plots_space_human_readable:.3f} GiB")
else:
print("Plot count: Unknown")
print("Total size of plots: Unknown")
if blockchain_state is not None:
print("Estimated network space: ", end="")
network_space_human_readable = blockchain_state["space"] / 1024 ** 4
if network_space_human_readable >= 1024:
network_space_human_readable = network_space_human_readable / 1024
print(f"{network_space_human_readable:.3f} PiB")
else:
print(f"{network_space_human_readable:.3f} TiB")
else:
print("Estimated network space: Unknown")
if blockchain_state is not None and plots is not None:
proportion = total_plot_size / blockchain_state["space"] if blockchain_state["space"] else 0
minutes = (await get_average_block_time(rpc_port) / 60) / proportion if proportion else 0
print("Expected time to win: ", end="")
if minutes == 0:
print("Unknown")
elif minutes > 60 * 24:
print(f"{math.floor(minutes/(60*24))} days")
elif minutes > 60:
print(f"{math.floor(minutes/60)} hours")
else:
print(f"{math.floor(minutes)} minutes")
else:
print("Expected time to win: Unknown")
print("Note: log into your key using 'chia wallet show' to see rewards for each key")
| 40.326923
| 119
| 0.673247
|
4a130cd320bce08ab8920a47c9a505340e2806ef
| 3,810
|
py
|
Python
|
smallbusiness/hooks.py
|
ashish-greycube/smallbusiness
|
4aeb7e31f599c5ee1b0077cfae56ee207c748f28
|
[
"MIT"
] | null | null | null |
smallbusiness/hooks.py
|
ashish-greycube/smallbusiness
|
4aeb7e31f599c5ee1b0077cfae56ee207c748f28
|
[
"MIT"
] | null | null | null |
smallbusiness/hooks.py
|
ashish-greycube/smallbusiness
|
4aeb7e31f599c5ee1b0077cfae56ee207c748f28
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "smallbusiness"
app_title = "Small Business App"
app_publisher = "GreyCube Technologies"
app_description = "It is scale down version of erpnext for small business"
app_icon = "octicon octicon-squirrel"
app_color = "#2defbb"
app_email = "admin@greycube.in"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/smallbusiness/css/smallbusiness.css"
app_include_css = [
"/assets/smallbusiness/css/bdtheme.css",
"/assets/smallbusiness/css/skin-blue.css",
"/assets/smallbusiness/css/custom.css",
"/assets/smallbusiness/css/temp.css",
]
#app_include_css = "/assets/ni_dark_theme/css/ni.dark.theme.css"
#app_include_js = ["/assets/smallbusiness/js/smallbusiness.js"]
app_include_js = [
"/assets/smallbusiness/js/smallbusiness.js",
"/assets/smallbusiness/js/bdtheme.js",
"/assets/smallbusiness/js/custom.js",
"/assets/js/bdtheme-template.min.js",
]
# include js, css files in header of web template
# web_include_css = "/assets/smallbusiness/css/smallbusiness.css"
web_include_css = "/assets/smallbusiness/css/bdtheme-web.css"
# web_include_js = "/assets/smallbusiness/js/smallbusiness.js"
# include js in page
page_js = {"modules" : "public/js/smallbusiness.js"}
# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
website_context = {
"favicon": "/assets/smallbusiness/images/favicon.png",
"splash_image": "/assets/smallbusiness/images/icon.png"
}
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Website user home page (by function)
# get_website_user_home_page = "smallbusiness.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
before_install = "smallbusiness.install.before_install"
#after_install = "smallbusiness.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "smallbusiness.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "smallbusiness.tasks.all"
# ],
# "daily": [
# "smallbusiness.tasks.daily"
# ],
# "hourly": [
# "smallbusiness.tasks.hourly"
# ],
# "weekly": [
# "smallbusiness.tasks.weekly"
# ]
# "monthly": [
# "smallbusiness.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "smallbusiness.install.before_tests"
# Overriding Whitelisted Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "smallbusiness.event.get_events"
# }
fixtures = [
    {
        "doctype": "DocType",
        "filters": {"custom": ["=", "1"]}
    },
    "Custom Field",
    "Custom Script",
    "Property Setter",
    "Print Format"
]
| 25.918367
| 81
| 0.676378
|
4a130dcf1ee0ccd414b6a27b466229aed524039b
| 8,022
|
py
|
Python
|
archivedtst/archive/test_scripts/test_functions 5.py
|
judejeh/rom-comma
|
2cace7c4d9d72a35237bc7ddc0f54aec3b9b1d63
|
[
"BSD-3-Clause"
] | 1
|
2021-06-08T16:01:09.000Z
|
2021-06-08T16:01:09.000Z
|
archivedtst/archive/test_scripts/test_functions 5.py
|
judejeh/rom-comma
|
2cace7c4d9d72a35237bc7ddc0f54aec3b9b1d63
|
[
"BSD-3-Clause"
] | null | null | null |
archivedtst/archive/test_scripts/test_functions 5.py
|
judejeh/rom-comma
|
2cace7c4d9d72a35237bc7ddc0f54aec3b9b1d63
|
[
"BSD-3-Clause"
] | 2
|
2021-07-05T11:58:05.000Z
|
2021-11-06T17:35:11.000Z
|
# BSD 3-Clause License
#
# Copyright (c) 2019, Robert A. Milton
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Run this module first thing, to test your installation of romcomma.
**Contents**:
**predict**: Prediction using a GaussianBundle.
**test_input**: A rudimentary test input, for installation testing.
"""
from romcomma import distribution, function, data, model
from romcomma.typing_ import NP
from numpy import zeros, eye, pi, full, matmul
from pathlib import Path
from scipy.stats import ortho_group
EFFECTIVELY_ZERO = 1.0E-64
BASE_PATH = Path('X:\\comma_group1\\Rom\\dat\\TestFunctions\\Scalar.2')
NOISELESS_DIR = 'Noiseless'
NORMAL_DIR = 'Normal'
UNIFORM_DIR = 'Uniform'
NORMAL_CDF_DIR = 'NormalCDF'
def store_dir(store_name: str, noise_std: float, CDF_scale: NP.Array=None) -> Path:
if noise_std <= EFFECTIVELY_ZERO:
return BASE_PATH / NOISELESS_DIR / store_name
elif CDF_scale is None:
return BASE_PATH / NORMAL_DIR / store_name
else:
return BASE_PATH / NORMAL_CDF_DIR / store_name
def scalar_function_of_normal(store_name: str, N: int, M: int, X_std: float, noise_std: float, CDF_scale: NP.Array=None, CDF_loc: NP.Array=None,
pre_function_with_parameters: function.CallableWithParameters = None,
function_with_parameters: function.CallableWithParameters = None) -> data.Store:
X_marginal = distribution.Univariate('norm', loc=0, scale=X_std)
X_dist = distribution.Multivariate.Independent(M=M, marginals=X_marginal)
noise_dist = (distribution.Multivariate.Normal(mean=zeros(1, dtype=float), covariance=noise_std ** 2 * eye(1, dtype=float))
if noise_std > EFFECTIVELY_ZERO else None)
return function.sample(store_dir=store_dir(store_name, noise_std, CDF_scale), N=N, X_distribution=X_dist,
X_sample_design=distribution.SampleDesign.LATIN_HYPERCUBE, CDF_scale=CDF_scale,
CDF_loc=CDF_loc, pre_function_with_parameters=pre_function_with_parameters,
functions_with_parameters=function_with_parameters,
noise_distribution=noise_dist, noise_sample_design=distribution.SampleDesign.LATIN_HYPERCUBE)
def reverse_matrix(M: int) -> NP.Matrix:
result = zeros((M, M), dtype=float)
for i in range(M):
result[i, M-i-1] = 1.0
return result
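# e.g. reverse_matrix(3) is the exchange matrix
# [[0, 0, 1], [0, 1, 0], [1, 0, 0]], which reverses coordinate order.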
def run_roms(M: int, N: int, K:int, random: bool, noisy: bool):
name = 'ard'
kernel_parameters = model.gpy_.Kernel.ExponentialQuadratic.Parameters(lengthscale=full((1, M), 0.2, dtype=float))
store_name = 'sin.u1.'
CDF_scale = 2 * pi
CDF_loc = pi
function_with_parameters = function.CallableWithParameters(function=function.ishigami, parameters={'a': 0, 'b': 0})
store_name = store_name + '{0:d}.{1:d}'.format(N, M)
if random:
pre_function_with_parameters = function.CallableWithParameters(function=function.linear, parameters={'matrix': ortho_group.rvs(M)})
store_name += '.random'
else:
pre_function_with_parameters = None
store_name += '.rom'
if noisy:
noise_std = 0.001
parameters = model.gpy_.GP.DEFAULT_PARAMETERS._replace(kernel=kernel_parameters)
else:
noise_std = 0
parameters = model.gpy_.GP.DEFAULT_PARAMETERS._replace(kernel=kernel_parameters, e_floor=1E-6)
store = scalar_function_of_normal(store_name=store_name, N=N, M=M, X_std=1.0, noise_std=noise_std, CDF_scale=CDF_scale, CDF_loc=CDF_loc,
pre_function_with_parameters=pre_function_with_parameters,
function_with_parameters=function_with_parameters)
data.Fold.into_K_folds(parent=store, K=K, shuffled_before_folding=False, standard=data.Store.Standard.mean_and_std,
replace_empty_test_with_data_=True)
model.run.GPs(module=model.run.Module.GPY_, name=name, store=store, M_Used=-1, parameters=parameters, optimize=True, test=True, sobol=True)
sobol_options = {'semi_norm': model.base.Sobol.SemiNorm.DEFAULT_META, 'N_exploit': 1, 'N_explore': 2048, 'options': {'gtol': 1.0E-12}}
rom_options = {'iterations': 6, 'guess_identity_after_iteration': 2, 'sobol_optimizer_options': sobol_options,
'gp_initializer': model.base.ROM.GP_Initializer.CURRENT_WITH_GUESSED_LENGTHSCALE,
'gp_optimizer_options': model.run.Module.GPY_.value.GP.DEFAULT_OPTIMIZER_OPTIONS}
model.run.ROMs(module=model.run.Module.GPY_, name='rom', store=store, source_gp_name='ard', Mu=-1, Mx=-1, optimizer_options=rom_options)
def predict_roms(M: int, N: int, random: bool, noisy: bool):
store_name = 'sin.u1.'
CDF_scale = 2 * pi
CDF_loc = pi
function_with_parameters = function.CallableWithParameters(function=function.ishigami, parameters={'a': 0, 'b': 0})
store_name = store_name + '{0:d}.{1:d}'.format(N, M)
store_name += '.random' if random else '.rom'
noise_std = 0.0001 if noisy else 0
store = data.Store(store_dir(store_name, noise_std, CDF_scale), data.Store.InitMode.READ_META_ONLY)
fold = data.Fold(store, 0)
rom = model.gpy_.ROM.from_ROM(fold=fold, name='rom', suffix='.test.full')
model_theta = rom.sobol.parameters_read.Theta
data_theta = function.linear_matrix_from_meta(store)
pre_function_with_parameters = (function.CallableWithParameters(function=function.linear, parameters={'matrix': data_theta}) if random
else None)
test_store = scalar_function_of_normal(store_name=store_name + "\\test", N=N, M=M, X_std=1.0, noise_std=noise_std, CDF_scale=CDF_scale,
CDF_loc=CDF_loc,
pre_function_with_parameters=pre_function_with_parameters,
function_with_parameters=function_with_parameters)
fold.set_test_data(df=test_store.data.df)
rom.sobol.gp.test()
result = matmul(model_theta, data_theta.T)
print(result)
if __name__ == '__main__':
for N in (200, 400, 800, 1600, 3200, 6400):
for random in (True, False):
for noisy in (True, False):
for M in (5, 10):
run_roms(M, N, 2, random=random, noisy=noisy)
"""
for N in (100, 1000, 5000):
for random in (True, False):
for noisy in (True, False):
for M in (5,):
run_roms(M, N, random=random, noisy=noisy)
"""
| 52.431373
| 144
| 0.693468
|
4a130ef5f091fb8831942e6268a511a46103389d
| 159
|
py
|
Python
|
setup.py
|
SumnerLab/CASA-Dialogue-Act-Classifier
|
6b7dacd250b7231878902e8ccc48fb7390212935
|
[
"MIT"
] | 1
|
2021-01-04T21:38:24.000Z
|
2021-01-04T21:38:24.000Z
|
setup.py
|
SumnerLab/CASA-Dialogue-Act-Classifier
|
6b7dacd250b7231878902e8ccc48fb7390212935
|
[
"MIT"
] | null | null | null |
setup.py
|
SumnerLab/CASA-Dialogue-Act-Classifier
|
6b7dacd250b7231878902e8ccc48fb7390212935
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(name='context_aware_dialogue_act_classifier',
version='0.1',
packages = find_packages(),
)
| 26.5
| 51
| 0.716981
|
4a130f4de9864a677f237b756bf704feb0800d64
| 6,875
|
py
|
Python
|
src/C2_clean_census.py
|
CityOfLosAngeles/planning-entitlements
|
cf83b57063b4e55722cc640172b529611b263b3a
|
[
"Apache-2.0"
] | null | null | null |
src/C2_clean_census.py
|
CityOfLosAngeles/planning-entitlements
|
cf83b57063b4e55722cc640172b529611b263b3a
|
[
"Apache-2.0"
] | 55
|
2020-01-08T17:50:17.000Z
|
2021-01-13T21:45:31.000Z
|
src/C2_clean_census.py
|
CityOfLosAngeles/planning-entitlements
|
cf83b57063b4e55722cc640172b529611b263b3a
|
[
"Apache-2.0"
] | 2
|
2020-07-16T02:10:30.000Z
|
2021-01-25T21:14:49.000Z
|
# Clean Census data
import numpy as np
import pandas as pd
import re
from datetime import datetime
from tqdm import tqdm
tqdm.pandas()
bucket_name = 'city-planning-entitlements'
"""
# Compile individual census tables into 1 parquet file
full_df = pd.DataFrame()
for name in ['commute', 'income', 'income_range', 'vehicles', 'tenure', 'race', 'raceethnicity']:
file_name = f'{name}_tract'
df = pd.read_csv(f's3://{bucket_name}/data/source/{file_name}.csv', dtype={"GEOID": "str"})
df = df[['GEOID', 'variable', 'estimate', 'year']]
df['GEOID'] = df.GEOID.str.pad(width = 11, side = 'left', fillchar = '0')
full_df = full_df.append(df, sort = False)
full_df.to_parquet(f's3://{bucket_name}/data/raw/raw_census.parquet')
"""
#--------------------------------------------------------------------#
## Functions to be used
#--------------------------------------------------------------------#
# (1) Tag ACS table
acs_tables = {
'S1903': 'income',
'B19001': 'incomerange',
'S0801': 'commute',
'S0802': 'vehicles',
'B25008': 'tenure',
'B02001': 'race',
'B01001': 'raceethnicity',
}
def tag_acs_table(df):
pattern = re.compile('([A-Za-z0-9]+)_')
df['table'] = df.progress_apply(
lambda row: acs_tables.get(pattern.match(row.variable).group(1)),
axis = 1
)
# Find the other B19001A, B19001B, etc tables and tag them
df['table'] = df.progress_apply(
lambda row: 'incomerange' if 'B19001' in row.variable else row.table,
axis = 1
)
df['table'] = df.progress_apply(
lambda row: 'raceethnicity' if 'B01001' in row.variable else row.table,
axis = 1
)
return df
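# Quick illustrative check of the tagging above (sketch only; the tiny frame
# below is made up and is not part of the pipeline):
#   _demo = pd.DataFrame({'variable': ['S1903_C02_001', 'B19001A_002', 'B01001H_003']})
#   tag_acs_table(_demo)['table'].tolist()  # -> ['income', 'incomerange', 'raceethnicity']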
# (2) Tag main variable
def income_vars(row):
if '_C01' in row.variable:
return 'hh'
elif '_C02' in row.variable:
return 'medincome'
elif '_C03' in row.variable:
return 'medincome'
def incomerange_vars(row):
if 'B19001_' in row.variable:
return 'total'
elif 'B19001A' in row.variable:
return 'white'
elif 'B19001B' in row.variable:
return 'black'
elif 'B19001C' in row.variable:
return 'amerind'
elif 'B19001D' in row.variable:
return 'asian'
elif 'B19001E' in row.variable:
return 'pacis'
elif 'B19001F' in row.variable:
return 'other'
elif 'B19001G' in row.variable:
return 'race2'
elif 'B19001H' in row.variable:
return 'nonhisp'
elif 'B19001I' in row.variable:
return 'hisp'
def vehicle_vars(row):
if 'C01' in row.variable:
return 'workers'
def commute_vars(row):
if 'C01' in row.variable:
return 'workers'
elif 'C02' in row.variable:
return 'male'
elif 'C03' in row.variable:
return 'female'
def tenure_vars(row):
if 'B25008' in row.variable:
return 'pop'
def race_vars(row):
if 'B02001' in row.variable:
return 'pop'
def race_eth_vars(row):
if 'B01001_' in row.variable:
return 'total'
elif 'B01001A' in row.variable:
return 'white'
elif 'B01001B' in row.variable:
return 'black'
elif 'B01001C' in row.variable:
return 'amerind'
elif 'B01001D' in row.variable:
return 'asian'
elif 'B01001E' in row.variable:
return 'pacis'
elif 'B01001F' in row.variable:
return 'other'
elif 'B01001G' in row.variable:
return 'race2'
elif 'B01001H' in row.variable:
return 'whitenonhisp'
elif 'B01001I' in row.variable:
return 'hisp'
main_vars_dict = {
'income': income_vars,
'incomerange': incomerange_vars,
'vehicles': vehicle_vars,
'commute': commute_vars,
'tenure': tenure_vars,
'race': race_vars,
'raceethnicity': race_eth_vars,
}
# (3) Tag secondary variable
# Secondary variable - use last 2 characters
income = {'01': 'total', '02': 'white', '03': 'black', '04': 'amerind', '05': 'asian',
'06': 'pacis', '07': 'other', '08': 'race2', '09': 'hisp', '10': 'nonhisp'}
incomerange = {'01': 'total', '02': 'lt10', '03': 'r10to14', '04': 'r15to19', '05': 'r20to24',
'06': 'r25to29', '07': 'r30to34', '08': 'r35to39', '09': 'r40to44', '10': 'r45to49',
'11': 'r50to59', '12': 'r60to74', '13': 'r75to99', '14': 'r100to124', '15': 'r125to149',
'16': 'r150to199', '17': 'gt200'}
vehicles = {'01': 'total', '94': 'veh0', '95': 'veh1', '96': 'veh2', '97': 'veh3'}
commute = {'01': 'total', '03': 'car1', '05': 'car2', '06': 'car3', '07': 'car4',
'09': 'transit', '10': 'walk', '11': 'bike', '12': 'other', '13': 'telecommute'}
tenure = {'01': 'total', '02': 'owner', '03': 'renter'}
race = {'01': 'total', '02': 'white', '03': 'black', '04': 'amerind', '05': 'asian',
'06': 'pacis', '07': 'other', '08': 'race2'}
raceethnicity = {'01': 'total'}
def tag_secondary_variable(df):
df['last2'] = df['variable'].str[-2:]
def pick_secondary_var(row):
if row.table=='income':
return income[row.last2]
elif row.table=='incomerange':
return incomerange[row.last2]
elif row.table=="vehicles":
return vehicles[row.last2]
elif row.table=="commute":
return commute[row.last2]
elif row.table=="tenure":
return tenure[row.last2]
elif row.table=="race":
return race[row.last2]
elif row.table=="raceethnicity":
return raceethnicity[row.last2]
df['second_var'] = df.progress_apply(pick_secondary_var, axis = 1)
return df
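# Worked example of the lookup above (illustrative): for a commute variable
# such as 'S0801_C01_013', last2 is '13', which maps to 'telecommute' in the
# commute dict, so the final new_var column would read 'workers_telecommute'.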
#--------------------------------------------------------------------#
# Apply functions
#--------------------------------------------------------------------#
time0 = datetime.now()
print(f'Start time: {time0}')
df = pd.read_parquet(f's3://{bucket_name}/data/raw/raw_census.parquet')
time1 = datetime.now()
print(f'Read in parquet: {time1}')
# (1) Tag ACS table
df = tag_acs_table(df)
time2 = datetime.now()
print(f'Tag ACS table: {time2 - time1}')
# (2) Tag main variable
df['main_var'] = df.progress_apply(lambda row: main_vars_dict[row['table']](row), axis = 1)
time3 = datetime.now()
print(f'Tag main var: {time3 - time2}')
# (3) Tag secondary variable
df = tag_secondary_variable(df)
time4 = datetime.now()
print(f'Tag secondary var: {time4 - time3}')
# Create new_var column
df['new_var'] = df.main_var + "_" + df.second_var
# Export
df.to_parquet(f's3://{bucket_name}/data/intermediate/census_tagged.parquet')
time5 = datetime.now()
print(f'Total execution time: {time5 - time0}')
| 30.021834
| 100
| 0.554909
|
4a13106cf6b02b3e038ed72eaa86b4936ae46eae
| 389
|
py
|
Python
|
safe_explorer/utils/path.py
|
FelippeRoza/safe-explorer
|
de0e0d2107578fac4d9fdc774f6d8094f9d15168
|
[
"Apache-2.0"
] | 33
|
2020-05-25T01:19:08.000Z
|
2022-03-29T02:38:51.000Z
|
safe_explorer/utils/path.py
|
FelippeRoza/safe-explorer
|
de0e0d2107578fac4d9fdc774f6d8094f9d15168
|
[
"Apache-2.0"
] | 2
|
2020-12-22T09:01:34.000Z
|
2021-04-14T08:02:23.000Z
|
safe_explorer/utils/path.py
|
FelippeRoza/safe-explorer
|
de0e0d2107578fac4d9fdc774f6d8094f9d15168
|
[
"Apache-2.0"
] | 13
|
2019-10-19T07:59:40.000Z
|
2022-03-17T03:07:52.000Z
|
import inspect
import os
def get_project_root_dir():
return f"{get_current_file_path()}/../../"
def get_current_file_path():
caller_file_path = os.path.abspath(inspect.getfile(inspect.currentframe().f_back))
return os.path.dirname(caller_file_path)
def get_files_in_path(path):
return [f for f in os.listdir(path) \
if os.path.isfile(os.path.join(path, f))]
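# Minimal usage sketch (module path as in this repo; what lives under the
# root is an assumption):
#   from safe_explorer.utils.path import get_project_root_dir, get_files_in_path
#   root = get_project_root_dir()        # resolved relative to this file
#   files = get_files_in_path(root)      # plain files directly under the root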
| 27.785714
| 86
| 0.714653
|
4a13108decb5ade903a3ddd94e76c6478ebe7dc9
| 1,829
|
py
|
Python
|
botctl/botctl.py
|
wizeline/botctl
|
85f69f7fa463246661823c9686e6550d4b4ca03e
|
[
"MIT"
] | null | null | null |
botctl/botctl.py
|
wizeline/botctl
|
85f69f7fa463246661823c9686e6550d4b4ca03e
|
[
"MIT"
] | null | null | null |
botctl/botctl.py
|
wizeline/botctl
|
85f69f7fa463246661823c9686e6550d4b4ca03e
|
[
"MIT"
] | 1
|
2020-10-13T16:30:05.000Z
|
2020-10-13T16:30:05.000Z
|
from botctl.common import command_callback, execute_subcommand, parse_variable
from botctl.types import PlatformEnvironment, BotControlCommand
class DelCommand(BotControlCommand):
"""Usage:
$ botctl del <variable>
"""
__commandname__ = 'botctl'
@command_callback
def __call__(self, variable_name):
environment, variable = parse_variable(self.config, variable_name)
self.config.del_value(environment, variable)
self.config.commit()
return 0
class GetCommand(BotControlCommand):
"""Usage:
$ botctl get <variable>
"""
__commandname__ = 'botctl'
@command_callback
def __call__(self, variable_name):
environment, variable = parse_variable(self.config, variable_name)
print(self.config.get_value(environment, variable))
return 0
class SetCommand(BotControlCommand):
"""Usage
$ botctl set <variable> <value>
"""
__commandname__ = 'botctl'
@command_callback
def __call__(self, variable_name, variable_value):
environment, variable = parse_variable(self.config, variable_name)
self.config.put_value(environment, variable, variable_value)
self.config.commit()
return 0
class ChangeEnvironmentCommand(BotControlCommand):
"""Usage
$ botctl chenv {local | development | production}
"""
__commandname__ = 'botctl'
@command_callback
def __call__(self, environment_name):
environment = PlatformEnvironment(environment_name.upper())
self.config.set_environment(environment)
self.config.commit()
return 0
def main():
callbacks = {
'set': SetCommand,
'get': GetCommand,
'del': DelCommand,
'chenv': ChangeEnvironmentCommand
}
return execute_subcommand('botctl', **callbacks)
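# Illustrative CLI session (sketch; the exact variable syntax is defined by
# parse_variable in botctl.common and is assumed here to be
# '<environment>.<name>'):
#   $ botctl chenv development
#   $ botctl set development.api_token abc123   # hypothetical variable
#   $ botctl get development.api_token
#   $ botctl del development.api_token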
| 26.897059
| 78
| 0.681247
|
4a1310e9723e7509ebe088a0aafc893eeeefc3e8
| 2,519
|
py
|
Python
|
games/game.py
|
carllacan/qlearning
|
a4fe2296fb6733c060ae20cf1a6bc3123078ebd7
|
[
"MIT"
] | null | null | null |
games/game.py
|
carllacan/qlearning
|
a4fe2296fb6733c060ae20cf1a6bc3123078ebd7
|
[
"MIT"
] | null | null | null |
games/game.py
|
carllacan/qlearning
|
a4fe2296fb6733c060ae20cf1a6bc3123078ebd7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 1 10:18:38 2017
@author: carles
"""
import numpy as np
class Game:
def __init__(self, grid_height, grid_width, frames_used):
"""
To make printing easier the coordinates refer to
distance from the top border
distance from the left border
"""
self.frames_used = frames_used # do away with this
self.grid_height = grid_height
self.grid_width = grid_width
self.grid_shape = self.grid_height, self.grid_width
self.grid_size = grid_height*grid_width
self.grid = np.zeros(self.grid_shape)
self.gameover = False
self.last_frames = []
        for _ in range(frames_used):
            self.remember_frame(self.grid)
def remember_frame(self, state):
self.last_frames.append(state)
if len(self.last_frames) > self.frames_used:
self.last_frames.pop(0)
def transition(self, action):
"""
Inputs: action
Each game needs to have a function that computes its next state
given an action. It also returns the reward.
Outputs: reward
"""
return 0 # return reward
def get_state(self):
"""
Returns the grid.
"""
# return self.grid
return np.array(self.last_frames)
# easier to use built-in arrays and then convert to np.array
def get_actions(self):
"""
Get the possible actions for this game
"""
return []
def tile_symbols(self, tile):
"""
Prettifies the printed screen. Each game can assign a character to
each kind of tile. By default use simply the numbers in the grid.
"""
return tile
def set_tile(self, pos, v):
"""
Converts whatever pos is to a tuple and modifies the grid
"""
self.grid[tuple(pos)] = v
def draw_screen(self):
"""
Print what is in each cell of the grid.
"""
w = len(str(self.tile_symbols(0))) # width of the tile symbols
print("╔" + "═"*self.grid_width*w + "╗")
for row in self.grid:
print("║", end="")
for tile in row:
print(self.tile_symbols(int(tile)), end="")
print("║")
print("╚" + "═"*self.grid_width*w + "╝")
def reset(self):
        self.__init__(self.grid_height, self.grid_width, self.frames_used)
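# Minimal subclass sketch (illustrative only; LineGame is hypothetical): a
# 1x5 "game" where the agent shifts a marker left or right and is rewarded
# for reaching the right edge.
#
#   class LineGame(Game):
#       def __init__(self, frames_used=1):
#           super().__init__(1, 5, frames_used)
#           self.pos = 2
#           self.set_tile((0, self.pos), 1)
#       def get_actions(self):
#           return [0, 1]  # 0 = left, 1 = right
#       def transition(self, action):
#           self.set_tile((0, self.pos), 0)
#           self.pos = max(0, min(self.grid_width - 1, self.pos + (1 if action else -1)))
#           self.set_tile((0, self.pos), 1)
#           self.remember_frame(self.grid.copy())
#           return 1 if self.pos == self.grid_width - 1 else 0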
| 28.625
| 74
| 0.55657
|
4a1311ecffab88b51d19b2cfe16c33b5102a1597
| 4,868
|
py
|
Python
|
pylib/cqlshlib/test/cassconnect.py
|
haaawk/scylla-tools-java
|
283ce3a58a2b04e60a84ce6744eee55ce09b3801
|
[
"Apache-2.0"
] | 7
|
2021-04-26T14:52:42.000Z
|
2021-12-03T22:53:17.000Z
|
pylib/cqlshlib/test/cassconnect.py
|
haaawk/scylla-tools-java
|
283ce3a58a2b04e60a84ce6744eee55ce09b3801
|
[
"Apache-2.0"
] | null | null | null |
pylib/cqlshlib/test/cassconnect.py
|
haaawk/scylla-tools-java
|
283ce3a58a2b04e60a84ce6744eee55ce09b3801
|
[
"Apache-2.0"
] | 1
|
2017-05-18T14:40:23.000Z
|
2017-05-18T14:40:23.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import contextlib
import tempfile
import os.path
from .basecase import cql, cqlsh, cqlshlog, TEST_HOST, TEST_PORT, rundir, policy, quote_name
from .run_cqlsh import run_cqlsh, call_cqlsh
test_keyspace_init = os.path.join(rundir, 'test_keyspace_init.cql')
def get_cassandra_connection(cql_version=cqlsh.DEFAULT_CQLVER):
if cql_version is None:
cql_version = cqlsh.DEFAULT_CQLVER
conn = cql((TEST_HOST,), TEST_PORT, cql_version=cql_version, load_balancing_policy=policy)
# until the cql lib does this for us
conn.cql_version = cql_version
return conn
def get_cassandra_cursor(cql_version=cqlsh.DEFAULT_CQLVER):
return get_cassandra_connection(cql_version=cql_version).cursor()
TEST_KEYSPACES_CREATED = []
def get_keyspace():
return None if len(TEST_KEYSPACES_CREATED) == 0 else TEST_KEYSPACES_CREATED[-1]
def make_ks_name():
# abuse mktemp to get a quick random-ish name
return os.path.basename(tempfile.mktemp(prefix='CqlshTests_'))
def create_keyspace(cursor):
ksname = make_ks_name()
qksname = quote_name(ksname)
cursor.execute('''
CREATE KEYSPACE %s WITH replication =
{'class': 'SimpleStrategy', 'replication_factor': 1};
    ''' % qksname)
cursor.execute('USE %s;' % qksname)
TEST_KEYSPACES_CREATED.append(ksname)
return ksname
def split_cql_commands(source):
ruleset = cql_rule_set()
statements, endtoken_escaped = ruleset.cql_split_statements(source)
if endtoken_escaped:
raise ValueError("CQL source ends unexpectedly")
return [ruleset.cql_extract_orig(toks, source) for toks in statements if toks]
def execute_cql_commands(cursor, source, logprefix='INIT: '):
for cql in split_cql_commands(source):
cqlshlog.debug(logprefix + cql)
cursor.execute(cql)
def execute_cql_file(cursor, fname):
with open(fname) as f:
return execute_cql_commands(cursor, f.read())
def create_db():
with cassandra_cursor(ks=None) as c:
k = create_keyspace(c)
execute_cql_file(c, test_keyspace_init)
return k
def remove_db():
with cassandra_cursor(ks=None) as c:
c.execute('DROP KEYSPACE %s' % quote_name(TEST_KEYSPACES_CREATED.pop(-1)))
@contextlib.contextmanager
def cassandra_connection(cql_version=cqlsh.DEFAULT_CQLVER):
"""
    Make a Cassandra CQL connection with the given CQL version.
The connection is returned as the context manager's value, and it will be
closed when the context exits.
"""
conn = get_cassandra_connection(cql_version=cql_version)
try:
yield conn
finally:
conn.close()
@contextlib.contextmanager
def cassandra_cursor(cql_version=None, ks=''):
"""
Make a Cassandra CQL connection with the given CQL version and get a cursor
for it, and optionally connect to a given keyspace. If ks is the empty
string (default), connect to the last test keyspace created. If ks is None,
do not connect to any keyspace. Otherwise, attempt to connect to the
keyspace named.
The cursor is returned as the context manager's value, and the connection
will be closed when the context exits.
"""
if ks == '':
ks = get_keyspace()
conn = get_cassandra_connection(cql_version=cql_version)
try:
c = conn.connect(ks)
# if ks is not None:
# c.execute('USE %s;' % quote_name(c, ks))
yield c
finally:
conn.shutdown()
def cql_rule_set():
return cqlsh.cql3handling.CqlRuleSet
class DEFAULTVAL: pass
def testrun_cqlsh(keyspace=DEFAULTVAL, **kwargs):
# use a positive default sentinel so that keyspace=None can be used
# to override the default behavior
if keyspace is DEFAULTVAL:
keyspace = get_keyspace()
return run_cqlsh(keyspace=keyspace, **kwargs)
def testcall_cqlsh(keyspace=None, **kwargs):
if keyspace is None:
keyspace = get_keyspace()
return call_cqlsh(keyspace=keyspace, **kwargs)
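# Illustrative end-to-end flow for a test module built on these helpers
# (sketch; 'some_table' is hypothetical):
#   create_db()                        # throwaway keyspace + init CQL
#   with cassandra_cursor() as c:      # connects to the keyspace just created
#       c.execute('SELECT * FROM some_table;')
#   remove_db()                        # drop the keyspace again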
| 34.524823
| 94
| 0.724938
|
4a131246b9bc949fd302424fb09266faf9b1f980
| 28,787
|
py
|
Python
|
python/ccxt/bleutrade.py
|
florije4ex/ccxt
|
1dba6c5e45c5e93292f1951e0a2411647a82624a
|
[
"MIT"
] | null | null | null |
python/ccxt/bleutrade.py
|
florije4ex/ccxt
|
1dba6c5e45c5e93292f1951e0a2411647a82624a
|
[
"MIT"
] | null | null | null |
python/ccxt/bleutrade.py
|
florije4ex/ccxt
|
1dba6c5e45c5e93292f1951e0a2411647a82624a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
class bleutrade(Exchange):
def describe(self):
return self.deep_extend(super(bleutrade, self).describe(), {
'id': 'bleutrade',
'name': 'Bleutrade',
'countries': ['BR'], # Brazil
'rateLimit': 1000,
'certified': False,
'has': {
'cancelOrder': True,
'CORS': True,
'createLimitOrder': False,
'createMarketOrder': False,
'createOrder': True,
'editOrder': False,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchLedger': True,
'fetchMarkets': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrderBook': True,
'fetchOrders': False,
'fetchOrderTrades': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': False,
'fetchWithdrawals': True,
'withdraw': False,
},
'timeframes': {
'1h': '1h',
'4h': '4h',
'8h': '8h',
'1d': '1d',
'1w': '1w',
},
'hostname': 'bleutrade.com',
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/30303000-b602dbe6-976d-11e7-956d-36c5049c01e7.jpg',
'api': {
'v3Private': 'https://{hostname}/api/v3/private',
'v3Public': 'https://{hostname}/api/v3/public',
},
'www': 'https://bleutrade.com',
'doc': [
'https://app.swaggerhub.com/apis-docs/bleu/white-label/3.0.0',
],
'fees': 'https://bleutrade.com/fees/',
},
'api': {
'v3Public': {
'get': [
'getassets',
'getmarkets',
'getticker',
'getmarketsummary',
'getmarketsummaries',
'getorderbook',
'getmarkethistory',
'getcandles',
],
},
'v3Private': {
'get': [
'statement',
],
'post': [
'getbalance',
'getbalances',
'buylimit',
'selllimit',
'buylimitami',
'selllimitami',
'buystoplimit',
'sellstoplimit',
'ordercancel',
'getopenorders',
'getcloseorders',
'getdeposithistory',
'getdepositaddress',
'getmytransactions',
'withdraw',
'directtransfer',
'getwithdrawhistory',
'getlimits',
],
},
},
'commonCurrencies': {
'EPC': 'Epacoin',
},
'exceptions': {
'exact': {
'ERR_INSUFICIENT_BALANCE': InsufficientFunds,
'ERR_LOW_VOLUME': BadRequest,
'Invalid form': BadRequest,
},
'broad': {
'Order is not open': InvalidOrder,
'Invalid Account / Api KEY / Api Secret': AuthenticationError, # also happens when an invalid nonce is used
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'taker': 0.25 / 100,
'maker': 0.25 / 100,
},
},
'options': {
'parseOrderStatus': True,
},
})
# undocumented api calls
# https://bleutrade.com/api/v3/public/tradingview/symbols?symbol=ETH_BTC
# https://bleutrade.com/api/v3/public/tradingview/config
# https://bleutrade.com/api/v3/public/tradingview/time
# https://bleutrade.com/api/v3/private/getcloseorders?market=ETH_BTC
# https://bleutrade.com/config contains the fees
def fetch_currencies(self, params={}):
response = self.v3PublicGetGetassets(params)
items = response['result']
result = {}
for i in range(0, len(items)):
# {Asset: 'USDT',
# AssetLong: 'Tether',
# MinConfirmation: 4,
# WithdrawTxFee: 1,
# WithdrawTxFeePercent: 0,
# SystemProtocol: 'ETHERC20',
# IsActive: True,
# InfoMessage: '',
# MaintenanceMode: False,
# MaintenanceMessage: '',
# FormatPrefix: '',
# FormatSufix: '',
# DecimalSeparator: '.',
# ThousandSeparator: ',',
# DecimalPlaces: 8,
# Currency: 'USDT',
# CurrencyLong: 'Tether',
# CoinType: 'ETHERC20'}
item = items[i]
id = self.safe_string(item, 'Asset')
code = self.safe_currency_code(id)
result[code] = {
'id': id,
'code': code,
'name': self.safe_string(item, 'AssetLong'),
'active': self.safe_value(item, 'IsActive') and not self.safe_value(item, 'MaintenanceMode'),
'fee': self.safe_float(item, 'WithdrawTxFee'),
'precision': self.safe_float(item, 'DecimalPlaces'),
'info': item,
'limits': self.limits,
}
return result
def fetch_markets(self, params={}):
# https://github.com/ccxt/ccxt/issues/5668
response = self.v3PublicGetGetmarkets(params)
result = []
markets = self.safe_value(response, 'result')
for i in range(0, len(markets)):
market = markets[i]
# {MarketName: 'LTC_USDT',
# MarketAsset: 'LTC',
# BaseAsset: 'USDT',
# MarketAssetLong: 'Litecoin',
# BaseAssetLong: 'Tether',
# IsActive: True,
# MinTradeSize: 0.0001,
# InfoMessage: '',
# MarketCurrency: 'LTC',
# BaseCurrency: 'USDT',
# MarketCurrencyLong: 'Litecoin',
# BaseCurrencyLong: 'Tether'}
id = self.safe_string(market, 'MarketName')
baseId = self.safe_string(market, 'MarketAsset')
quoteId = self.safe_string(market, 'BaseAsset')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
precision = {
'amount': 8,
'price': 8,
}
active = self.safe_value(market, 'IsActive', False)
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': active,
'info': market,
'precision': precision,
'maker': self.fees['trading']['maker'],
'taker': self.fees['trading']['taker'],
'limits': {
'amount': {
'min': self.safe_float(market, 'MinTradeSize'),
'max': None,
},
'price': {
'min': math.pow(10, -precision['price']),
'max': None,
},
},
})
return result
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
request = {
'market': self.market_id(symbol),
'type': 'ALL',
}
if limit is not None:
request['depth'] = limit # 50
response = self.v3PublicGetGetorderbook(self.extend(request, params))
orderbook = self.safe_value(response, 'result')
if not orderbook:
raise ExchangeError(self.id + ' no orderbook data in ' + self.json(response))
return self.parse_order_book(orderbook, None, 'buy', 'sell', 'Rate', 'Quantity')
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = self.v3PublicGetGetmarketsummary(self.extend(request, params))
ticker = self.safe_value(response, 'result', {})
return self.parse_ticker(ticker, market)
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.v3PublicGetGetmarketsummaries(params)
result = self.safe_value(response, 'result')
tickers = []
for i in range(0, len(result)):
ticker = self.parse_ticker(result[i])
tickers.append(ticker)
return self.filter_by_array(tickers, 'symbol', symbols)
def parse_ticker(self, ticker, market=None):
# {TimeStamp: '2020-01-14 14:32:28',
# MarketName: 'LTC_USDT',
# MarketAsset: 'LTC',
# BaseAsset: 'USDT',
# MarketAssetName: 'Litecoin',
# BaseAssetName: 'Tether',
# PrevDay: 49.2867503,
# High: 56.78622664,
# Low: 49.27384025,
# Last: 53.94,
# Average: 51.37509368,
# Volume: 1.51282404,
# BaseVolume: 77.72147677,
# Bid: 53.62070218,
# Ask: 53.94,
# IsActive: 'true',
# InfoMessage: '',
# MarketCurrency: 'Litecoin',
# BaseCurrency: 'Tether'}
timestamp = self.parse8601(self.safe_string(ticker, 'TimeStamp'))
marketId = self.safe_string(ticker, 'MarketName')
symbol = self.safe_symbol(marketId, market, '_')
previous = self.safe_float(ticker, 'PrevDay')
last = self.safe_float(ticker, 'Last')
change = None
percentage = None
if last is not None:
if previous is not None:
change = last - previous
if previous > 0:
percentage = (change / previous) * 100
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'High'),
'low': self.safe_float(ticker, 'Low'),
'bid': self.safe_float(ticker, 'Bid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'Ask'),
'askVolume': None,
'vwap': None,
'open': previous,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': None,
'baseVolume': self.safe_float(ticker, 'Volume'),
'quoteVolume': self.safe_float(ticker, 'BaseVolume'),
'info': ticker,
}
def parse_ohlcv(self, ohlcv, market=None):
return [
self.parse8601(ohlcv['TimeStamp'] + '+00:00'),
self.safe_float(ohlcv, 'Open'),
self.safe_float(ohlcv, 'High'),
self.safe_float(ohlcv, 'Low'),
self.safe_float(ohlcv, 'Close'),
self.safe_float(ohlcv, 'Volume'),
]
def fetch_ohlcv(self, symbol, timeframe='15m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'period': self.timeframes[timeframe],
'market': market['id'],
'count': limit,
}
response = self.v3PublicGetGetcandles(self.extend(request, params))
result = self.safe_value(response, 'result', [])
return self.parse_ohlcvs(result, market, timeframe, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
if type != 'limit':
            # todo: STOP-LIMIT and AMI order types are supported by the exchange but not implemented here
raise InvalidOrder(self.id + ' allows limit orders only')
self.load_markets()
request = {
'rate': self.price_to_precision(symbol, price),
'quantity': self.amount_to_precision(symbol, amount),
'tradeType': '1' if (side == 'buy') else '0',
'market': self.market_id(symbol),
}
response = None
if side == 'buy':
response = self.v3PrivatePostBuylimit(self.extend(request, params))
else:
response = self.v3PrivatePostSelllimit(self.extend(request, params))
# {success: True,
# message: "",
# result: "161105236"},
return {
'info': response,
'id': self.safe_string(response, 'result'),
}
def cancel_order(self, id, symbol=None, params={}):
request = {
'orderid': id,
}
response = self.v3PrivatePostOrdercancel(self.extend(request, params))
# {success: True, message: '', result: ''}
return response
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = self.v3PrivatePostGetopenorders(self.extend(request, params))
items = self.safe_value(response, 'result', [])
return self.parse_orders(items, market, since, limit)
def fetch_balance(self, params={}):
self.load_markets()
response = self.v3PrivatePostGetbalances(params)
result = {'info': response}
items = response['result']
for i in range(0, len(items)):
item = items[i]
currencyId = self.safe_string(item, 'Asset')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_float(item, 'Available')
account['total'] = self.safe_float(item, 'Balance')
result[code] = account
return self.parse_balance(result)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = self.v3PrivatePostGetcloseorders(self.extend(request, params))
orders = self.safe_value(response, 'result', [])
return self.parse_orders(orders, market, since, limit)
def fetch_transactions_with_method(self, method, code=None, since=None, limit=None, params={}):
self.load_markets()
response = getattr(self, method)(params)
transactions = self.safe_value(response, 'result', [])
return self.parse_transactions(transactions, code, since, limit)
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
return self.fetch_transactions_with_method('v3PrivatePostGetdeposithistory', code, since, limit, params)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
return self.fetch_transactions_with_method('v3PrivatePostGetwithdrawhistory', code, since, limit, params)
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'asset': currency['id'],
}
response = self.v3PrivatePostGetdepositaddress(self.extend(request, params))
# {success: True,
# message: '',
# result:
# {Asset: 'ETH',
# AssetName: 'Ethereum',
# DepositAddress: '0x748c5c8jhksjdfhd507d3aa9',
# Currency: 'ETH',
# CurrencyName: 'Ethereum'}}
item = response['result']
address = self.safe_string(item, 'DepositAddress')
return {
'currency': code,
'address': self.check_address(address),
# 'tag': tag,
'info': item,
}
def parse_ledger_entry_type(self, type):
# deposits don't seem to appear in here
types = {
'TRADE': 'trade',
'WITHDRAW': 'transaction',
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
#
# trade(both sides)
#
# {
# ID: 109660527,
# TimeStamp: '2018-11-14 15:12:57.140776',
# Asset: 'ETH',
# AssetName: 'Ethereum',
# Amount: 0.01,
# Type: 'TRADE',
# Description: 'Trade +, order id 133111123',
# Comments: '',
# CoinSymbol: 'ETH',
# CoinName: 'Ethereum'
# }
#
# {
# ID: 109660526,
# TimeStamp: '2018-11-14 15:12:57.140776',
# Asset: 'BTC',
# AssetName: 'Bitcoin',
# Amount: -0.00031776,
# Type: 'TRADE',
# Description: 'Trade -, order id 133111123, fee -0.00000079',
# Comments: '',
# CoinSymbol: 'BTC',
# CoinName: 'Bitcoin'
# }
#
# withdrawal
#
# {
# ID: 104672316,
# TimeStamp: '2018-05-03 08:18:19.031831',
# Asset: 'DOGE',
# AssetName: 'Dogecoin',
# Amount: -61893.87864686,
# Type: 'WITHDRAW',
# Description: 'Withdraw: 61883.87864686 to address DD8tgehNNyYB2iqVazi2W1paaztgcWXtF6; fee 10.00000000',
# Comments: '',
# CoinSymbol: 'DOGE',
# CoinName: 'Dogecoin'
# }
#
code = self.safe_currency_code(self.safe_string(item, 'CoinSymbol'), currency)
description = self.safe_string(item, 'Description')
type = self.parse_ledger_entry_type(self.safe_string(item, 'Type'))
referenceId = None
fee = None
delimiter = ', ' if (type == 'trade') else '; '
parts = description.split(delimiter)
for i in range(0, len(parts)):
part = parts[i]
if part.find('fee') == 0:
part = part.replace('fee ', '')
feeCost = float(part)
if feeCost < 0:
feeCost = -feeCost
fee = {
'cost': feeCost,
'currency': code,
}
elif part.find('order id') == 0:
referenceId = part.replace('order id ', '')
#
# does not belong to Ledger, related to parseTransaction
#
# if part.find('Withdraw') == 0:
# details = part.split(' to address ')
# if len(details) > 1:
# address = details[1]
# }
#
timestamp = self.parse8601(self.safe_string(item, 'TimeStamp'))
amount = self.safe_float(item, 'Amount')
direction = None
if amount is not None:
direction = 'in'
if amount < 0:
direction = 'out'
amount = -amount
id = self.safe_string(item, 'ID')
return {
'id': id,
'info': item,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'direction': direction,
'account': None,
'referenceId': referenceId,
'referenceAccount': None,
'type': type,
'currency': code,
'amount': amount,
'before': None,
'after': None,
'status': 'ok',
'fee': fee,
}
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
self.load_markets()
# only seems to return 100 items and there is no documented way to change page size or offset
request = {
}
response = self.v3PrivatePostGetmytransactions(self.extend(request, params))
items = response['result']
return self.parse_ledger(items, code, since, limit)
def parse_order(self, order, market=None):
#
# fetchClosedOrders
#
# {OrderID: 89742658,
# Exchange: 'DOGE_BTC',
# Type: 'BUY',
# Quantity: 10000,
# QuantityRemaining: 0,
# QuantityBaseTraded: 0,
# Price: 6.6e-7,
# Status: 'OK',
# Created: '2018-02-16 08:55:36',
# Comments: ''}
#
# fetchOpenOrders
#
# {OrderID: 161105302,
# Exchange: 'ETH_BTC',
# Type: 'SELL',
# Quantity: 0.4,
# QuantityRemaining: 0.4,
# QuantityBaseTraded: 0,
# Price: 0.04,
# Status: 'OPEN',
# Created: '2020-01-22 09:21:27',
# Comments: {String: '', Valid: True}
side = self.safe_string(order, 'Type').lower()
status = self.parse_order_status(self.safe_string(order, 'Status'))
marketId = self.safe_string(order, 'Exchange')
symbol = self.safe_symbol(marketId, market, '_')
timestamp = None
if 'Created' in order:
timestamp = self.parse8601(order['Created'] + '+00:00')
price = self.safe_float(order, 'Price')
cost = None
amount = self.safe_float(order, 'Quantity')
remaining = self.safe_float(order, 'QuantityRemaining')
filled = None
if amount is not None and remaining is not None:
filled = amount - remaining
if not cost:
if price and filled:
cost = price * filled
if not price:
if cost and filled:
price = cost / filled
average = self.safe_float(order, 'PricePerUnit')
id = self.safe_string(order, 'OrderID')
return {
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': 'limit',
'timeInForce': None,
'side': side,
'price': price,
'cost': cost,
'average': average,
'amount': amount,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': None,
'trades': None,
}
def parse_order_status(self, status):
statuses = {
'OK': 'closed',
'OPEN': 'open',
'CANCELED': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# deposit:
#
# {ID: 118698752,
# Timestamp: '2020-01-21 11:16:09',
# Asset: 'ETH',
# Amount: 1,
# TransactionID: '',
# Status: 'CONFIRMED',
# Label: '0x748c5c8228d0c596f4d07f338blah',
# Symbol: 'ETH'}
#
# withdrawal:
#
# {ID: 689281,
# Timestamp: '2019-07-05 13:14:43',
# Asset: 'BTC',
# Amount: -0.108959,
# TransactionID: 'da48d6901fslfjsdjflsdjfls852b87e362cad1',
# Status: 'CONFIRMED',
# Label: '0.1089590;35wztHPMgrebFvvblah;0.00100000',
# Symbol: 'BTC'}
#
id = self.safe_string(transaction, 'ID')
amount = self.safe_float(transaction, 'Amount')
type = 'deposit'
if amount < 0:
amount = abs(amount)
type = 'withdrawal'
currencyId = self.safe_string(transaction, 'Asset')
code = self.safe_currency_code(currencyId, currency)
label = self.safe_string(transaction, 'Label')
timestamp = self.parse8601(self.safe_string(transaction, 'Timestamp'))
txid = self.safe_string(transaction, 'TransactionID')
address = None
feeCost = None
labelParts = label.split(';')
if len(labelParts) == 3:
amount = float(labelParts[0])
address = labelParts[1]
feeCost = float(labelParts[2])
else:
address = label
fee = None
if feeCost is not None:
fee = {
'currency': code,
'cost': feeCost,
}
status = 'ok'
if txid == 'CANCELED':
txid = None
status = 'canceled'
return {
'info': transaction,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'id': id,
'currency': code,
'amount': amount,
'address': address,
'tag': None,
'status': status,
'type': type,
'updated': None,
'txid': txid,
'fee': fee,
}
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.implode_params(self.urls['api'][api], {
'hostname': self.hostname,
}) + '/'
if api == 'v3Private':
self.check_required_credentials()
request = {
'apikey': self.apiKey,
'nonce': self.nonce(),
}
url += path + '?' + self.urlencode(self.extend(request, params))
signature = self.hmac(self.encode(url), self.encode(self.secret), hashlib.sha512)
headers = {'apisign': signature}
else:
url += path + '?' + self.urlencode(params)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
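    # Signing sketch for the private API (mirrors the v3Private branch above;
    # the key, secret, and nonce values are placeholders, using the standard
    # hmac/hashlib modules):
    #   url = 'https://bleutrade.com/api/v3/private/getbalances?apikey=KEY&nonce=1579000000000'
    #   sig = hmac.new(SECRET_BYTES, url.encode(), hashlib.sha512).hexdigest()
    #   headers = {'apisign': sig}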
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
# examples...
# {"success":false,"message":"Erro: Order is not open.","result":""} <-- 'error' is spelt wrong
# {"success":false,"message":"Error: Very low volume.","result":"ERR_LOW_VOLUME"}
# {"success":false,"message":"Error: Insuficient Balance","result":"ERR_INSUFICIENT_BALANCE"}
# {"success":false,"message":"Invalid form","result":null}
#
success = self.safe_value(response, 'success')
if success is None:
raise ExchangeError(self.id + ': malformed response: ' + self.json(response))
if not success:
feedback = self.id + ' ' + body
errorCode = self.safe_string(response, 'result')
if errorCode is not None:
self.throw_broadly_matched_exception(self.exceptions['broad'], errorCode, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
errorMessage = self.safe_string(response, 'message')
self.throw_broadly_matched_exception(self.exceptions['broad'], errorMessage, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorMessage, feedback)
raise ExchangeError(feedback)
| 37.877632
| 128
| 0.495189
|
4a1312787db7c6e7866aa9a21a749ad08603bec3
| 2,314
|
py
|
Python
|
eventsourcing/application/snapshotting.py
|
scbabacus/eventsourcing
|
8404c5b26719ed9d9d1d257ebba774879c7243c4
|
[
"BSD-3-Clause"
] | null | null | null |
eventsourcing/application/snapshotting.py
|
scbabacus/eventsourcing
|
8404c5b26719ed9d9d1d257ebba774879c7243c4
|
[
"BSD-3-Clause"
] | null | null | null |
eventsourcing/application/snapshotting.py
|
scbabacus/eventsourcing
|
8404c5b26719ed9d9d1d257ebba774879c7243c4
|
[
"BSD-3-Clause"
] | null | null | null |
from eventsourcing.application.policies import SnapshottingPolicy
from eventsourcing.application.simple import SimpleApplication
from eventsourcing.infrastructure.eventstore import EventStore
from eventsourcing.infrastructure.snapshotting import EventSourcedSnapshotStrategy
class ApplicationWithSnapshotting(SimpleApplication):
# Todo: Change this to default to None?
snapshot_period = 2
def __init__(self, snapshot_period=None, snapshot_record_class=None, **kwargs):
self.snapshot_period = snapshot_period or self.snapshot_period
self.snapshot_record_class = snapshot_record_class
self.snapshotting_policy = None
super(ApplicationWithSnapshotting, self).__init__(**kwargs)
def setup_event_store(self):
super(ApplicationWithSnapshotting, self).setup_event_store()
# Setup event store for snapshots.
self.snapshot_store = EventStore(
record_manager=self.infrastructure_factory.construct_snapshot_record_manager(),
sequenced_item_mapper=self.sequenced_item_mapper_class(
sequenced_item_class=self.sequenced_item_class
)
)
def setup_repository(self, **kwargs):
# Setup repository with a snapshot strategy.
self.snapshot_strategy = EventSourcedSnapshotStrategy(
snapshot_store=self.snapshot_store
)
super(ApplicationWithSnapshotting, self).setup_repository(
snapshot_strategy=self.snapshot_strategy, **kwargs
)
def setup_persistence_policy(self):
super(ApplicationWithSnapshotting, self).setup_persistence_policy()
self.snapshotting_policy = SnapshottingPolicy(
repository=self.repository,
snapshot_store=self.snapshot_store,
persist_event_type=self.persist_event_type,
period=self.snapshot_period
)
def setup_table(self):
super(ApplicationWithSnapshotting, self).setup_table()
if self.datastore is not None:
self.datastore.setup_table(self.snapshot_store.record_manager.record_class)
def close(self):
super(ApplicationWithSnapshotting, self).close()
if self.snapshotting_policy is not None:
self.snapshotting_policy.close()
self.snapshotting_policy = None
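# Illustrative configuration (sketch; MyApplication and its event types are
# hypothetical): snapshot_period counts persisted events between snapshots,
# so the default of 2 stores a snapshot after every second event.
#
#   class MyApplication(ApplicationWithSnapshotting):
#       snapshot_period = 10  # snapshot every 10 events instead
#   app = MyApplication()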
| 42.072727
| 91
| 0.727312
|
4a1313f5208ed23bdd7cffd7d3d8741610efe017
| 311
|
py
|
Python
|
Codewars/7kyu/string-scramble/Python/test.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | 7
|
2017-09-20T16:40:39.000Z
|
2021-08-31T18:15:08.000Z
|
Codewars/7kyu/string-scramble/Python/test.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
Codewars/7kyu/string-scramble/Python/test.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
# Python - 3.6.0
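# The assertions below exercise scramble() without defining it (Codewars
# supplies the solution separately); a minimal implementation consistent
# with the expected outputs, as an illustrative sketch:
def scramble(string, array):
    # array[i] is the destination index of string[i]
    result = [None] * len(string)
    for char, position in zip(string, array):
        result[position] = char
    return ''.join(result)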
test.describe('Example Tests')
test.assert_equals(scramble('abcd', [0, 3, 1, 2]), 'acdb', 'Should return acdb')
test.assert_equals(scramble('sc301s', [4, 0, 3, 1, 5, 2]), 'c0s3s1', 'Should return c0s3s1')
test.assert_equals(scramble('bskl5', [2, 1, 4, 3, 0]), '5sblk', 'Should return 5sblk')
| 38.875
| 92
| 0.655949
|
4a13158061ce18208de3cd2ed14611ddee478fe0
| 2,348
|
py
|
Python
|
pipeline/src/tables/flags.py
|
sawyerwatts/StopSpotDataPipeline
|
6537d0d1779d9ffa6a3096c02f4081d659c12a0e
|
[
"MIT"
] | 3
|
2020-02-19T05:25:56.000Z
|
2020-02-22T21:31:34.000Z
|
pipeline/src/tables/flags.py
|
sawyerwatts/StopSpotDataPipeline
|
6537d0d1779d9ffa6a3096c02f4081d659c12a0e
|
[
"MIT"
] | 69
|
2020-02-20T20:30:03.000Z
|
2020-05-29T01:20:05.000Z
|
pipeline/src/tables/flags.py
|
wolakdav/TeamBeeCapstoneProject
|
6957416273fda85a12e86408ae635d7491fb1035
|
[
"MIT"
] | 4
|
2020-06-05T03:47:49.000Z
|
2020-12-21T01:17:02.000Z
|
import pandas
from .table import Table
import flaggers.flagger as flagger
class Flags(Table):
def __init__(self, user=None, passwd=None, hostname=None, db_name=None, schema="hive", engine=None):
super().__init__(user, passwd, hostname, db_name, schema, engine)
self._table_name = "flags"
self._index_col = None
# flag_id will be explicitly set by flag's enum values rather than
# auto increment. This prevents strange duplicate flags with different
# id when changing the flag enums.
self._expected_cols = [
"flag_id",
"description",
"name"
]
self._creation_sql = "".join(["""
CREATE TABLE IF NOT EXISTS """, self._schema, ".", self._table_name, """
(
flag_id INTEGER PRIMARY KEY,
description VARCHAR(200),
name VARCHAR(30)
);"""])
def write_table(self, flags):
        # flags is a list of [flag_id, description, name] rows
df = pandas.DataFrame(flags, columns=self._expected_cols)
return self._write_table(df, conflict_columns=["flag_id"])
def create_table(self):
# Flags are written into the database on creation.
if not super().create_table():
return False
flags = []
for flag in flagger.Flags:
fd = flagger.flag_descriptions[flag]
flags.append([flag.value, fd.desc, fd.name])
self.write_table(flags)
        return True
def write_csv(self, path):
"""
        Builds the flag rows and saves them to a csv file via the parent class.
        Args:
            path (String): relative path to where the csv will be saved.
        Returns:
            Boolean representing the state of the operation (successful write: True, error during process: False)
"""
flags = []
#Append expected cols first to create header row in the csv file
flags.append(self._expected_cols)
#Create list with all flag data
for flag in flagger.Flags:
            fd = flagger.flag_descriptions[flag]
            flags.append([flag.value, fd.desc, fd.name])
#Create pandas DataFrame from the list
df = pandas.DataFrame(flags)
#Call parent function that does actual saving
return super().write_csv(df, path)
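# Illustrative usage (connection parameters are placeholders; persistence is
# handled by the Table base class):
#   flags_table = Flags(user='postgres', passwd='...', hostname='localhost', db_name='stopspot')
#   flags_table.create_table()   # creates the table and seeds it from flagger.Flags
#   flags_table.write_csv('.')   # or dump the same rows to a csv instead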
| 30.894737
| 110
| 0.602641
|
4a1315b617ffe77878f223d4d658be4109a204e0
| 1,573
|
py
|
Python
|
internal/notes/builtin-SAVE/packages/xf86bigfontproto/package.py
|
HPCToolkit/hpctest
|
5ff4455582bf39e75530a31badcf6142081b386b
|
[
"BSD-3-Clause"
] | 1
|
2019-01-17T20:07:19.000Z
|
2019-01-17T20:07:19.000Z
|
internal/notes/builtin-SAVE/packages/xf86bigfontproto/package.py
|
HPCToolkit/hpctest
|
5ff4455582bf39e75530a31badcf6142081b386b
|
[
"BSD-3-Clause"
] | null | null | null |
internal/notes/builtin-SAVE/packages/xf86bigfontproto/package.py
|
HPCToolkit/hpctest
|
5ff4455582bf39e75530a31badcf6142081b386b
|
[
"BSD-3-Clause"
] | 2
|
2019-08-06T18:13:57.000Z
|
2021-11-05T18:19:49.000Z
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Xf86bigfontproto(AutotoolsPackage):
"""X.org XF86BigFontProto protocol headers."""
homepage = "https://cgit.freedesktop.org/xorg/proto/xf86bigfontproto"
url = "https://www.x.org/archive/individual/proto/xf86bigfontproto-1.2.0.tar.gz"
version('1.2.0', '91b0733ff4cbe55808d96073258aa3d1')
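    # Typical Spack workflow for a proto package like this one (illustrative):
    #   $ spack install xf86bigfontproto
    #   $ spack spec xf86bigfontproto@1.2.0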
| 44.942857
| 89
| 0.684043
|
4a1316b50c1e2f1f0a440e202b036949f0757a37
| 1,818
|
py
|
Python
|
yt_dlp/extractor/footyroom.py
|
nxtreaming/yt-dlp
|
385ffb467b2285e85a2a5495b90314ba1f8e0700
|
[
"Unlicense"
] | 11
|
2022-01-06T22:09:50.000Z
|
2022-03-12T22:26:22.000Z
|
yt_dlp/extractor/footyroom.py
|
nxtreaming/yt-dlp
|
385ffb467b2285e85a2a5495b90314ba1f8e0700
|
[
"Unlicense"
] | 4
|
2022-02-25T08:20:18.000Z
|
2022-03-17T16:16:20.000Z
|
yt_dlp/extractor/footyroom.py
|
nxtreaming/yt-dlp
|
385ffb467b2285e85a2a5495b90314ba1f8e0700
|
[
"Unlicense"
] | 3
|
2022-02-19T08:59:13.000Z
|
2022-03-06T16:11:21.000Z
|
from .common import InfoExtractor
from .streamable import StreamableIE
class FootyRoomIE(InfoExtractor):
_VALID_URL = r'https?://footyroom\.com/matches/(?P<id>\d+)'
_TESTS = [{
'url': 'http://footyroom.com/matches/79922154/hull-city-vs-chelsea/review',
'info_dict': {
'id': '79922154',
'title': 'VIDEO Hull City 0 - 2 Chelsea',
},
'playlist_count': 2,
'add_ie': [StreamableIE.ie_key()],
}, {
'url': 'http://footyroom.com/matches/75817984/georgia-vs-germany/review',
'info_dict': {
'id': '75817984',
'title': 'VIDEO Georgia 0 - 2 Germany',
},
'playlist_count': 1,
'add_ie': ['Playwire']
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
playlist = self._parse_json(self._search_regex(
r'DataStore\.media\s*=\s*([^;]+)', webpage, 'media data'),
playlist_id)
playlist_title = self._og_search_title(webpage)
entries = []
for video in playlist:
payload = video.get('payload')
if not payload:
continue
playwire_url = self._html_search_regex(
r'data-config="([^"]+)"', payload,
'playwire url', default=None)
if playwire_url:
entries.append(self.url_result(self._proto_relative_url(
playwire_url, 'http:'), 'Playwire'))
streamable_url = StreamableIE._extract_url(payload)
if streamable_url:
entries.append(self.url_result(
streamable_url, StreamableIE.ie_key()))
return self.playlist_result(entries, playlist_id, playlist_title)
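# Illustrative invocation through yt-dlp (sketch): the extractor is selected
# automatically whenever a URL matches _VALID_URL, e.g.
#   $ yt-dlp "http://footyroom.com/matches/79922154/hull-city-vs-chelsea/review"
# and the resulting playlist entries are delegated to the Streamable or
# Playwire extractors found in the page payloads.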
| 33.666667
| 83
| 0.566557
|
4a1316edcff66608e648dcd888830c6a4351db34
| 4,015
|
py
|
Python
|
tests/test_api/test_inference_tracking.py
|
jcwon0/BlurHPE
|
c97a57e92a8a7f171b0403aee640222a32513562
|
[
"Apache-2.0"
] | null | null | null |
tests/test_api/test_inference_tracking.py
|
jcwon0/BlurHPE
|
c97a57e92a8a7f171b0403aee640222a32513562
|
[
"Apache-2.0"
] | null | null | null |
tests/test_api/test_inference_tracking.py
|
jcwon0/BlurHPE
|
c97a57e92a8a7f171b0403aee640222a32513562
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from mmpose.apis import (get_track_id, inference_top_down_pose_model,
init_pose_model, vis_pose_tracking_result)
def test_pose_tracking_demo():
# COCO demo
# build the pose model from a config file and a checkpoint file
pose_model = init_pose_model(
'configs/top_down/resnet/coco/res50_coco_256x192.py',
None,
device='cpu')
image_name = 'tests/data/coco/000000000785.jpg'
person_result = [{'bbox': [50, 50, 50, 100]}]
# test a single image, with a list of bboxes.
pose_results, _ = inference_top_down_pose_model(
pose_model, image_name, person_result, format='xywh')
pose_results, next_id = get_track_id(pose_results, [], next_id=0)
# show the results
vis_pose_tracking_result(pose_model, image_name, pose_results)
pose_results_last = pose_results
# AIC demo
pose_model = init_pose_model(
'configs/top_down/resnet/aic/res50_aic_256x192.py', None, device='cpu')
image_name = 'tests/data/aic/054d9ce9201beffc76e5ff2169d2af2f027002ca.jpg'
# test a single image, with a list of bboxes.
pose_results, _ = inference_top_down_pose_model(
pose_model,
image_name,
person_result,
format='xywh',
dataset='TopDownAicDataset')
pose_results, next_id = get_track_id(pose_results, pose_results_last,
next_id)
# show the results
vis_pose_tracking_result(
pose_model, image_name, pose_results, dataset='TopDownAicDataset')
# OneHand10K demo
# build the pose model from a config file and a checkpoint file
pose_model = init_pose_model(
'configs/hand/resnet/onehand10k/res50_onehand10k_256x256.py',
None,
device='cpu')
image_name = 'tests/data/onehand10k/9.jpg'
# test a single image, with a list of bboxes.
pose_results, _ = inference_top_down_pose_model(
pose_model,
image_name, [{
'bbox': [10, 10, 30, 30]
}],
format='xywh',
dataset='OneHand10KDataset')
pose_results, next_id = get_track_id(pose_results, pose_results_last,
next_id)
# show the results
vis_pose_tracking_result(
pose_model, image_name, pose_results, dataset='OneHand10KDataset')
# InterHand2D demo
pose_model = init_pose_model(
'configs/hand/resnet/interhand2d/res50_interhand2d_all_256x256.py',
None,
device='cpu')
image_name = 'tests/data/interhand2.6m/image2017.jpg'
# test a single image, with a list of bboxes.
pose_results, _ = inference_top_down_pose_model(
pose_model,
image_name, [{
'bbox': [50, 50, 0, 0]
}],
format='xywh',
dataset='InterHand2DDataset')
pose_results, next_id = get_track_id(pose_results, [], next_id=0)
# show the results
vis_pose_tracking_result(
pose_model, image_name, pose_results, dataset='InterHand2DDataset')
pose_results_last = pose_results
# MPII demo
pose_model = init_pose_model(
'configs/top_down/resnet/mpii/res50_mpii_256x256.py',
None,
device='cpu')
image_name = 'tests/data/mpii/004645041.jpg'
# test a single image, with a list of bboxes.
pose_results, _ = inference_top_down_pose_model(
pose_model,
image_name, [{
'bbox': [50, 50, 0, 0]
}],
format='xywh',
dataset='TopDownMpiiDataset')
pose_results, next_id = get_track_id(pose_results, pose_results_last,
next_id)
# show the results
vis_pose_tracking_result(
pose_model, image_name, pose_results, dataset='TopDownMpiiDataset')
with pytest.raises(NotImplementedError):
vis_pose_tracking_result(
pose_model, image_name, pose_results, dataset='test')
| 37.523364
| 80
| 0.636862
|
4a13187a24c5050bfb6c39b80d21bc6b33374644
| 2,840
|
py
|
Python
|
web_server/parsers/WIP/download_sport.py
|
yutkin/News-Aggregator
|
b35b2cdd873121aab03cb14c191b2a3b4d3d5180
|
[
"MIT"
] | 17
|
2017-05-09T13:03:21.000Z
|
2022-01-08T18:32:01.000Z
|
web_server/parsers/WIP/download_sport.py
|
uav-profile/News-Aggregator
|
b35b2cdd873121aab03cb14c191b2a3b4d3d5180
|
[
"MIT"
] | null | null | null |
web_server/parsers/WIP/download_sport.py
|
uav-profile/News-Aggregator
|
b35b2cdd873121aab03cb14c191b2a3b4d3d5180
|
[
"MIT"
] | 6
|
2018-04-23T03:28:33.000Z
|
2021-04-02T06:29:23.000Z
|
import requests
from multiprocessing import Process, Queue, Value, current_process
import queue
import signal
import logging
import pandas as pd
from bs4 import BeautifulSoup
NUM_JOBS = 16
def url_fetcher(Q, sync_flag):
curr_page = 2
url_counter = 0
while True:
url_to_fetch = 'http://www.sport-express.ru/news/page' + str(curr_page) + '/'
try:
response = requests.get(url_to_fetch)
if response.status_code != requests.codes.ok:
raise Exception()
except Exception:
sync_flag.value = 0
break
html_tree = BeautifulSoup(response.text, 'lxml')
news_list = html_tree.find_all('div', 'recent_item')
for news in news_list:
news_url = news.find('a', 'fs_20')['href']
Q.put(news_url)
url_counter += 1
if url_counter % 1000 == 0:
logging.debug('total downloaded %d urls' % url_counter)
curr_page += 1
def fetch_news(Q, sync_flag):
signal.signal(signal.SIGINT, signal.SIG_IGN)
news_storage = []
pid = current_process().pid
while sync_flag.value == 1:
try:
url = Q.get_nowait()
except queue.Empty:
continue
response = requests.get(url)
if response.status_code == requests.codes.ok:
html = BeautifulSoup(response.text, 'lxml')
try:
paragraphs = html.find('div', 'article_text').find_all('p')
text = ' '.join([p.get_text() for p in paragraphs])
topic = html.find('div',
'fs_13').find('a')['href']
title = html.find('h1', 'trebuchet').get_text()
except Exception:
continue
news_storage.append({'title': title, 'url': url, 'text': text,
'topic': topic})
logging.debug('%s' % url)
# logging.debug('Stopped, writing to news_sport_%d.csv' % pid)
pd.DataFrame(news_storage).to_csv('./data/sport/news_sport_%d.csv' % pid,
encoding='utf-8', index=None)
def main():
logging.basicConfig(level=logging.DEBUG,
format='[PID %(process)d %(asctime)s] %(message)s',
datefmt='%d/%m/%Y %H:%M:%S')
logging.getLogger('requests').setLevel(logging.CRITICAL)
Q = Queue()
sync_flag = Value('i', 1)
workers = []
for _ in range(NUM_JOBS):
workers.append(Process(target=fetch_news, args=(Q, sync_flag)))
workers[-1].start()
try:
url_fetcher(Q, sync_flag)
except KeyboardInterrupt:
sync_flag.value = 0
finally:
for worker in workers:
worker.join()
if __name__ == '__main__':
main()
| 30.869565
| 85
| 0.572183
|
4a131933ccac252afeeeb38b0440c195f17ee5af
| 12,526
|
py
|
Python
|
cron-jobs/validation/packit-service-validation.py
|
mmuzila/deployment
|
314199992c9cb6595e43ee9f97e130bcc0ddb308
|
[
"MIT"
] | null | null | null |
cron-jobs/validation/packit-service-validation.py
|
mmuzila/deployment
|
314199992c9cb6595e43ee9f97e130bcc0ddb308
|
[
"MIT"
] | null | null | null |
cron-jobs/validation/packit-service-validation.py
|
mmuzila/deployment
|
314199992c9cb6595e43ee9f97e130bcc0ddb308
|
[
"MIT"
] | null | null | null |
# Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
import enum
import sentry_sdk
import time
from copr.v3 import Client
from datetime import datetime, timedelta, date
from os import getenv
from github import InputGitAuthor
from ogr.services.github import GithubService
from ogr.abstract import PullRequest
copr = Client.create_from_config_file()
service = GithubService(token=getenv("GITHUB_TOKEN"))
project = service.get_project(repo="hello-world", namespace="packit")
user = InputGitAuthor(
name="Release Bot", email="user-cont-team+release-bot@redhat.com"
)
class Trigger(str, enum.Enum):
comment = "comment"
pr_opened = "pr_opened"
push = "push"
class Testcase:
def __init__(self, pr: PullRequest = None, trigger: Trigger = Trigger.pr_opened):
self.pr = pr
self.failure_msg = ""
self.trigger = trigger
self._copr_project_name = None
@property
def copr_project_name(self):
"""
Get the name of Copr project from id of the PR.
:return:
"""
if self.pr and not self._copr_project_name:
self._copr_project_name = (
f"packit-hello-world-{self.pr.id}"
)
return self._copr_project_name
def run_test(self):
"""
        Run all checks; if there is any failure message, send it to Sentry
        and, if the test opened a PR, close it.
:return:
"""
self.run_checks()
if self.failure_msg:
sentry_sdk.capture_message(
f"{self.pr.title} ({self.pr.url}) failed: {self.failure_msg}"
)
if self.trigger == Trigger.pr_opened:
self.pr.close()
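    # Illustrative driver (sketch; the PR id below is hypothetical):
    #   Testcase(trigger=Trigger.pr_opened).run_test()
    #   Testcase(pr=project.get_pr(123), trigger=Trigger.comment).run_test()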
def trigger_build(self):
"""
Trigger the build (by commenting/pushing to the PR/opening a new PR).
:return:
"""
if self.trigger == Trigger.comment:
project.pr_comment(self.pr.id, "/packit build")
elif self.trigger == Trigger.push:
self.push_to_pr()
else:
self.create_pr()
def push_to_pr(self):
"""
Push a new commit to the PR.
:return:
"""
contents = project.github_repo.get_contents(
"test.txt", ref=self.pr.source_branch
)
# https://pygithub.readthedocs.io/en/latest/examples/Repository.html#update-a-file-in-the-repository
# allows empty commit (always the same content of file)
project.github_repo.update_file(
path=contents.path,
message=f"Commit build trigger ({date.today().strftime('%d/%m/%y')})",
content="Testing the push trigger.",
sha=contents.sha,
branch=self.pr.source_branch,
committer=user,
author=user,
)
def create_pr(self):
"""
Create a new PR, if the source branch 'test_case_opened_pr' does not exist,
create one and commit some changes before it.
:return:
"""
source_branch = "test_case_opened_pr"
pr_title = "Basic test case - opened PR trigger"
if source_branch not in project.get_branches():
# if the source branch does not exist, create one
# and create a commit
commit = project.github_repo.get_commit("HEAD")
project.github_repo.create_git_ref(f"refs/heads/{source_branch}", commit.sha)
project.github_repo.create_file(
path="test.txt",
message="Opened PR trigger",
content="Testing the opened PR trigger.",
branch=source_branch,
committer=user,
author=user
)
existing_pr = [pr for pr in project.get_pr_list() if pr.title == pr_title]
if len(existing_pr) == 1:
existing_pr[0].close()
self.pr = project.create_pr(
title=pr_title,
body="This test case is triggered automatically by our validation script.",
target_branch="master",
source_branch=source_branch,
)
def run_checks(self):
"""
Run all checks of the test case.
:return:
"""
build = self.check_build_submitted()
if not build:
return
self.check_build(build.id)
self.check_statuses()
self.check_comment()
def check_statuses_set_to_pending(self):
"""
Check whether some commit status is set to pending (they are updated in loop
so it is enough).
:return:
"""
statuses = [
status.context
for status in self.get_statuses()
if "packit-stg" not in status.context
]
watch_end = datetime.now() + timedelta(seconds=60)
failure_message = (
"Github statuses were not set "
"to pending in time 1 minute.\n"
)
# when a new PR is opened
while len(statuses) == 0:
if datetime.now() > watch_end:
self.failure_msg += failure_message
return
statuses = [
status.context
for status in self.get_statuses()
if "packit-stg" not in status.context
]
while True:
if datetime.now() > watch_end:
self.failure_msg += failure_message
return
new_statuses = [
(status.context, status.state)
for status in self.get_statuses()
if status.context in statuses
]
for name, state in new_statuses:
if state == "pending":
return
time.sleep(5)
def check_build_submitted(self):
"""
        Check whether the build was submitted in Copr within 30 minutes.
:return:
"""
if self.pr:
try:
old_build_len = len(
copr.build_proxy.get_list("packit", self.copr_project_name)
)
except Exception:
old_build_len = 0
old_comment_len = len(project.get_pr_comments(self.pr.id))
else:
# the PR is not created yet
old_build_len = 0
old_comment_len = 0
self.trigger_build()
watch_end = datetime.now() + timedelta(seconds=60 * 30)
self.check_statuses_set_to_pending()
while True:
if datetime.now() > watch_end:
                self.failure_msg += (
                    "The build was not submitted in Copr within 30 minutes.\n"
                )
return None
try:
new_builds = copr.build_proxy.get_list("packit", self.copr_project_name)
            except Exception:
                # the Copr project does not exist yet; wait before retrying
                # instead of hammering the API in a tight loop
                time.sleep(30)
                continue
if len(new_builds) >= old_build_len + 1:
return new_builds[0]
new_comments = project.get_pr_comments(self.pr.id, reverse=True)
new_comments = new_comments[: (len(new_comments) - old_comment_len)]
if len(new_comments) > 1:
comment = [
comment.comment
for comment in new_comments
if comment.author == "packit-as-a-service[bot]"
]
if len(comment) > 0:
if "error" in comment[0] or "whitelist" in comment[0]:
self.failure_msg += (
f"The build was not submitted in Copr, "
f"Github comment from p-s: {comment[0]}\n"
)
return None
else:
self.failure_msg += (
f"New github comment from p-s while "
f"submitting Copr build: {comment[0]}\n"
)
time.sleep(30)
def check_build(self, build_id):
"""
        Check whether the build was successful in Copr within 30 minutes.
:param build_id: ID of the build
:return:
"""
watch_end = datetime.now() + timedelta(seconds=60 * 30)
state_reported = ""
while True:
if datetime.now() > watch_end:
self.failure_msg += "The build did not finish in time 30 minutes.\n"
return
build = copr.build_proxy.get(build_id)
if build.state == state_reported:
time.sleep(20)
continue
state_reported = build.state
if state_reported not in [
"running",
"pending",
"starting",
"forked",
"importing",
"waiting",
]:
if state_reported != "succeeded":
self.failure_msg += (
f"The build in Copr was not successful. "
f"Copr state: {state_reported}.\n"
)
return
time.sleep(30)
def watch_statuses(self):
"""
        Watch the statuses for up to 20 minutes; once no commit status is pending, return them.
:return: [CommitStatus]
"""
watch_end = datetime.now() + timedelta(seconds=60 * 20)
while True:
statuses = self.get_statuses()
states = [
status.state
for status in statuses
if "packit-stg" not in status.context
]
if "pending" not in states:
break
if datetime.now() > watch_end:
                self.failure_msg += (
                    "These statuses were still set to pending 20 minutes "
                    "after the Copr build had finished:\n"
                )
for status in statuses:
if "packit-stg" not in status.context and status.state == "pending":
self.failure_msg += f"{status.context}\n"
return []
time.sleep(20)
return statuses
def check_statuses(self):
"""
Check whether all statuses are set to success.
:return:
"""
if "The build in Copr was not successful." in self.failure_msg:
return
statuses = self.watch_statuses()
for status in statuses:
if "packit-stg" not in status.context and status.state == "failed":
self.failure_msg += (
f"Status {status.context} was set to failure although the build in "
f"Copr was successful, message: {status.description}.\n"
)
def check_comment(self):
"""
Check whether p-s has commented when the Copr build was not successful.
:return:
"""
failure = "The build in Copr was not successful." in self.failure_msg
if failure:
packit_comments = [
comment
for comment in project.get_pr_comments(self.pr.id, reverse=True)
if comment.author == "packit-as-a-service[bot]"
]
if not packit_comments:
                self.failure_msg += (
                    "No comment from p-s about the unsuccessful last Copr build was found.\n"
                )
def get_statuses(self):
"""
Get commit statuses from the most recent commit.
:return: [CommitStatus]
"""
commit_sha = project.get_all_pr_commits(self.pr.id)[-1]
commit = project.github_repo.get_commit(commit_sha)
return commit.get_combined_status().statuses
if __name__ == "__main__":
sentry_sdk.init(getenv("SENTRY_SECRET"))
# run testcases where the build is triggered by a '/packit build' comment
prs_for_comment = [
pr for pr in project.get_pr_list() if pr.title.startswith("Basic test case:")
]
for pr in prs_for_comment:
Testcase(pr=pr, trigger=Trigger.comment).run_test()
# run testcase where the build is triggered by push
pr_for_push = [
pr
for pr in project.get_pr_list()
if pr.title.startswith("Basic test case - push trigger")
]
if pr_for_push:
Testcase(pr=pr_for_push[0], trigger=Trigger.push).run_test()
# run testcase where the build is triggered by opening a new PR
Testcase().run_test()
| 32.117949
| 108
| 0.539997
|
4a131ab8208a3c7526dec611ad8c4170fe350c2f
| 1,119
|
py
|
Python
|
galaxy/api/aggregators.py
|
maxamillion/galaxy
|
0460baf9d2c8da0a0e88c7975eca2e3abcc82f23
|
[
"Apache-2.0"
] | null | null | null |
galaxy/api/aggregators.py
|
maxamillion/galaxy
|
0460baf9d2c8da0a0e88c7975eca2e3abcc82f23
|
[
"Apache-2.0"
] | 1
|
2021-06-10T23:59:59.000Z
|
2021-06-10T23:59:59.000Z
|
galaxy/api/aggregators.py
|
connectthefuture/galaxy
|
841821957680643e07c1a94fb609f8e4117c19d1
|
[
"Apache-2.0"
] | null | null | null |
# (c) 2012-2016, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
import django.db.models.aggregates
# Usage (to retrieve objects with highest average, NULLs become zeroes and are last):
# MyModel.objects.annotate(average=AvgWithZeroForNull('other_model__field_name')).order_by('-average')
class AvgWithZeroForNull(django.db.models.aggregates.Avg):
template = 'COALESCE(%(function)s(%(field)s), 0)'
name = 'AvgWithZeroForNull'
django.db.models.aggregates.AvgWithZeroForNull = AvgWithZeroForNull
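# Usage sketch (the `Role` model and its `ratings__score` relation below are
# illustrative assumptions, not part of this module):
#
# from galaxy.main.models import Role
# top_roles = (Role.objects
#              .annotate(average_score=AvgWithZeroForNull('ratings__score'))
#              .order_by('-average_score'))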
| 41.444444
| 102
| 0.76765
|
4a131b715e9bcc86f6d20e7cb70c05190ede31a6
| 6,613
|
py
|
Python
|
fence/blueprints/data/blueprint.py
|
ADParedes/fence
|
81afd1914c483da5514d0bcc13ecbfda9758dd9f
|
[
"Apache-2.0"
] | null | null | null |
fence/blueprints/data/blueprint.py
|
ADParedes/fence
|
81afd1914c483da5514d0bcc13ecbfda9758dd9f
|
[
"Apache-2.0"
] | 1
|
2019-11-01T08:30:28.000Z
|
2019-11-01T08:30:28.000Z
|
fence/blueprints/data/blueprint.py
|
ADParedes/fence
|
81afd1914c483da5514d0bcc13ecbfda9758dd9f
|
[
"Apache-2.0"
] | 3
|
2019-10-16T04:27:54.000Z
|
2019-10-24T02:27:52.000Z
|
import flask
from cdislogging import get_logger
from fence.auth import login_required, require_auth_header, current_token
from fence.blueprints.data.indexd import (
BlankIndex,
IndexedFile,
get_signed_url_for_file,
)
from fence.errors import Forbidden, InternalError, UserError
from fence.utils import is_valid_expiration
from fence.authz.auth import check_arborist_auth
logger = get_logger(__name__)
blueprint = flask.Blueprint("data", __name__)
@blueprint.route("/<path:file_id>", methods=["DELETE"])
@require_auth_header(aud={"data"})
@login_required({"data"})
def delete_data_file(file_id):
"""
Delete all the locations for a data file which was uploaded to bucket storage from
indexd.
If the data file is still at the first stage where it belongs to just the uploader
(and isn't linked to a project), then the deleting user should match the uploader
field on the record in indexd. Otherwise, the user must have delete permissions in
the project.
Args:
file_id (str): GUID of file to delete
"""
record = IndexedFile(file_id)
# check auth: user must have uploaded the file (so `uploader` field on the record is
# this user)
uploader = record.index_document.get("uploader")
if not uploader:
raise Forbidden("deleting submitted records is not supported")
if current_token["context"]["user"]["name"] != uploader:
raise Forbidden("user is not uploader for file {}".format(file_id))
logger.info("deleting record and files for {}".format(file_id))
record.delete_files(delete_all=True)
return record.delete()
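# Usage sketch for the route above (base URL, GUID and token are placeholders):
#
# import requests
# resp = requests.delete(
#     "https://example-commons.org/user/data/<GUID>",
#     headers={"Authorization": "bearer <ACCESS_TOKEN>"},
# )
# # expect 403 if the caller is not the `uploader` of the record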
@blueprint.route("/upload", methods=["POST"])
@require_auth_header(aud={"data"})
@login_required({"data"})
@check_arborist_auth(resource="/data_file", method="file_upload")
def upload_data_file():
"""
Return a presigned URL for use with uploading a data file.
See the documentation on the entire flow here for more info:
https://github.com/uc-cdis/cdis-wiki/tree/master/dev/gen3/data_upload
"""
# make new record in indexd, with just the `uploader` field (and a GUID)
params = flask.request.get_json()
if not params:
raise UserError("wrong Content-Type; expected application/json")
if "file_name" not in params:
raise UserError("missing required argument `file_name`")
blank_index = BlankIndex(file_name=params["file_name"])
expires_in = flask.current_app.config.get("MAX_PRESIGNED_URL_TTL", 3600)
if "expires_in" in params:
is_valid_expiration(params["expires_in"])
expires_in = min(params["expires_in"], expires_in)
response = {
"guid": blank_index.guid,
"url": blank_index.make_signed_url(params["file_name"], expires_in=expires_in),
}
return flask.jsonify(response), 201
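# Usage sketch for the route above (host and token are placeholders; the
# optional `expires_in` is capped by MAX_PRESIGNED_URL_TTL):
#
# import requests
# r = requests.post(
#     "https://example-commons.org/user/data/upload",
#     headers={"Authorization": "bearer <ACCESS_TOKEN>"},
#     json={"file_name": "reads.bam", "expires_in": 1200},
# )
# guid, url = r.json()["guid"], r.json()["url"]
# requests.put(url, data=open("reads.bam", "rb"))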
@blueprint.route("/multipart/init", methods=["POST"])
@require_auth_header(aud={"data"})
@login_required({"data"})
@check_arborist_auth(resource="/data_file", method="file_upload")
def init_multipart_upload():
"""
Initialize a multipart upload request
"""
params = flask.request.get_json()
if not params:
raise UserError("wrong Content-Type; expected application/json")
if "file_name" not in params:
raise UserError("missing required argument `file_name`")
blank_index = BlankIndex(file_name=params["file_name"])
expires_in = flask.current_app.config.get("MAX_PRESIGNED_URL_TTL", 3600)
if "expires_in" in params:
is_valid_expiration(params["expires_in"])
expires_in = min(params["expires_in"], expires_in)
response = {
"guid": blank_index.guid,
"uploadId": BlankIndex.init_multipart_upload(
blank_index.guid + "/" + params["file_name"], expires_in=expires_in
),
}
return flask.jsonify(response), 201
@blueprint.route("/multipart/upload", methods=["POST"])
@require_auth_header(aud={"data"})
@login_required({"data"})
@check_arborist_auth(resource="/data_file", method="file_upload")
def generate_multipart_upload_presigned_url():
"""
Generate multipart upload presigned url
"""
params = flask.request.get_json()
if not params:
raise UserError("wrong Content-Type; expected application/json")
missing = {"key", "uploadId", "partNumber"}.difference(set(params))
if missing:
raise UserError("missing required arguments: {}".format(list(missing)))
expires_in = flask.current_app.config.get("MAX_PRESIGNED_URL_TTL", 3600)
if "expires_in" in params:
is_valid_expiration(params["expires_in"])
expires_in = min(params["expires_in"], expires_in)
response = {
"presigned_url": BlankIndex.generate_aws_presigned_url_for_part(
params["key"],
params["uploadId"],
params["partNumber"],
expires_in=expires_in,
)
}
return flask.jsonify(response), 200
@blueprint.route("/multipart/complete", methods=["POST"])
@require_auth_header(aud={"data"})
@login_required({"data"})
@check_arborist_auth(resource="/data_file", method="file_upload")
def complete_multipart_upload():
"""
Complete multipart upload
"""
params = flask.request.get_json()
if not params:
raise UserError("wrong Content-Type; expected application/json")
missing = {"key", "uploadId", "parts"}.difference(set(params))
if missing:
raise UserError("missing required arguments: {}".format(list(missing)))
expires_in = flask.current_app.config.get("MAX_PRESIGNED_URL_TTL", 3600)
if "expires_in" in params:
is_valid_expiration(params["expires_in"])
expires_in = min(params["expires_in"], expires_in)
try:
BlankIndex.complete_multipart_upload(
params["key"], params["uploadId"], params["parts"], expires_in=expires_in
        )
except InternalError as e:
return flask.jsonify({"message": e.message}), e.code
return flask.jsonify({"message": "OK"}), 200
@blueprint.route("/upload/<path:file_id>", methods=["GET"])
def upload_file(file_id):
"""
Get a presigned url to upload a file given by file_id.
"""
result = get_signed_url_for_file("upload", file_id)
return flask.jsonify(result)
@blueprint.route("/download/<path:file_id>", methods=["GET"])
def download_file(file_id):
"""
Get a presigned url to download a file given by file_id.
"""
result = get_signed_url_for_file("download", file_id)
if not "redirect" in flask.request.args or not "url" in result:
return flask.jsonify(result)
return flask.redirect(result["url"])
| 35.175532
| 88
| 0.692575
|
4a131c288f319ae02f152c18ebf853b0460e8c9a
| 1,653
|
py
|
Python
|
core/helper.py
|
Metleb1996/histents
|
f57a6cc6f496a58b370336ac7009f9519fccb4e2
|
[
"MIT"
] | null | null | null |
core/helper.py
|
Metleb1996/histents
|
f57a6cc6f496a58b370336ac7009f9519fccb4e2
|
[
"MIT"
] | null | null | null |
core/helper.py
|
Metleb1996/histents
|
f57a6cc6f496a58b370336ac7009f9519fccb4e2
|
[
"MIT"
] | null | null | null |
import re
import datetime
def user_data_control(data: dict):
try:
for i in data.keys():
data[i] = str(data[i]).strip()
except Exception as e:
return str(e), False
    if len(data['user_name']) < 5 or len(data['user_name']) > 80:
        return "The username does not comply with the rules. Please use at least 5 and at most 80 characters.", False
if len(data['user_email']) < 5 or len(data['user_email']) > 120 or not is_email(data['user_email']):
return "It looks like your email address is wrong. Please use a real email address.", False
    if len(data['user_password']) < 10 or len(data['user_password']) > 256:
        return "Use at least 10 and at most 256 characters in your password.", False
return data, True
def event_data_control(data: dict, id_ccontrol=False):
try:
for i in data.keys():
data[i] = str(data[i]).strip()
except Exception as e:
return str(e), False
    if len(data['e_text']) < 5 or len(data['e_text']) > 2047:
        return "The event text does not comply with the rules. Please use at least 5 and at most 2047 characters.", False
if id_ccontrol:
try:
data['e_id'] = int(data.get('e_id'))
except Exception as e:
return str(e), False
try:
data['e_date'] = datetime.datetime.strptime(data['e_date'], "%Y %m %d")
except ValueError:
return "Use only '%Y %m %d' format for e_date", False
except Exception as e:
return str(e), False
return data, True
def is_email(email):
    regex = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\b'
if re.fullmatch(regex, email):
return True
return False
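# Small usage sketch of the validators above (sample values are made up);
# it only runs when the module is executed directly.
if __name__ == "__main__":
    sample = {
        'user_name': 'alice_example',
        'user_email': 'alice@example.com',
        'user_password': 'correct-horse-battery',
    }
    result, ok = user_data_control(sample)
    if ok:
        print("accepted:", result)
    else:
        print("rejected:", result)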
| 38.44186
| 121
| 0.629764
|
4a131c8358cc8153bac7c813a5a7be64af6502de
| 1,783
|
py
|
Python
|
src/reactions/MassAction.py
|
AnEvilBurrito/model-builder
|
f1a7d3a53c7d40b359a5e6521a51869f307ef48c
|
[
"MIT"
] | null | null | null |
src/reactions/MassAction.py
|
AnEvilBurrito/model-builder
|
f1a7d3a53c7d40b359a5e6521a51869f307ef48c
|
[
"MIT"
] | null | null | null |
src/reactions/MassAction.py
|
AnEvilBurrito/model-builder
|
f1a7d3a53c7d40b359a5e6521a51869f307ef48c
|
[
"MIT"
] | null | null | null |
from .Reactions import Reactions
# from Reactions import Reactions
class MassAction(Reactions):
    # Simplified Mass Action with only two forward species and one backward species,
    # with molecularities of 1
def __init__(self, forwardSpecie1: str, forwardSpecie2: str, backwardSpecie: str = '_Auto', name='', Ka: float = 0.001, Kd: float = 0.01):
if backwardSpecie == '_Auto':
backwardSpecie = forwardSpecie1 + 'u' + forwardSpecie2
super().__init__([forwardSpecie1, forwardSpecie2], backwardSpecie, name)
self.type = "MassAction"
self.params = {
'ka': Ka,
'kd': Kd
}
self.__renameParams()
def __renameParams(self):
kaStr = "ka_{f1}_{f2}".format(f1=self.fs[0], f2=self.fs[1])
kdStr = "kd_{b1}".format(b1=self.bs[0])
self.paramNames['ka'] = kaStr
self.paramNames['kd'] = kdStr
def computeForward(self, stateVars: dict):
fs1 = stateVars[self.fs[0]]
fs2 = stateVars[self.fs[1]]
return self.params['ka'] * fs1 * fs2
def computeBackward(self, stateVars: dict):
b1 = stateVars[self.bs[0]]
return self.params['kd'] * b1
def getEqHeaderStr(self, index):
return "{forward1} + {forward2} <=> {backward} :R{i}".format(forward1=self.fs[0], forward2=self.fs[1], backward=self.bs[0], i=index)
def getForwardEqStr(self):
return "{ka} * {fs1} * {fs2}".format(ka=self.paramNames['ka'], fs1=self.fs[0], fs2=self.fs[1])
def getBackwardEqStr(self):
return "{kd} * {bs}".format(kd=self.paramNames['kd'], bs=self.bs[0])
if __name__ == "__main__":
ma = MassAction('Sos', 'Grb2')
print(ma.fs, ma.bs)
print(ma.params)
print(ma.paramNames)
| 28.301587
| 142
| 0.601795
|
4a131d287deb8c717c4e210145e70334120cba77
| 4,763
|
py
|
Python
|
kikit/eeschema_v6.py
|
TadeasPilar/KiKit
|
8364e085a9b6358df645c33ce3e62629b239f704
|
[
"MIT"
] | null | null | null |
kikit/eeschema_v6.py
|
TadeasPilar/KiKit
|
8364e085a9b6358df645c33ce3e62629b239f704
|
[
"MIT"
] | null | null | null |
kikit/eeschema_v6.py
|
TadeasPilar/KiKit
|
8364e085a9b6358df645c33ce3e62629b239f704
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, field
from kikit.sexpr import Atom, parseSexprF
from itertools import islice
import os
from typing import Optional
@dataclass
class Symbol:
uuid: Optional[str] = None
path: Optional[str] = None
unit: Optional[int] = None
lib_id: Optional[str] = None
in_bom: Optional[bool] = None
on_board: Optional[bool] = None
properties: dict = field(default_factory=dict)
@dataclass
class SymbolInstance:
path: Optional[str] = None
reference: Optional[str] = None
unit: Optional[int] = None
value: Optional[str] = None
footprint: Optional[str] = None
def getProperty(sexpr, field):
for x in islice(sexpr, 1, None):
if len(x) > 0 and \
isinstance(x[0], Atom) and x[0].value == "property" and \
isinstance(x[1], Atom) and x[1].value == field:
return x[2].value
return None
def isSymbol(sexpr):
if isinstance(sexpr, Atom) or len(sexpr) == 0:
return False
item = sexpr[0]
return isinstance(item, Atom) and item.value == "symbol"
def isSymbolInstances(sexpr):
if isinstance(sexpr, Atom) or len(sexpr) == 0:
return False
item = sexpr[0]
return isinstance(item, Atom) and item.value == "symbol_instances"
def isSheet(sexpr):
if isinstance(sexpr, Atom) or len(sexpr) == 0:
return False
item = sexpr[0]
return isinstance(item, Atom) and item.value == "sheet"
def isPath(sexpr):
if isinstance(sexpr, Atom) or len(sexpr) == 0:
return False
item = sexpr[0]
return isinstance(item, Atom) and item.value == "path"
def getUuid(sexpr):
for x in islice(sexpr, 1, None):
if x and x[0] == "uuid":
return x[1].value
return None
def extractSymbol(sexpr, path):
s = Symbol()
for x in islice(sexpr, 1, None):
if not x:
continue
key = x[0]
if not isinstance(key, Atom):
continue
key = key.value
if key == "lib_id":
s.lib_id = x[1].value
elif key == "lib_id":
s.unit = int(x[1].value)
elif key == "uuid":
s.uuid = x[1].value
s.path = path + "/" + s.uuid
elif key == "in_bom":
s.in_bom = x[1].value == "yes"
elif key == "on_board":
s.on_board = x[1].value == "yes"
elif key == "property":
s.properties[x[1].value] = x[2].value
return s
def extractSymbolInstance(sexpr):
s = SymbolInstance()
s.path = sexpr[1].value
for x in islice(sexpr, 2, None):
        if len(x) < 2:
continue
key = x[0]
if not isinstance(key, Atom):
continue
key = key.value
if key == "reference":
s.reference = x[1].value
elif key == "unit":
s.unit = int(x[1].value)
elif key == "value":
s.value = x[1].value
elif key == "footprint":
s.footprint = x[1].value
return s
def collectSymbols(filename, path=""):
"""
Crawl given sheet and return two lists - one with symbols, one with
symbol instances
"""
with open(filename) as f:
sheetSExpr = parseSexprF(f)
symbols, instances = [], []
for item in sheetSExpr.items:
if isSymbol(item):
symbols.append(extractSymbol(item, path))
continue
if isSheet(item):
f = getProperty(item, "Sheet file")
uuid = getUuid(item)
dirname = os.path.dirname(filename)
if len(dirname) > 0:
f = dirname + "/" + f
s, i = collectSymbols(f, path + "/" + uuid)
symbols += s
instances += i
continue
if isSymbolInstances(item):
for p in item.items:
if isPath(p):
instances.append(extractSymbolInstance(p))
continue
return symbols, instances
def getField(component, field):
return component.properties.get(field, None)
def getUnit(component):
return component.unit
def getReference(component):
return component.properties["Reference"]
def extractComponents(filename):
symbols, instances = collectSymbols(filename)
symbolsDict = {x.path: x for x in symbols}
assert len(symbols) == len(instances)
components = []
for inst in instances:
s = symbolsDict[inst.path]
# Note that s should be unique, so we can safely modify it
s.properties["Reference"] = inst.reference
s.properties["Value"] = inst.value
s.properties["Footprint"] = inst.footprint
s.unit = inst.unit
components.append(s)
return components
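# Usage sketch: print the reference and value of every component in a
# schematic; the default file name below is a placeholder.
if __name__ == "__main__":
    import sys

    schematic = sys.argv[1] if len(sys.argv) > 1 else "project.kicad_sch"
    for component in extractComponents(schematic):
        print(getReference(component), getField(component, "Value"))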
| 28.866667
| 71
| 0.575898
|
4a131da1d458918f774e879a2294d985cf75ded0
| 10,362
|
py
|
Python
|
spearmint/tests/kernels/test_matern.py
|
jatinarora2409/Spearmint
|
a209eb8aa7d5d93f2fdca6cff50dc17a94d926ab
|
[
"RSA-MD"
] | 1,590
|
2015-01-02T19:11:29.000Z
|
2022-03-31T13:36:16.000Z
|
spearmint/tests/kernels/test_matern.py
|
jatinarora2409/Spearmint
|
a209eb8aa7d5d93f2fdca6cff50dc17a94d926ab
|
[
"RSA-MD"
] | 99
|
2015-02-20T06:45:49.000Z
|
2021-12-06T13:28:44.000Z
|
spearmint/tests/kernels/test_matern.py
|
jatinarora2409/Spearmint
|
a209eb8aa7d5d93f2fdca6cff50dc17a94d926ab
|
[
"RSA-MD"
] | 366
|
2015-01-17T20:29:49.000Z
|
2022-02-21T16:22:31.000Z
|
# -*- coding: utf-8 -*-
# Spearmint
#
# Academic and Non-Commercial Research Use Software License and Terms
# of Use
#
# Spearmint is a software package to perform Bayesian optimization
# according to specific algorithms (the “Software”). The Software is
# designed to automatically run experiments (thus the code name
# 'spearmint') in a manner that iteratively adjusts a number of
# parameters so as to minimize some objective in as few runs as
# possible.
#
# The Software was developed by Ryan P. Adams, Michael Gelbart, and
# Jasper Snoek at Harvard University, Kevin Swersky at the
# University of Toronto (“Toronto”), and Hugo Larochelle at the
# Université de Sherbrooke (“Sherbrooke”), which assigned its rights
# in the Software to Socpra Sciences et Génie
# S.E.C. (“Socpra”). Pursuant to an inter-institutional agreement
# between the parties, it is distributed for free academic and
# non-commercial research use by the President and Fellows of Harvard
# College (“Harvard”).
#
# Using the Software indicates your agreement to be bound by the terms
# of this Software Use Agreement (“Agreement”). Absent your agreement
# to the terms below, you (the “End User”) have no rights to hold or
# use the Software whatsoever.
#
# Harvard agrees to grant hereunder the limited non-exclusive license
# to End User for the use of the Software in the performance of End
# User’s internal, non-commercial research and academic use at End
# User’s academic or not-for-profit research institution
# (“Institution”) on the following terms and conditions:
#
# 1. NO REDISTRIBUTION. The Software remains the property Harvard,
# Toronto and Socpra, and except as set forth in Section 4, End User
# shall not publish, distribute, or otherwise transfer or make
# available the Software to any other party.
#
# 2. NO COMMERCIAL USE. End User shall not use the Software for
# commercial purposes and any such use of the Software is expressly
# prohibited. This includes, but is not limited to, use of the
# Software in fee-for-service arrangements, core facilities or
# laboratories or to provide research services to (or in collaboration
# with) third parties for a fee, and in industry-sponsored
# collaborative research projects where any commercial rights are
# granted to the sponsor. If End User wishes to use the Software for
# commercial purposes or for any other restricted purpose, End User
# must execute a separate license agreement with Harvard.
#
# Requests for use of the Software for commercial purposes, please
# contact:
#
# Office of Technology Development
# Harvard University
# Smith Campus Center, Suite 727E
# 1350 Massachusetts Avenue
# Cambridge, MA 02138 USA
# Telephone: (617) 495-3067
# Facsimile: (617) 495-9568
# E-mail: otd@harvard.edu
#
# 3. OWNERSHIP AND COPYRIGHT NOTICE. Harvard, Toronto and Socpra own
# all intellectual property in the Software. End User shall gain no
# ownership to the Software. End User shall not remove or delete and
# shall retain in the Software, in any modifications to Software and
# in any Derivative Works, the copyright, trademark, or other notices
# pertaining to Software as provided with the Software.
#
# 4. DERIVATIVE WORKS. End User may create and use Derivative Works,
# as such term is defined under U.S. copyright laws, provided that any
# such Derivative Works shall be restricted to non-commercial,
# internal research and academic use at End User’s Institution. End
# User may distribute Derivative Works to other Institutions solely
# for the performance of non-commercial, internal research and
# academic use on terms substantially similar to this License and
# Terms of Use.
#
# 5. FEEDBACK. In order to improve the Software, comments from End
# Users may be useful. End User agrees to provide Harvard with
# feedback on the End User’s use of the Software (e.g., any bugs in
# the Software, the user experience, etc.). Harvard is permitted to
# use such information provided by End User in making changes and
# improvements to the Software without compensation or an accounting
# to End User.
#
# 6. NON ASSERT. End User acknowledges that Harvard, Toronto and/or
# Sherbrooke or Socpra may develop modifications to the Software that
# may be based on the feedback provided by End User under Section 5
# above. Harvard, Toronto and Sherbrooke/Socpra shall not be
# restricted in any way by End User regarding their use of such
# information. End User acknowledges the right of Harvard, Toronto
# and Sherbrooke/Socpra to prepare, publish, display, reproduce,
# transmit and or use modifications to the Software that may be
# substantially similar or functionally equivalent to End User’s
# modifications and/or improvements if any. In the event that End
# User obtains patent protection for any modification or improvement
# to Software, End User agrees not to allege or enjoin infringement of
# End User’s patent against Harvard, Toronto or Sherbrooke or Socpra,
# or any of the researchers, medical or research staff, officers,
# directors and employees of those institutions.
#
# 7. PUBLICATION & ATTRIBUTION. End User has the right to publish,
# present, or share results from the use of the Software. In
# accordance with customary academic practice, End User will
# acknowledge Harvard, Toronto and Sherbrooke/Socpra as the providers
# of the Software and may cite the relevant reference(s) from the
# following list of publications:
#
# Practical Bayesian Optimization of Machine Learning Algorithms
# Jasper Snoek, Hugo Larochelle and Ryan Prescott Adams
# Neural Information Processing Systems, 2012
#
# Multi-Task Bayesian Optimization
# Kevin Swersky, Jasper Snoek and Ryan Prescott Adams
# Advances in Neural Information Processing Systems, 2013
#
# Input Warping for Bayesian Optimization of Non-stationary Functions
# Jasper Snoek, Kevin Swersky, Richard Zemel and Ryan Prescott Adams
# Preprint, arXiv:1402.0929, http://arxiv.org/abs/1402.0929, 2013
#
# Bayesian Optimization and Semiparametric Models with Applications to
# Assistive Technology Jasper Snoek, PhD Thesis, University of
# Toronto, 2013
#
# 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS." TO THE FULLEST
# EXTENT PERMITTED BY LAW, HARVARD, TORONTO AND SHERBROOKE AND SOCPRA
# HEREBY DISCLAIM ALL WARRANTIES OF ANY KIND (EXPRESS, IMPLIED OR
# OTHERWISE) REGARDING THE SOFTWARE, INCLUDING BUT NOT LIMITED TO ANY
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OWNERSHIP, AND NON-INFRINGEMENT. HARVARD, TORONTO AND
# SHERBROOKE AND SOCPRA MAKE NO WARRANTY ABOUT THE ACCURACY,
# RELIABILITY, COMPLETENESS, TIMELINESS, SUFFICIENCY OR QUALITY OF THE
# SOFTWARE. HARVARD, TORONTO AND SHERBROOKE AND SOCPRA DO NOT WARRANT
# THAT THE SOFTWARE WILL OPERATE WITHOUT ERROR OR INTERRUPTION.
#
# 9. LIMITATIONS OF LIABILITY AND REMEDIES. USE OF THE SOFTWARE IS AT
# END USER’S OWN RISK. IF END USER IS DISSATISFIED WITH THE SOFTWARE,
# ITS EXCLUSIVE REMEDY IS TO STOP USING IT. IN NO EVENT SHALL
# HARVARD, TORONTO OR SHERBROOKE OR SOCPRA BE LIABLE TO END USER OR
# ITS INSTITUTION, IN CONTRACT, TORT OR OTHERWISE, FOR ANY DIRECT,
# INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR OTHER
# DAMAGES OF ANY KIND WHATSOEVER ARISING OUT OF OR IN CONNECTION WITH
# THE SOFTWARE, EVEN IF HARVARD, TORONTO OR SHERBROOKE OR SOCPRA IS
# NEGLIGENT OR OTHERWISE AT FAULT, AND REGARDLESS OF WHETHER HARVARD,
# TORONTO OR SHERBROOKE OR SOCPRA IS ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGES.
#
# 10. INDEMNIFICATION. To the extent permitted by law, End User shall
# indemnify, defend and hold harmless Harvard, Toronto and Sherbrooke
# and Socpra, their corporate affiliates, current or future directors,
# trustees, officers, faculty, medical and professional staff,
# employees, students and agents and their respective successors,
# heirs and assigns (the "Indemnitees"), against any liability,
# damage, loss or expense (including reasonable attorney's fees and
# expenses of litigation) incurred by or imposed upon the Indemnitees
# or any one of them in connection with any claims, suits, actions,
# demands or judgments arising from End User’s breach of this
# Agreement or its Institution’s use of the Software except to the
# extent caused by the gross negligence or willful misconduct of
# Harvard, Toronto or Sherbrooke or Socpra. This indemnification
# provision shall survive expiration or termination of this Agreement.
#
# 11. GOVERNING LAW. This Agreement shall be construed and governed by
# the laws of the Commonwealth of Massachusetts regardless of
# otherwise applicable choice of law standards.
#
# 12. NON-USE OF NAME. Nothing in this License and Terms of Use shall
# be construed as granting End Users or their Institutions any rights
# or licenses to use any trademarks, service marks or logos associated
# with the Software. You may not use the terms “Harvard” or
# “University of Toronto” or “Université de Sherbrooke” or “Socpra
# Sciences et Génie S.E.C.” (or a substantially similar term) in any
# way that is inconsistent with the permitted uses described
# herein. You agree not to use any name or emblem of Harvard, Toronto
# or Sherbrooke, or any of their subdivisions for any purpose, or to
# falsely suggest any relationship between End User (or its
# Institution) and Harvard, Toronto and/or Sherbrooke, or in any
# manner that would infringe or violate any of their rights.
#
# 13. End User represents and warrants that it has the legal authority
# to enter into this License and Terms of Use on behalf of itself and
# its Institution.
import numpy as np
import numpy.random as npr
from spearmint.kernels import Matern52
def test_matern_grad():
npr.seed(1)
eps = 1e-5
N = 10
M = 5
D = 3
kernel = Matern52(D)
data1 = npr.randn(N,D)
data2 = npr.randn(M,D)
loss = np.sum(kernel.cross_cov(data1, data2))
dloss = kernel.cross_cov_grad_data(data1, data2).sum(0)
dloss_est = np.zeros(dloss.shape)
    for i in range(M):
        for j in range(D):
data2[i,j] += eps
loss_1 = np.sum(kernel.cross_cov(data1, data2))
data2[i,j] -= 2*eps
loss_2 = np.sum(kernel.cross_cov(data1, data2))
data2[i,j] += eps
dloss_est[i,j] = ((loss_1 - loss_2) / (2*eps))
assert np.linalg.norm(dloss - dloss_est) < 1e-6
| 47.1
| 70
| 0.763849
|
4a131dfd43b8c3f66c2dbf87317c1d2c29601ead
| 10,520
|
py
|
Python
|
colour_demosaicing/bayer/demosaicing/menon2007.py
|
MengmSun/colour-demosaicing
|
3f3893403e467c1cffc17cb708db3a5669b42d18
|
[
"BSD-3-Clause"
] | 1
|
2022-03-03T13:26:20.000Z
|
2022-03-03T13:26:20.000Z
|
colour_demosaicing/bayer/demosaicing/menon2007.py
|
MengmSun/colour-demosaicing
|
3f3893403e467c1cffc17cb708db3a5669b42d18
|
[
"BSD-3-Clause"
] | null | null | null |
colour_demosaicing/bayer/demosaicing/menon2007.py
|
MengmSun/colour-demosaicing
|
3f3893403e467c1cffc17cb708db3a5669b42d18
|
[
"BSD-3-Clause"
] | null | null | null |
"""
DDFAPD - Menon (2007) Bayer CFA Demosaicing
===========================================
*Bayer* CFA (Colour Filter Array) DDFAPD - *Menon (2007)* demosaicing.
References
----------
- :cite:`Menon2007c` : Menon, D., Andriani, S., & Calvagno, G. (2007).
Demosaicing With Directional Filtering and a posteriori Decision. IEEE
Transactions on Image Processing, 16(1), 132-141.
doi:10.1109/TIP.2006.884928
"""
from __future__ import annotations
import numpy as np
from scipy.ndimage import convolve, convolve1d
from colour.hints import ArrayLike, Boolean, Literal, NDArray, Union
from colour.utilities import as_float_array, tsplit, tstack
from colour_demosaicing.bayer import masks_CFA_Bayer
__author__ = "Colour Developers"
__copyright__ = "Copyright 2015 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"demosaicing_CFA_Bayer_Menon2007",
"demosaicing_CFA_Bayer_DDFAPD",
"refining_step_Menon2007",
]
def _cnv_h(x: ArrayLike, y: ArrayLike) -> NDArray:
"""Perform horizontal convolution."""
return convolve1d(x, y, mode="mirror")
def _cnv_v(x: ArrayLike, y: ArrayLike) -> NDArray:
"""Perform vertical convolution."""
return convolve1d(x, y, mode="mirror", axis=0)
def demosaicing_CFA_Bayer_Menon2007(
CFA: ArrayLike,
pattern: Union[Literal["RGGB", "BGGR", "GRBG", "GBRG"], str] = "RGGB",
refining_step: Boolean = True,
):
"""
Return the demosaiced *RGB* colourspace array from given *Bayer* CFA using
DDFAPD - *Menon (2007)* demosaicing algorithm.
Parameters
----------
CFA
*Bayer* CFA.
pattern
Arrangement of the colour filters on the pixel array.
refining_step
Perform refining step.
Returns
-------
:class:`numpy.ndarray`
*RGB* colourspace array.
Notes
-----
- The definition output is not clipped in range [0, 1] : this allows for
direct HDRI / radiance image generation on *Bayer* CFA data and post
demosaicing of the high dynamic range data as showcased in this
`Jupyter Notebook <https://github.com/colour-science/colour-hdri/\
blob/develop/colour_hdri/examples/\
examples_merge_from_raw_files_with_post_demosaicing.ipynb>`__.
References
----------
:cite:`Menon2007c`
Examples
--------
>>> CFA = np.array(
... [[ 0.30980393, 0.36078432, 0.30588236, 0.3764706 ],
... [ 0.35686275, 0.39607844, 0.36078432, 0.40000001]])
>>> demosaicing_CFA_Bayer_Menon2007(CFA)
array([[[ 0.30980393, 0.35686275, 0.39215687],
[ 0.30980393, 0.36078432, 0.39607844],
[ 0.30588236, 0.36078432, 0.39019608],
[ 0.32156864, 0.3764706 , 0.40000001]],
<BLANKLINE>
[[ 0.30980393, 0.35686275, 0.39215687],
[ 0.30980393, 0.36078432, 0.39607844],
[ 0.30588236, 0.36078432, 0.39019609],
[ 0.32156864, 0.3764706 , 0.40000001]]])
>>> CFA = np.array(
... [[ 0.3764706 , 0.36078432, 0.40784314, 0.3764706 ],
... [ 0.35686275, 0.30980393, 0.36078432, 0.29803923]])
>>> demosaicing_CFA_Bayer_Menon2007(CFA, 'BGGR')
array([[[ 0.30588236, 0.35686275, 0.3764706 ],
[ 0.30980393, 0.36078432, 0.39411766],
[ 0.29607844, 0.36078432, 0.40784314],
[ 0.29803923, 0.3764706 , 0.42352942]],
<BLANKLINE>
[[ 0.30588236, 0.35686275, 0.3764706 ],
[ 0.30980393, 0.36078432, 0.39411766],
[ 0.29607844, 0.36078432, 0.40784314],
[ 0.29803923, 0.3764706 , 0.42352942]]])
"""
CFA = as_float_array(CFA)
R_m, G_m, B_m = masks_CFA_Bayer(CFA.shape, pattern)
h_0 = as_float_array([0.0, 0.5, 0.0, 0.5, 0.0])
h_1 = as_float_array([-0.25, 0.0, 0.5, 0.0, -0.25])
R = CFA * R_m
G = CFA * G_m
B = CFA * B_m
G_H = np.where(G_m == 0, _cnv_h(CFA, h_0) + _cnv_h(CFA, h_1), G)
G_V = np.where(G_m == 0, _cnv_v(CFA, h_0) + _cnv_v(CFA, h_1), G)
C_H = np.where(R_m == 1, R - G_H, 0)
C_H = np.where(B_m == 1, B - G_H, C_H)
C_V = np.where(R_m == 1, R - G_V, 0)
C_V = np.where(B_m == 1, B - G_V, C_V)
D_H = np.abs(C_H - np.pad(C_H, ((0, 0), (0, 2)), mode="reflect")[:, 2:])
D_V = np.abs(C_V - np.pad(C_V, ((0, 2), (0, 0)), mode="reflect")[2:, :])
del h_0, h_1, CFA, C_V, C_H
k = as_float_array(
[
[0.0, 0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 3.0, 0.0, 3.0],
[0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 1.0],
]
)
d_H = convolve(D_H, k, mode="constant")
d_V = convolve(D_V, np.transpose(k), mode="constant")
del D_H, D_V
mask = d_V >= d_H
G = np.where(mask, G_H, G_V)
M = np.where(mask, 1, 0)
del d_H, d_V, G_H, G_V
# Red rows.
R_r = np.transpose(np.any(R_m == 1, axis=1)[np.newaxis]) * np.ones(R.shape)
# Blue rows.
B_r = np.transpose(np.any(B_m == 1, axis=1)[np.newaxis]) * np.ones(B.shape)
k_b = as_float_array([0.5, 0, 0.5])
R = np.where(
np.logical_and(G_m == 1, R_r == 1),
G + _cnv_h(R, k_b) - _cnv_h(G, k_b),
R,
)
R = np.where(
np.logical_and(G_m == 1, B_r == 1) == 1,
G + _cnv_v(R, k_b) - _cnv_v(G, k_b),
R,
)
B = np.where(
np.logical_and(G_m == 1, B_r == 1),
G + _cnv_h(B, k_b) - _cnv_h(G, k_b),
B,
)
B = np.where(
np.logical_and(G_m == 1, R_r == 1) == 1,
G + _cnv_v(B, k_b) - _cnv_v(G, k_b),
B,
)
R = np.where(
np.logical_and(B_r == 1, B_m == 1),
np.where(
M == 1,
B + _cnv_h(R, k_b) - _cnv_h(B, k_b),
B + _cnv_v(R, k_b) - _cnv_v(B, k_b),
),
R,
)
B = np.where(
np.logical_and(R_r == 1, R_m == 1),
np.where(
M == 1,
R + _cnv_h(B, k_b) - _cnv_h(R, k_b),
R + _cnv_v(B, k_b) - _cnv_v(R, k_b),
),
B,
)
RGB = tstack([R, G, B])
del R, G, B, k_b, R_r, B_r
if refining_step:
RGB = refining_step_Menon2007(RGB, tstack([R_m, G_m, B_m]), M)
del M, R_m, G_m, B_m
return RGB
demosaicing_CFA_Bayer_DDFAPD = demosaicing_CFA_Bayer_Menon2007
def refining_step_Menon2007(
RGB: ArrayLike, RGB_m: ArrayLike, M: ArrayLike
) -> NDArray:
"""
Perform the refining step on given *RGB* colourspace array.
Parameters
----------
RGB
*RGB* colourspace array.
RGB_m
*Bayer* CFA red, green and blue masks.
M
Estimation for the best directional reconstruction.
Returns
-------
:class:`numpy.ndarray`
Refined *RGB* colourspace array.
Examples
--------
>>> RGB = np.array(
... [[[0.30588236, 0.35686275, 0.3764706],
... [0.30980393, 0.36078432, 0.39411766],
... [0.29607844, 0.36078432, 0.40784314],
... [0.29803923, 0.37647060, 0.42352942]],
... [[0.30588236, 0.35686275, 0.3764706],
... [0.30980393, 0.36078432, 0.39411766],
... [0.29607844, 0.36078432, 0.40784314],
... [0.29803923, 0.37647060, 0.42352942]]])
>>> RGB_m = np.array(
... [[[0, 0, 1],
... [0, 1, 0],
... [0, 0, 1],
... [0, 1, 0]],
... [[0, 1, 0],
... [1, 0, 0],
... [0, 1, 0],
... [1, 0, 0]]])
>>> M = np.array(
... [[0, 1, 0, 1],
... [1, 0, 1, 0]])
>>> refining_step_Menon2007(RGB, RGB_m, M)
array([[[ 0.30588236, 0.35686275, 0.3764706 ],
[ 0.30980393, 0.36078432, 0.39411765],
[ 0.29607844, 0.36078432, 0.40784314],
[ 0.29803923, 0.3764706 , 0.42352942]],
<BLANKLINE>
[[ 0.30588236, 0.35686275, 0.3764706 ],
[ 0.30980393, 0.36078432, 0.39411766],
[ 0.29607844, 0.36078432, 0.40784314],
[ 0.29803923, 0.3764706 , 0.42352942]]])
"""
R, G, B = tsplit(RGB)
R_m, G_m, B_m = tsplit(RGB_m)
M = as_float_array(M)
del RGB, RGB_m
# Updating of the green component.
R_G = R - G
B_G = B - G
FIR = np.ones(3) / 3
B_G_m = np.where(
B_m == 1,
np.where(M == 1, _cnv_h(B_G, FIR), _cnv_v(B_G, FIR)),
0,
)
R_G_m = np.where(
R_m == 1,
np.where(M == 1, _cnv_h(R_G, FIR), _cnv_v(R_G, FIR)),
0,
)
del B_G, R_G
G = np.where(R_m == 1, R - R_G_m, G)
G = np.where(B_m == 1, B - B_G_m, G)
# Updating of the red and blue components in the green locations.
# Red rows.
R_r = np.transpose(np.any(R_m == 1, axis=1)[np.newaxis]) * np.ones(R.shape)
# Red columns.
R_c = np.any(R_m == 1, axis=0)[np.newaxis] * np.ones(R.shape)
# Blue rows.
B_r = np.transpose(np.any(B_m == 1, axis=1)[np.newaxis]) * np.ones(B.shape)
# Blue columns.
B_c = np.any(B_m == 1, axis=0)[np.newaxis] * np.ones(B.shape)
R_G = R - G
B_G = B - G
k_b = as_float_array([0.5, 0.0, 0.5])
R_G_m = np.where(
np.logical_and(G_m == 1, B_r == 1),
_cnv_v(R_G, k_b),
R_G_m,
)
R = np.where(np.logical_and(G_m == 1, B_r == 1), G + R_G_m, R)
R_G_m = np.where(
np.logical_and(G_m == 1, B_c == 1),
_cnv_h(R_G, k_b),
R_G_m,
)
R = np.where(np.logical_and(G_m == 1, B_c == 1), G + R_G_m, R)
del B_r, R_G_m, B_c, R_G
B_G_m = np.where(
np.logical_and(G_m == 1, R_r == 1),
_cnv_v(B_G, k_b),
B_G_m,
)
B = np.where(np.logical_and(G_m == 1, R_r == 1), G + B_G_m, B)
B_G_m = np.where(
np.logical_and(G_m == 1, R_c == 1),
_cnv_h(B_G, k_b),
B_G_m,
)
B = np.where(np.logical_and(G_m == 1, R_c == 1), G + B_G_m, B)
del B_G_m, R_r, R_c, G_m, B_G
# Updating of the red (blue) component in the blue (red) locations.
R_B = R - B
R_B_m = np.where(
B_m == 1,
np.where(M == 1, _cnv_h(R_B, FIR), _cnv_v(R_B, FIR)),
0,
)
R = np.where(B_m == 1, B + R_B_m, R)
R_B_m = np.where(
R_m == 1,
np.where(M == 1, _cnv_h(R_B, FIR), _cnv_v(R_B, FIR)),
0,
)
B = np.where(R_m == 1, R - R_B_m, B)
del R_B, R_B_m, R_m
return tstack([R, G, B])
| 28.053333
| 79
| 0.532319
|
4a131e020c330f61584dbe8efd48ab07a4663eb7
| 6,581
|
py
|
Python
|
src/services/stream/crunchyroll.py
|
alexmuch/holo
|
29cd5bf492104c4b68c0d7fe0e808ef4dae54bb9
|
[
"MIT"
] | null | null | null |
src/services/stream/crunchyroll.py
|
alexmuch/holo
|
29cd5bf492104c4b68c0d7fe0e808ef4dae54bb9
|
[
"MIT"
] | null | null | null |
src/services/stream/crunchyroll.py
|
alexmuch/holo
|
29cd5bf492104c4b68c0d7fe0e808ef4dae54bb9
|
[
"MIT"
] | null | null | null |
from logging import debug, info, warning, error, exception
import re
from datetime import datetime, timedelta
from .. import AbstractServiceHandler
from data.models import Episode, UnprocessedStream
class ServiceHandler(AbstractServiceHandler):
_show_url = "http://crunchyroll.com/{id}"
_show_re = re.compile("crunchyroll.com/([\w-]+)", re.I)
_episode_rss = "http://crunchyroll.com/{id}.rss"
_backup_rss = "http://crunchyroll.com/rss/anime"
_season_url = "http://crunchyroll.com/lineup"
def __init__(self):
super().__init__("crunchyroll", "Crunchyroll", False)
# Episode finding
def get_all_episodes(self, stream, **kwargs):
info("Getting live episodes for Crunchyroll/{}".format(stream.show_key))
episode_datas = self._get_feed_episodes(stream.show_key, **kwargs)
# Check data validity and digest
episodes = []
for episode_data in episode_datas:
if _is_valid_episode(episode_data, stream.show_key):
try:
episodes.append(_digest_episode(episode_data))
except:
exception("Problem digesting episode for Crunchyroll/{}".format(stream.show_key))
if len(episode_datas) > 0:
debug(" {} episodes found, {} valid".format(len(episode_datas), len(episodes)))
else:
debug(" No episodes found")
return episodes
def _get_feed_episodes(self, show_key, **kwargs):
"""
Always returns a list.
"""
info("Getting episodes for Crunchyroll/{}".format(show_key))
url = self._get_feed_url(show_key)
# Send request
response = self.request(url, rss=True, **kwargs)
if response is None:
error("Cannot get latest show for Crunchyroll/{}".format(show_key))
return list()
# Parse RSS feed
if not _verify_feed(response):
warning("Parsed feed could not be verified, may have unexpected results")
return response.get("entries", list())
@classmethod
def _get_feed_url(cls, show_key):
# Sometimes shows don't have an RSS feed
# Use the backup global feed when it doesn't
if show_key is not None:
return cls._episode_rss.format(id=show_key)
else:
debug(" Using backup feed")
return cls._backup_rss
# Remote info getting
_title_fix = re.compile("(.*) Episodes", re.I)
_title_fix_fr = re.compile("(.*) Épisodes", re.I)
def get_stream_info(self, stream, **kwargs):
info("Getting stream info for Crunchyroll/{}".format(stream.show_key))
url = self._get_feed_url(stream.show_key)
response = self.request(url, rss=True, **kwargs)
if response is None:
error("Cannot get feed")
return None
if not _verify_feed(response):
warning("Parsed feed could not be verified, may have unexpected results")
stream.name = response.feed.title
match = self._title_fix.match(stream.name)
if match:
stream.name = match.group(1)
match = self._title_fix_fr.match(stream.name)
if match:
stream.name = match.group(1)
return stream
def get_seasonal_streams(self, **kwargs):
debug("Getting season shows")
# Request page
response = self.request(self._season_url, html=True, **kwargs)
if response is None:
error("Failed to get seasonal streams page")
return list()
# Find sections (continuing simulcast, new simulcast, new catalog)
lists = response.find_all(class_="lineup-grid")
if len(lists) < 2:
error("Unsupported structure of lineup page")
return list()
		elif len(lists) > 3:
			warning("Unexpected number of lineup grids")
# Parse individual shows
# WARNING: Some may be dramas and there's nothing distinguishing them from anime
show_elements = lists[1].find_all(class_="element-lineup-anime")
raw_streams = list()
for show in show_elements:
title = show["title"]
if "to be announced" not in title.lower():
debug(" Show: {}".format(title))
url = show["href"]
debug(" URL: {}".format(url))
url_match = self._show_re.search(url)
if not url_match:
error("Failed to parse show URL: {}".format(url))
continue
key = url_match.group(1)
debug(" Key: {}".format(key))
remote_offset, display_offset = self._get_stream_info(key)
raw_stream = UnprocessedStream(self.key, key, None, title, remote_offset, display_offset)
raw_streams.append(raw_stream)
return raw_streams
def _get_stream_info(self, show_key):
#TODO: load show page and figure out offsets based on contents
return 0, 0
# Local info formatting
def get_stream_link(self, stream):
# Just going to assume it's the correct service
return self._show_url.format(id=stream.show_key)
def extract_show_key(self, url):
match = self._show_re.search(url)
if match:
return match.group(1)
return None
# Episode feeds
def _verify_feed(feed):
debug("Verifying feed")
if feed.bozo:
debug(" Feed was malformed")
return False
if "crunchyroll" not in feed.namespaces or feed.namespaces["crunchyroll"] != "http://www.crunchyroll.com/rss":
debug(" Crunchyroll namespace not found or invalid")
return False
if feed.feed.language != "en-us":
debug(" Language not en-us")
return False
debug(" Feed verified")
return True
def _is_valid_episode(feed_episode, show_id):
# We don't want non-episodes (PVs, VA interviews, etc.)
if feed_episode.get("crunchyroll_isclip", False) or not hasattr(feed_episode, "crunchyroll_episodenumber"):
debug("Is PV, ignoring")
return False
# Don't check really old episodes
episode_date = datetime(*feed_episode.published_parsed[:6])
date_diff = datetime.utcnow() - episode_date
if date_diff >= timedelta(days=2):
debug(" Episode too old")
return False
return True
_episode_name_correct = re.compile(r"Episode \d+ - (.*)")
_episode_count_fix = re.compile("([0-9]+)[abc]?", re.I)
def _digest_episode(feed_episode):
debug("Digesting episode")
# Get data
num_match = _episode_count_fix.match(feed_episode.crunchyroll_episodenumber)
if num_match:
num = int(num_match.group(1))
else:
warning("Unknown episode number format \"{}\"".format(feed_episode.crunchyroll_episodenumber))
num = 0
debug(" num={}".format(num))
name = feed_episode.title
match = _episode_name_correct.match(name)
if match:
debug(" Corrected title from \"{}\"".format(name))
name = match.group(1)
debug(" name={}".format(name))
link = feed_episode.link
debug(" link={}".format(link))
date = feed_episode.published_parsed
debug(" date={}".format(date))
return Episode(num, name, link, date)
_slug_regex = re.compile("crunchyroll.com/([a-z0-9-]+)/", re.I)
def _get_slug(episode_link):
match = _slug_regex.search(episode_link)
if match:
return match.group(1)
return None
# Season page
| 30.467593
| 111
| 0.711746
|
4a131f6888a2f1e54db9b7aa69c175046cde6891
| 985
|
py
|
Python
|
opennre/model/pairwise_ranking_loss.py
|
igorvlnascimento/DeepREF
|
0fed8120571e44e12ee3d1861289bc101c0a275f
|
[
"MIT"
] | null | null | null |
opennre/model/pairwise_ranking_loss.py
|
igorvlnascimento/DeepREF
|
0fed8120571e44e12ee3d1861289bc101c0a275f
|
[
"MIT"
] | null | null | null |
opennre/model/pairwise_ranking_loss.py
|
igorvlnascimento/DeepREF
|
0fed8120571e44e12ee3d1861289bc101c0a275f
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
class PairwiseRankingLoss(nn.Module):
def __init__(self,
margin_positive=2.5,
margin_negative=0.5,
gamma=2.0):
super().__init__()
self.margin_positive = margin_positive
self.margin_negative = margin_negative
self.gamma = gamma
def forward(self, scores, labels):
mask = F.one_hot(labels, scores.shape[-1])
positive_scores = scores.masked_fill(mask.eq(0), float('-inf')).max(dim=1)[0]
negative_scores = scores.masked_fill(mask.eq(1), float('-inf')).max(dim=1)[0]
positive_loss = torch.log1p(torch.exp(self.gamma*(self.margin_positive-positive_scores)))
positive_loss[labels == 0] = 0.0 # exclusive `Other` loss
negative_loss = torch.log1p(torch.exp(self.gamma*(self.margin_negative+negative_scores)))
loss = torch.mean(positive_loss + negative_loss)
return loss
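# Usage sketch with random inputs: a batch of 4 examples over 10 relation
# classes, where label 0 plays the role of the excluded `Other` class.
if __name__ == "__main__":
    torch.manual_seed(0)
    scores = torch.randn(4, 10)
    labels = torch.tensor([0, 3, 7, 1])
    criterion = PairwiseRankingLoss()
    print(criterion(scores, labels))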
| 42.826087
| 97
| 0.649746
|
4a131f7b91fb30c765f5ef1239a03b4338992bb2
| 297
|
py
|
Python
|
tests/mock/oauth_claims.py
|
MisterWil/python-abode
|
4ffce2314ed7e2c5d48a2c2758fddaef440b05ad
|
[
"MIT"
] | 48
|
2017-08-10T21:32:50.000Z
|
2021-08-15T05:09:58.000Z
|
tests/mock/oauth_claims.py
|
MisterWil/python-abode
|
4ffce2314ed7e2c5d48a2c2758fddaef440b05ad
|
[
"MIT"
] | 81
|
2017-08-10T21:39:40.000Z
|
2022-01-16T18:43:08.000Z
|
tests/mock/oauth_claims.py
|
MisterWil/python-abode
|
4ffce2314ed7e2c5d48a2c2758fddaef440b05ad
|
[
"MIT"
] | 28
|
2017-08-17T21:20:12.000Z
|
2022-01-16T12:22:07.000Z
|
"""Mock Abode Claims Response."""
from tests.mock import OAUTH_TOKEN
def get_response_ok(oauth_token=OAUTH_TOKEN):
"""Return the oauth2 claims token."""
return '''
{
"token_type":"Bearer",
"access_token":"''' + oauth_token + '''",
"expires_in":3600
}'''
| 21.214286
| 49
| 0.599327
|
4a131fdf5d094d681a5d50a3ac54815f8fadae35
| 9,979
|
py
|
Python
|
thingsboard_gateway/tb_client/tb_gateway_mqtt.py
|
netcadlabs/thingsboard-gateway
|
6c4bd1a98627aaf1aba5011297d25c3fbc0bed0d
|
[
"Apache-2.0"
] | null | null | null |
thingsboard_gateway/tb_client/tb_gateway_mqtt.py
|
netcadlabs/thingsboard-gateway
|
6c4bd1a98627aaf1aba5011297d25c3fbc0bed0d
|
[
"Apache-2.0"
] | null | null | null |
thingsboard_gateway/tb_client/tb_gateway_mqtt.py
|
netcadlabs/thingsboard-gateway
|
6c4bd1a98627aaf1aba5011297d25c3fbc0bed0d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from simplejson import dumps
from thingsboard_gateway.tb_client.tb_device_mqtt import TBDeviceMqttClient
from thingsboard_gateway.tb_utility.tb_utility import TBUtility
GATEWAY_ATTRIBUTES_TOPIC = "v1/gateway/attributes"
GATEWAY_ATTRIBUTES_REQUEST_TOPIC = "v1/gateway/attributes/request"
GATEWAY_ATTRIBUTES_RESPONSE_TOPIC = "v1/gateway/attributes/response"
GATEWAY_MAIN_TOPIC = "v1/gateway/"
GATEWAY_RPC_TOPIC = "v1/gateway/rpc"
GATEWAY_RPC_RESPONSE_TOPIC = "v1/gateway/rpc/response"
log = logging.getLogger("tb_connection")
class TBGatewayAPI:
pass
class TBGatewayMqttClient(TBDeviceMqttClient):
def __init__(self, host, port, token=None, gateway=None, quality_of_service=1):
super().__init__(host, port, token, quality_of_service)
self.quality_of_service = quality_of_service
self.__max_sub_id = 0
self.__sub_dict = {}
self.__connected_devices = set("*")
self.devices_server_side_rpc_request_handler = None
self._client.on_connect = self._on_connect
self._client.on_message = self._on_message
self._client.on_subscribe = self._on_subscribe
        self._client.on_unsubscribe = self._on_unsubscribe
self._gw_subscriptions = {}
self.gateway = gateway
def _on_connect(self, client, userdata, flags, result_code, *extra_params):
super()._on_connect(client, userdata, flags, result_code, *extra_params)
if result_code == 0:
self._gw_subscriptions[int(self._client.subscribe(GATEWAY_ATTRIBUTES_TOPIC, qos=1)[1])] = GATEWAY_ATTRIBUTES_TOPIC
self._gw_subscriptions[int(self._client.subscribe(GATEWAY_ATTRIBUTES_RESPONSE_TOPIC, qos=1)[1])] = GATEWAY_ATTRIBUTES_RESPONSE_TOPIC
self._gw_subscriptions[int(self._client.subscribe(GATEWAY_RPC_TOPIC, qos=1)[1])] = GATEWAY_RPC_TOPIC
# self._gw_subscriptions[int(self._client.subscribe(GATEWAY_RPC_RESPONSE_TOPIC)[1])] = GATEWAY_RPC_RESPONSE_TOPIC
def _on_subscribe(self, client, userdata, mid, granted_qos):
subscription = self._gw_subscriptions.get(mid)
if subscription is not None:
            # a granted QoS of 128 signals a failed subscription
            if granted_qos and granted_qos[0] == 128:
                log.error("Service subscription to topic %s - failed.", subscription)
            else:
                log.debug("Service subscription to topic %s - successfully completed.", subscription)
            del self._gw_subscriptions[mid]
def _on_unsubscribe(self, *args):
log.debug(args)
def get_subscriptions_in_progress(self):
        return bool(self._gw_subscriptions)
def _on_message(self, client, userdata, message):
content = TBUtility.decode(message)
super()._on_decoded_message(content, message)
self._on_decoded_message(content, message)
def _on_decoded_message(self, content, message):
if message.topic.startswith(GATEWAY_ATTRIBUTES_RESPONSE_TOPIC):
with self._lock:
req_id = content["id"]
# pop callback and use it
if self._attr_request_dict[req_id]:
self._attr_request_dict.pop(req_id)(content, None)
else:
log.error("Unable to find callback to process attributes response from TB")
elif message.topic == GATEWAY_ATTRIBUTES_TOPIC:
with self._lock:
# callbacks for everything
if self.__sub_dict.get("*|*"):
for callback in self.__sub_dict["*|*"]:
self.__sub_dict["*|*"][callback](content)
# callbacks for device. in this case callback executes for all attributes in message
target = content["device"] + "|*"
if self.__sub_dict.get(target):
for callback in self.__sub_dict[target]:
self.__sub_dict[target][callback](content)
# callback for atr. in this case callback executes for all attributes in message
targets = [content["device"] + "|" + attribute for attribute in content["data"]]
for target in targets:
if self.__sub_dict.get(target):
for sub_id in self.__sub_dict[target]:
self.__sub_dict[target][sub_id](content)
elif message.topic == GATEWAY_RPC_TOPIC:
if self.devices_server_side_rpc_request_handler:
self.devices_server_side_rpc_request_handler(self, content)
def __request_attributes(self, device, keys, callback, type_is_client=False):
if not keys:
log.error("There are no keys to request")
return False
keys_str = ""
for key in keys:
keys_str += key + ","
keys_str = keys_str[:len(keys_str) - 1]
ts_in_millis = int(round(time.time() * 1000))
attr_request_number = self._add_attr_request_callback(callback)
msg = {"key": keys_str,
"device": device,
"client": type_is_client,
"id": attr_request_number}
info = self._client.publish(GATEWAY_ATTRIBUTES_REQUEST_TOPIC, dumps(msg), 1)
self._add_timeout(attr_request_number, ts_in_millis + 30000)
return info
def gw_request_shared_attributes(self, device_name, keys, callback):
return self.__request_attributes(device_name, keys, callback, False)
def gw_request_client_attributes(self, device_name, keys, callback):
return self.__request_attributes(device_name, keys, callback, True)
def gw_send_attributes(self, device, attributes, quality_of_service=1):
return self.publish_data({device: attributes}, GATEWAY_MAIN_TOPIC + "attributes", quality_of_service)
def gw_send_telemetry(self, device, telemetry, quality_of_service=1):
if not isinstance(telemetry, list) and not (isinstance(telemetry, dict) and telemetry.get("ts") is not None):
telemetry = [telemetry]
return self.publish_data({device: telemetry}, GATEWAY_MAIN_TOPIC + "telemetry", quality_of_service, )
def gw_connect_device(self, device_name, device_type, extra_kv=None):
payload = {"device": device_name, "type": device_type}
if extra_kv is not None:
for key in extra_kv:
if str(extra_kv) is not str("device") and str(extra_kv) is not str("type") and extra_kv.get(key, None) is not None:
payload[key] = extra_kv[key]
info = self._client.publish(topic=GATEWAY_MAIN_TOPIC + "connect", payload=dumps(payload), qos=self.quality_of_service)
self.__connected_devices.add(device_name)
# if self.gateway:
# self.gateway.on_device_connected(device_name, self.__devices_server_side_rpc_request_handler)
log.debug("Connected device %s", device_name)
return info
def gw_disconnect_device(self, device_name):
info = self._client.publish(topic=GATEWAY_MAIN_TOPIC + "disconnect", payload=dumps({"device": device_name}),
qos=self.quality_of_service)
self.__connected_devices.remove(device_name)
# if self.gateway:
# self.gateway.on_device_disconnected(self, device_name)
log.debug("Disconnected device %s", device_name)
return info
def gw_subscribe_to_all_attributes(self, callback):
return self.gw_subscribe_to_attribute("*", "*", callback)
def gw_subscribe_to_all_device_attributes(self, device, callback):
return self.gw_subscribe_to_attribute(device, "*", callback)
def gw_subscribe_to_attribute(self, device, attribute, callback):
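        # Subscriptions are keyed as "<device>|<attribute>" ("*" acts as a
        # wildcard on either side); the returned integer id can later be passed
        # to gw_unsubscribe().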
if device not in self.__connected_devices:
log.error("Device %s is not connected", device)
return False
with self._lock:
self.__max_sub_id += 1
key = device + "|" + attribute
if key not in self.__sub_dict:
self.__sub_dict.update({key: {self.__max_sub_id: callback}})
else:
self.__sub_dict[key].update({self.__max_sub_id: callback})
log.info("Subscribed to %s with id %i", key, self.__max_sub_id)
return self.__max_sub_id
def gw_unsubscribe(self, subscription_id):
with self._lock:
for attribute in self.__sub_dict:
if self.__sub_dict[attribute].get(subscription_id):
del self.__sub_dict[attribute][subscription_id]
log.info("Unsubscribed from %s, subscription id %i", attribute, subscription_id)
if subscription_id == '*':
self.__sub_dict = {}
def gw_set_server_side_rpc_request_handler(self, handler):
self.devices_server_side_rpc_request_handler = handler
def gw_send_rpc_reply(self, device, req_id, resp, quality_of_service):
if quality_of_service is None:
quality_of_service = self.quality_of_service
if quality_of_service not in (0, 1):
log.error("Quality of service (qos) value must be 0 or 1")
return None
info = self._client.publish(GATEWAY_RPC_TOPIC,
dumps({"device": device, "id": req_id, "data": resp}),
qos=quality_of_service)
return info
| 48.441748
| 144
| 0.659685
|
4a132156956254692bf097c75ea6d3b87c38222d
| 1,732
|
py
|
Python
|
tests/unittests/test_search_space.py
|
Jerryzcn/autogluon
|
778cfa23e5695b44fc3c7a5da0cbc764917d80a2
|
[
"Apache-2.0"
] | null | null | null |
tests/unittests/test_search_space.py
|
Jerryzcn/autogluon
|
778cfa23e5695b44fc3c7a5da0cbc764917d80a2
|
[
"Apache-2.0"
] | null | null | null |
tests/unittests/test_search_space.py
|
Jerryzcn/autogluon
|
778cfa23e5695b44fc3c7a5da0cbc764917d80a2
|
[
"Apache-2.0"
] | 1
|
2021-02-04T23:29:47.000Z
|
2021-02-04T23:29:47.000Z
|
import autogluon as ag
@ag.obj(
name=ag.space.Categorical('auto', 'gluon'),
)
class myobj:
def __init__(self, name):
self.name = name
@ag.func(
framework=ag.space.Categorical('mxnet', 'pytorch'),
)
def myfunc(framework):
return framework
@ag.args(
a=ag.space.Real(1e-3, 1e-2, log=True),
b=ag.space.Real(1e-3, 1e-2),
c=ag.space.Int(1, 10),
d=ag.space.Categorical('a', 'b', 'c', 'd'),
e=ag.space.Bool(),
f=ag.space.List(
ag.space.Int(1, 2),
ag.space.Categorical(4, 5),
),
g=ag.space.Dict(
a=ag.Real(0, 10),
obj=myobj(),
),
h=ag.space.Categorical('test', myobj()),
    i=myfunc(),
)
def train_fn(args, reporter):
a, b, c, d, e, f, g, h, i = args.a, args.b, args.c, args.d, args.e, \
args.f, args.g, args.h, args.i
assert a <= 1e-2 and a >= 1e-3
assert b <= 1e-2 and b >= 1e-3
assert c <= 10 and c >= 1
assert d in ['a', 'b', 'c', 'd']
assert e in [True, False]
assert f[0] in [1, 2]
assert f[1] in [4, 5]
assert g['a'] <= 10 and g['a'] >= 0
assert g.obj.name in ['auto', 'gluon']
assert hasattr(h, 'name') or h == 'test'
assert i in ['mxnet', 'pytorch']
reporter(epoch=0, accuracy=0)
def test_search_space():
scheduler = ag.scheduler.FIFOScheduler(train_fn,
resource={'num_cpus': 4, 'num_gpus': 0},
num_trials=10,
reward_attr='accuracy',
time_attr='epoch',
checkpoint=None)
scheduler.run()
scheduler.join_jobs()
| 28.866667
| 83
| 0.48903
|
4a1321778296a6f3f88a46d32d703f26a780cf1a
| 1,421
|
py
|
Python
|
coupons/migrations/0003_auto_20150416_0617.py
|
jelukas/django-coupons
|
b4de97570720c30ca034fdc8c121ad1645e8cb53
|
[
"BSD-3-Clause"
] | null | null | null |
coupons/migrations/0003_auto_20150416_0617.py
|
jelukas/django-coupons
|
b4de97570720c30ca034fdc8c121ad1645e8cb53
|
[
"BSD-3-Clause"
] | null | null | null |
coupons/migrations/0003_auto_20150416_0617.py
|
jelukas/django-coupons
|
b4de97570720c30ca034fdc8c121ad1645e8cb53
|
[
"BSD-3-Clause"
] | 1
|
2021-08-30T10:50:41.000Z
|
2021-08-30T10:50:41.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('coupons', '0002_coupon_valid_until'),
]
operations = [
migrations.CreateModel(
name='Campaign',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=255, verbose_name='Name')),
('description', models.TextField(verbose_name='Description', blank=True)),
],
options={
'ordering': ['name'],
'verbose_name': 'Campaign',
'verbose_name_plural': 'Campaigns',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='coupon',
name='campaign',
field=models.ForeignKey(related_name='coupons', verbose_name='Campaign', blank=True, to='coupons.Campaign', null=True, on_delete=models.deletion.SET_NULL),
preserve_default=True,
),
migrations.AlterField(
model_name='coupon',
name='valid_until',
field=models.DateTimeField(help_text='Leave empty for coupons that never expire', null=True, verbose_name='Valid until', blank=True),
),
]
| 35.525
| 167
| 0.582688
|
4a1321ed39b53b8928ce73772867715421a373f7
| 10,407
|
py
|
Python
|
backend/env/lib/python3.8/site-packages/jedi/inference/references.py
|
lubitelpospat/CFM-source
|
4e6af33ee68c6f2f05b6952b64a6b3f0591d5b03
|
[
"MIT"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
backend/env/lib/python3.8/site-packages/jedi/inference/references.py
|
lubitelpospat/CFM-source
|
4e6af33ee68c6f2f05b6952b64a6b3f0591d5b03
|
[
"MIT"
] | 20
|
2021-05-03T18:02:23.000Z
|
2022-03-12T12:01:04.000Z
|
backend/env/lib/python3.8/site-packages/jedi/inference/references.py
|
lubitelpospat/CFM-source
|
4e6af33ee68c6f2f05b6952b64a6b3f0591d5b03
|
[
"MIT"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
import os
import re
from parso import python_bytes_to_unicode
from jedi._compatibility import FileNotFoundError
from jedi.debug import dbg
from jedi.file_io import KnownContentFileIO
from jedi.inference.imports import SubModuleName, load_module_from_path
from jedi.inference.filters import ParserTreeFilter
from jedi.inference.gradual.conversion import convert_names
_IGNORE_FOLDERS = ('.tox', '.venv', 'venv', '__pycache__')
_OPENED_FILE_LIMIT = 2000
"""
Stats from a 2016 Lenovo Notebook running Linux:
With os.walk, it takes about 10s to scan 11'000 files (without filesystem
caching). Once cached it only takes 5s. So it is expected that reading all
those files might take a few seconds, but not a lot more.
"""
_PARSED_FILE_LIMIT = 30
"""
For now we keep the amount of parsed files really low, since parsing might take
easily 100ms for bigger files.
"""
def _resolve_names(definition_names, avoid_names=()):
for name in definition_names:
if name in avoid_names:
# Avoiding recursions here, because goto on a module name lands
# on the same module.
continue
if not isinstance(name, SubModuleName):
# SubModuleNames are not actually existing names but created
# names when importing something like `import foo.bar.baz`.
yield name
if name.api_type == 'module':
for n in _resolve_names(name.goto(), definition_names):
yield n
def _dictionarize(names):
return dict(
(n if n.tree_name is None else n.tree_name, n)
for n in names
)
def _find_defining_names(module_context, tree_name):
found_names = _find_names(module_context, tree_name)
for name in list(found_names):
# Convert from/to stubs, because those might also be usages.
found_names |= set(convert_names(
[name],
only_stubs=not name.get_root_context().is_stub(),
prefer_stub_to_compiled=False
))
found_names |= set(_find_global_variables(found_names, tree_name.value))
for name in list(found_names):
if name.api_type == 'param' or name.tree_name is None \
or name.tree_name.parent.type == 'trailer':
continue
found_names |= set(_add_names_in_same_context(name.parent_context, name.string_name))
return set(_resolve_names(found_names))
def _find_names(module_context, tree_name):
name = module_context.create_name(tree_name)
found_names = set(name.goto())
found_names.add(name)
return set(_resolve_names(found_names))
def _add_names_in_same_context(context, string_name):
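    # Repeatedly filter this context for `string_name`, each pass restricting the
    # search to positions before the earliest definition found so far, until no
    # further definitions of the name remain.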
if context.tree_node is None:
return
until_position = None
while True:
filter_ = ParserTreeFilter(
parent_context=context,
until_position=until_position,
)
names = set(filter_.get(string_name))
if not names:
break
for name in names:
yield name
ordered = sorted(names, key=lambda x: x.start_pos)
until_position = ordered[0].start_pos
def _find_global_variables(names, search_name):
for name in names:
if name.tree_name is None:
continue
module_context = name.get_root_context()
try:
method = module_context.get_global_filter
except AttributeError:
continue
else:
for global_name in method().get(search_name):
yield global_name
c = module_context.create_context(global_name.tree_name)
for n in _add_names_in_same_context(c, global_name.string_name):
yield n
def find_references(module_context, tree_name, only_in_module=False):
inf = module_context.inference_state
search_name = tree_name.value
# We disable flow analysis, because if we have ifs that are only true in
# certain cases, we want both sides.
try:
inf.flow_analysis_enabled = False
found_names = _find_defining_names(module_context, tree_name)
finally:
inf.flow_analysis_enabled = True
found_names_dct = _dictionarize(found_names)
module_contexts = [module_context]
if not only_in_module:
module_contexts.extend(
m for m in set(d.get_root_context() for d in found_names)
if m != module_context and m.tree_node is not None
)
# For param no search for other modules is necessary.
if only_in_module or any(n.api_type == 'param' for n in found_names):
potential_modules = module_contexts
else:
potential_modules = get_module_contexts_containing_name(
inf,
module_contexts,
search_name,
)
non_matching_reference_maps = {}
for module_context in potential_modules:
for name_leaf in module_context.tree_node.get_used_names().get(search_name, []):
new = _dictionarize(_find_names(module_context, name_leaf))
if any(tree_name in found_names_dct for tree_name in new):
found_names_dct.update(new)
for tree_name in new:
for dct in non_matching_reference_maps.get(tree_name, []):
# A reference that was previously searched for matches
# with a now found name. Merge.
found_names_dct.update(dct)
try:
del non_matching_reference_maps[tree_name]
except KeyError:
pass
else:
for name in new:
non_matching_reference_maps.setdefault(name, []).append(new)
result = found_names_dct.values()
if only_in_module:
return [n for n in result if n.get_root_context() == module_context]
return result
def _check_fs(inference_state, file_io, regex):
try:
code = file_io.read()
except FileNotFoundError:
return None
code = python_bytes_to_unicode(code, errors='replace')
if not regex.search(code):
return None
new_file_io = KnownContentFileIO(file_io.path, code)
m = load_module_from_path(inference_state, new_file_io)
if m.is_compiled():
return None
return m.as_context()
def gitignored_lines(folder_io, file_io):
ignored_paths = set()
ignored_names = set()
for l in file_io.read().splitlines():
if not l or l.startswith(b'#'):
continue
p = l.decode('utf-8', 'ignore')
if p.startswith('/'):
name = p[1:]
if name.endswith(os.path.sep):
name = name[:-1]
ignored_paths.add(os.path.join(folder_io.path, name))
else:
ignored_names.add(p)
return ignored_paths, ignored_names
def recurse_find_python_folders_and_files(folder_io, except_paths=()):
except_paths = set(except_paths)
for root_folder_io, folder_ios, file_ios in folder_io.walk():
# Delete folders that we don't want to iterate over.
for file_io in file_ios:
path = file_io.path
if path.endswith('.py') or path.endswith('.pyi'):
if path not in except_paths:
yield None, file_io
if path.endswith('.gitignore'):
ignored_paths, ignored_names = \
gitignored_lines(root_folder_io, file_io)
except_paths |= ignored_paths
folder_ios[:] = [
folder_io
for folder_io in folder_ios
if folder_io.path not in except_paths
and folder_io.get_base_name() not in _IGNORE_FOLDERS
]
for folder_io in folder_ios:
yield folder_io, None
def recurse_find_python_files(folder_io, except_paths=()):
for folder_io, file_io in recurse_find_python_folders_and_files(folder_io, except_paths):
if file_io is not None:
yield file_io
def _find_python_files_in_sys_path(inference_state, module_contexts):
sys_path = inference_state.get_sys_path()
except_paths = set()
yielded_paths = [m.py__file__() for m in module_contexts]
for module_context in module_contexts:
file_io = module_context.get_value().file_io
if file_io is None:
continue
folder_io = file_io.get_parent_folder()
while True:
path = folder_io.path
if not any(path.startswith(p) for p in sys_path) or path in except_paths:
break
for file_io in recurse_find_python_files(folder_io, except_paths):
if file_io.path not in yielded_paths:
yield file_io
except_paths.add(path)
folder_io = folder_io.get_parent_folder()
def get_module_contexts_containing_name(inference_state, module_contexts, name,
limit_reduction=1):
"""
Search a name in the directories of modules.
:param limit_reduction: Divides the limits on opening/parsing files by this
factor.
"""
# Skip non python modules
for module_context in module_contexts:
if module_context.is_compiled():
continue
yield module_context
# Very short names are not searched in other modules for now to avoid lots
# of file lookups.
if len(name) <= 2:
return
file_io_iterator = _find_python_files_in_sys_path(inference_state, module_contexts)
for x in search_in_file_ios(inference_state, file_io_iterator, name,
limit_reduction=limit_reduction):
yield x # Python 2...
def search_in_file_ios(inference_state, file_io_iterator, name, limit_reduction=1):
parse_limit = _PARSED_FILE_LIMIT / limit_reduction
open_limit = _OPENED_FILE_LIMIT / limit_reduction
file_io_count = 0
parsed_file_count = 0
regex = re.compile(r'\b' + re.escape(name) + r'\b')
for file_io in file_io_iterator:
file_io_count += 1
m = _check_fs(inference_state, file_io, regex)
if m is not None:
parsed_file_count += 1
yield m
if parsed_file_count >= parse_limit:
dbg('Hit limit of parsed files: %s', parse_limit)
break
if file_io_count >= open_limit:
dbg('Hit limit of opened files: %s', open_limit)
break
| 34.69
| 93
| 0.644854
|
4a132226c46c4389e8a645bcae884cf3c09f12f1
| 366
|
py
|
Python
|
main.py
|
victhepythonista/fireworks-stimulation
|
44ff44f90ec4cbd29d0148163dd60175ad809e1f
|
[
"MIT"
] | null | null | null |
main.py
|
victhepythonista/fireworks-stimulation
|
44ff44f90ec4cbd29d0148163dd60175ad809e1f
|
[
"MIT"
] | null | null | null |
main.py
|
victhepythonista/fireworks-stimulation
|
44ff44f90ec4cbd29d0148163dd60175ad809e1f
|
[
"MIT"
] | null | null | null |
import pygame
from backend import *
pygame.init()
bg = pygame.image.load("nightsky.jpeg")
class FireworksScreen(Screen):
def __init__(self):
Screen.__init__(self,(900,500))
self.fw_manager = FireworksManager()
def display_widgets(self):
self.window.blit(bg, (0,0))
self.fw_manager.show(self.window)
FireworksScreen().show()
| 18.3
| 40
| 0.693989
|
4a1323448f7fd664667913f66e3aa8237dc01a31
| 4,681
|
py
|
Python
|
src/audio.py
|
samx81/End-to-end-ASR-Pytorch
|
16e565008031c73e5b18f890c77e830440f3d101
|
[
"MIT"
] | null | null | null |
src/audio.py
|
samx81/End-to-end-ASR-Pytorch
|
16e565008031c73e5b18f890c77e830440f3d101
|
[
"MIT"
] | null | null | null |
src/audio.py
|
samx81/End-to-end-ASR-Pytorch
|
16e565008031c73e5b18f890c77e830440f3d101
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchaudio
class CMVN(torch.jit.ScriptModule):
__constants__ = ["mode", "dim", "eps"]
def __init__(self, mode="global", dim=2, eps=1e-10):
# `torchaudio.load()` loads audio with shape [channel, feature_dim, time]
# so perform normalization on dim=2 by default
super(CMVN, self).__init__()
if mode != "global":
raise NotImplementedError(
"Only support global mean variance normalization.")
self.mode = mode
self.dim = dim
self.eps = eps
@torch.jit.script_method
def forward(self, x):
if self.mode == "global":
return (x - x.mean(self.dim, keepdim=True)) / (self.eps + x.std(self.dim, keepdim=True))
def extra_repr(self):
return "mode={}, dim={}, eps={}".format(self.mode, self.dim, self.eps)
class Delta(torch.jit.ScriptModule):
__constants__ = ["order", "window_size", "padding"]
def __init__(self, order=1, window_size=2):
# Reference:
# https://kaldi-asr.org/doc/feature-functions_8cc_source.html
# https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/layers/common_audio.py
super(Delta, self).__init__()
self.order = order
self.window_size = window_size
filters = self._create_filters(order, window_size)
self.register_buffer("filters", filters)
self.padding = (0, (filters.shape[-1] - 1) // 2)
@torch.jit.script_method
def forward(self, x):
# Unsqueeze batch dim
x = x.unsqueeze(0)
return F.conv2d(x, weight=self.filters, padding=self.padding)[0]
# TODO(WindQAQ): find more elegant way to create `scales`
def _create_filters(self, order, window_size):
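        # Build the delta filters iteratively: each order slides a normalized
        # linear-regression window of +/- window_size over the previous order's
        # coefficients (the Kaldi delta-feature recipe referenced above), then all
        # filters are zero-padded to a common length.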
scales = [[1.0]]
for i in range(1, order + 1):
prev_offset = (len(scales[i-1]) - 1) // 2
curr_offset = prev_offset + window_size
curr = [0] * (len(scales[i-1]) + 2 * window_size)
normalizer = 0.0
for j in range(-window_size, window_size + 1):
normalizer += j * j
for k in range(-prev_offset, prev_offset + 1):
curr[j+k+curr_offset] += (j * scales[i-1][k+prev_offset])
curr = [x / normalizer for x in curr]
scales.append(curr)
max_len = len(scales[-1])
for i, scale in enumerate(scales[:-1]):
padding = (max_len - len(scale)) // 2
scales[i] = [0] * padding + scale + [0] * padding
return torch.tensor(scales).unsqueeze(1).unsqueeze(1)
def extra_repr(self):
return "order={}, window_size={}".format(self.order, self.window_size)
class Postprocess(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
# [channel, feature_dim, time] -> [time, channel, feature_dim]
x = x.permute(2, 0, 1)
# [time, channel, feature_dim] -> [time, feature_dim * channel]
return x.reshape(x.size(0), -1).detach()
# TODO(Windqaq): make this scriptable
class ExtractAudioFeature(nn.Module):
def __init__(self, mode="fbank", num_mel_bins=40, **kwargs):
super(ExtractAudioFeature, self).__init__()
self.mode = mode
self.extract_fn = torchaudio.compliance.kaldi.fbank if mode == "fbank" else torchaudio.compliance.kaldi.mfcc
self.num_mel_bins = num_mel_bins
self.kwargs = kwargs
def forward(self, filepath):
waveform, sample_rate = torchaudio.load(filepath)
y = self.extract_fn(waveform,
num_mel_bins=self.num_mel_bins,
channel=-1,
sample_frequency=sample_rate,
**self.kwargs)
return y.transpose(0, 1).unsqueeze(0).detach()
def extra_repr(self):
return "mode={}, num_mel_bins={}".format(self.mode, self.num_mel_bins)
def create_transform(audio_config):
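    # Assemble the feature pipeline: feature extraction -> optional deltas ->
    # optional CMVN -> postprocessing into [time, feature_dim * channel].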
feat_type = audio_config.pop("feat_type") ## Pop feat_type from `config` dict
feat_dim = audio_config.pop("feat_dim")
delta_order = audio_config.pop("delta_order", 0)
delta_window_size = audio_config.pop("delta_window_size", 2)
apply_cmvn = audio_config.pop("apply_cmvn")
transforms = [ExtractAudioFeature(feat_type, feat_dim, **audio_config)]
if delta_order >= 1:
transforms.append(Delta(delta_order, delta_window_size))
if apply_cmvn:
transforms.append(CMVN())
transforms.append(Postprocess())
return nn.Sequential(*transforms), feat_dim * (delta_order + 1)
## RETURN audio_transform, feat_dim
| 34.674074
| 116
| 0.616749
|
4a1324d9c2a490758dc29449b6542614e6f0cf12
| 1,201
|
py
|
Python
|
Project/src/Modules/House/Family/Reolink/reolink_device.py
|
DBrianKimmel/PyHouse
|
a100fc67761a22ae47ed6f21f3c9464e2de5d54f
|
[
"MIT"
] | 3
|
2016-11-16T00:37:58.000Z
|
2019-11-10T13:10:19.000Z
|
Project/src/Modules/House/Family/Reolink/reolink_device.py
|
DBrianKimmel/PyHouse
|
a100fc67761a22ae47ed6f21f3c9464e2de5d54f
|
[
"MIT"
] | null | null | null |
Project/src/Modules/House/Family/Reolink/reolink_device.py
|
DBrianKimmel/PyHouse
|
a100fc67761a22ae47ed6f21f3c9464e2de5d54f
|
[
"MIT"
] | 1
|
2020-07-19T22:06:52.000Z
|
2020-07-19T22:06:52.000Z
|
"""
@name: /home/briank/workspace/PyHouse/Project/src/Modules/House/Family/Reolink/reolink_device.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2013-2019 by D. Brian Kimmel
@license: MIT License
@note: Created on Jan 26, 2020
@summary:
"""
__updated__ = '2020-01-26'
__version_info__ = (20, 1, 26)
__version__ = '.'.join(map(str, __version_info__))
# Import system type stuff
# Import PyMh files
from Modules.Core.Utilities.debug_tools import PrettyFormatAny
from Modules.Core import logging_pyh as Logger
LOG = Logger.getLogger('PyHouse.reolink_device ')
class Api:
"""
These are the public methods available to use Devices from any family.
"""
m_plm_list = []
m_hub_list = []
m_pyhouse_obj = None
def __init__(self, p_pyhouse_obj):
# p_pyhouse_obj.House._Commands['insteon'] = {}
self.m_pyhouse_obj = p_pyhouse_obj
LOG.info('Initialized')
def LoadConfig(self):
"""
"""
def Start(self):
"""
"""
def SaveConfig(self):
"""
"""
def Stop(self):
_x = PrettyFormatAny.form(self.m_pyhouse_obj, 'pyhouse')
# ## END DBK
| 21.836364
| 101
| 0.64363
|
4a1324ffbfd60694de7b5a6256905c88f50544c3
| 3,089
|
py
|
Python
|
tests/unit/lib/utils/test_hash.py
|
stackchain/aws-sam-cli
|
5690348deb2193c653ba361bc0fc358dd410b3eb
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
tests/unit/lib/utils/test_hash.py
|
stackchain/aws-sam-cli
|
5690348deb2193c653ba361bc0fc358dd410b3eb
|
[
"Apache-2.0",
"MIT"
] | 1
|
2020-10-05T17:15:43.000Z
|
2020-10-05T17:15:43.000Z
|
tests/unit/lib/utils/test_hash.py
|
misk0/aws-sam-cli
|
a7cf9f025bc6da2bd388fee35dd07da584362047
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
from samcli.lib.utils.hash import dir_checksum, str_checksum
class TestHash(TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.temp_dir, ignore_errors=True)
def test_dir_hash_independent_of_location(self):
temp_dir1 = os.path.join(self.temp_dir, "temp-dir-1")
os.mkdir(temp_dir1)
with open(os.path.join(temp_dir1, "test-file"), "w+") as f:
f.write("Testfile")
checksum1 = dir_checksum(temp_dir1)
temp_dir2 = shutil.move(temp_dir1, os.path.join(self.temp_dir, "temp-dir-2"))
checksum2 = dir_checksum(temp_dir2)
self.assertEqual(checksum1, checksum2)
def test_dir_hash_independent_of_file_order(self):
file1 = tempfile.NamedTemporaryFile(delete=False, dir=self.temp_dir)
file1.write(b"Testfile")
file1.close()
file2 = tempfile.NamedTemporaryFile(delete=False, dir=self.temp_dir)
file2.write(b"Testfile")
file2.close()
dir_checksums = {}
with patch("os.walk") as mockwalk:
mockwalk.return_value = [
(self.temp_dir, (), (file1.name, file2.name,),),
]
dir_checksums["first"] = dir_checksum(self.temp_dir)
with patch("os.walk") as mockwalk:
mockwalk.return_value = [
(self.temp_dir, (), (file2.name, file1.name,),),
]
dir_checksums["second"] = dir_checksum(self.temp_dir)
self.assertEqual(dir_checksums["first"], dir_checksums["second"])
def test_dir_hash_same_contents_diff_file_per_directory(self):
_file = tempfile.NamedTemporaryFile(delete=False, dir=self.temp_dir)
_file.write(b"Testfile")
_file.close()
checksum_before = dir_checksum(os.path.dirname(_file.name))
shutil.move(os.path.abspath(_file.name), os.path.join(os.path.dirname(_file.name), "different_name"))
checksum_after = dir_checksum(os.path.dirname(_file.name))
self.assertNotEqual(checksum_before, checksum_after)
def test_dir_cyclic_links(self):
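        # Construct a symlink cycle (file -> symlink2 -> symlink -> file) so that
        # dir_checksum hits "too many levels of symbolic links" when reading.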
_file = tempfile.NamedTemporaryFile(delete=False, dir=self.temp_dir)
_file.write(b"Testfile")
_file.close()
os.symlink(os.path.abspath(_file.name), os.path.join(os.path.dirname(_file.name), "symlink"))
os.symlink(
os.path.join(os.path.dirname(_file.name), "symlink"), os.path.join(os.path.dirname(_file.name), "symlink2")
)
os.unlink(os.path.abspath(_file.name))
os.symlink(os.path.join(os.path.dirname(_file.name), "symlink2"), os.path.abspath(_file.name))
with self.assertRaises(OSError) as ex:
dir_checksum(os.path.dirname(_file.name))
self.assertIn("Too many levels of symbolic links", ex.message)
def test_str_checksum(self):
checksum = str_checksum("Hello, World!")
self.assertEqual(checksum, "65a8e27d8879283831b664bd8b7f0ad4")
| 39.101266
| 119
| 0.65976
|
4a13256cfb5fe27f06e4b9aeb769a97425b81e94
| 5,911
|
py
|
Python
|
capture.py
|
Hoke19/Network-intrusion-dataset-creator
|
c2f335ee6910602f39fe4b8e45bfd9893e906d26
|
[
"MIT"
] | null | null | null |
capture.py
|
Hoke19/Network-intrusion-dataset-creator
|
c2f335ee6910602f39fe4b8e45bfd9893e906d26
|
[
"MIT"
] | null | null | null |
capture.py
|
Hoke19/Network-intrusion-dataset-creator
|
c2f335ee6910602f39fe4b8e45bfd9893e906d26
|
[
"MIT"
] | null | null | null |
# MIT License
# Copyright (c) 2018 nrajasin
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import multiprocessing
import subprocess
import json
import time
# capture packets using wireshark and convert them to python dictionary objects
# args input-file-name, ethernet-interface, how-long
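# A minimal usage sketch (the queue/variable names here are assumptions, not
# part of this module):
#   out_q = multiprocessing.Queue()
#   cap = PacketCapture("capture", "/usr/bin/tshark", None, "eth0", 60, out_q)
#   cap.start()  # unwrapped packet dicts arrive on out_q; an empty dict marks the end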
class PacketCapture(multiprocessing.Process):
def __init__(
self, name, tshark_program, input_file_name, interface, how_long, outQ
):
multiprocessing.Process.__init__(self)
self.name = name
self.tshark_program = tshark_program
self.input_file_name = input_file_name
self.interface = interface
self.how_long = how_long
self.outQ = outQ
        # Global map from tshark's flattened 'xxx_xxx_' style keys to dotted
        # 'xxx.' keys, shared across all packets.
self.keymap = {}
def run(self):
cmd = "sudo " + self.tshark_program + " -V -i -l -T ek"
if self.input_file_name is not None:
cmd = "" + self.tshark_program + " -V -r " + self.input_file_name + " -T ek"
else:
cmd = (
"sudo "
+ self.tshark_program
+ " -V -i "
+ self.interface
+ " -a duration:"
+ str(self.how_long)
+ " -l -T ek"
)
print("PacketCapture: run(): Capturing with: ", cmd)
p = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1,
shell=True,
universal_newlines=True,
)
json_str = ""
num_read = 0
start_timer = time.perf_counter()
# for line in p.stdout:
while True:
line = p.stdout.readline()
if "layers" in line:
num_read += 1
# print("PacketCapture: working with line ", line)
json_obj = json.loads(line.strip())
source_filter = json_obj["layers"]
keyval = source_filter.items()
# print("PacketCapture: working with dict ", line)
a = self.unwrap(keyval)
# print("PacketCapture: working with packet ", a)
self.send_data(a)
else:
# we get blank lines
# print("PacketCapture: ignoring: ",line)
pass
if not line and p.poll() is not None:
# possible could delay here to let processing complete
# print("PacketCapture: We're done - no input and tshark exited")
self.send_data({})
break
end_timer = time.perf_counter()
print(
"PacketCapture.run: processed:",
str(num_read),
" rate:",
str(num_read / (end_timer - start_timer)),
)
p.stdout.close()
p.wait()
# saves each dictionary object into a Queue
def send_data(self, dictionary):
# print("PacketCapture: sending dictionary size: ", len(dictionary))
# print("PacketCapture: sending dictionary : ", dictionary)
self.outQ.put(dictionary)
# this function unwraps a multi level JSON object into a python dictionary with key value pairs
def unwrap(self, keyval):
newKeyval = {}
for key1, value1 in keyval:
if key1 not in self.keymap:
# weirdness in the export format when using EK which we use because all on one line
# The json has some with xxx.flags xxx.flags_tree xx.flags.yyy the _tree doesn't show up in this format
# couldn't figure out how to convert 'xxx_xxx_' to 'xxx.' so converted 'xxx_xxx_' to 'xxx__' and then 'xxx.'
# found src_ and dst_ in arp
# found request_ record_ flags_ inside some keys. Might want to tighten down record_ can be an inner key
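                # Worked example of the pipeline below: 'tcp_tcp_flags' matches the
                # repeated-prefix regex and becomes 'tcp__flags', and the '__' -> '.'
                # replacement then yields 'tcp.flags'.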
massagedKey1 = (
re.sub(r"(\w+_)(\1)+", r"\1_", key1)
.replace("__", ".")
.replace("request_", "request.")
.replace("record_", "record.")
.replace("flags_", "flags.")
.replace("src_", "src.")
.replace("dst_", "dst.")
)
# add the before and after to the map so we don't have to calculate again
self.keymap[key1] = massagedKey1
# print("PacketCapture: registered mapping: ", key1, " --> ",massagedKey1)
if isinstance(value1, (str, bool, list)):
newKeyval[self.keymap[key1]] = value1
elif value1 is None:
# print("PacketCapture: Ignoring and tossing null value", key1)
pass
else:
newKeyval.update(self.unwrap(value1.items()))
return newKeyval
| 39.406667
| 124
| 0.584165
|
4a13258bcf56f527209963e370f0234a8d0550e5
| 1,081
|
py
|
Python
|
lingvodoc/views/v2/convert_five_tiers_validate/view.py
|
SegFaulti4/lingvodoc
|
8b296b43453a46b814d3cd381f94382ebcb9c6a6
|
[
"Apache-2.0"
] | 5
|
2017-03-30T18:02:11.000Z
|
2021-07-20T16:02:34.000Z
|
lingvodoc/views/v2/convert_five_tiers_validate/view.py
|
SegFaulti4/lingvodoc
|
8b296b43453a46b814d3cd381f94382ebcb9c6a6
|
[
"Apache-2.0"
] | 15
|
2016-02-24T13:16:59.000Z
|
2021-09-03T11:47:15.000Z
|
lingvodoc/views/v2/convert_five_tiers_validate/view.py
|
Winking-maniac/lingvodoc
|
f037bf0e91ccdf020469037220a43e63849aa24a
|
[
"Apache-2.0"
] | 22
|
2015-09-25T07:13:40.000Z
|
2021-08-04T18:08:26.000Z
|
import logging
import tempfile
from pyramid.view import view_config
from pyramid.httpexceptions import (
HTTPOk,
HTTPNotFound,
HTTPError,
HTTPBadRequest
)
from lingvodoc.scripts import elan_parser
from urllib import request
from urllib.error import HTTPError as RemoteHTTPError
@view_config(route_name='convert_five_tiers_validate', renderer='json', request_method='POST')
def convert_dictionary(req): # TODO: test
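    # Download the EAF file referenced by `eaf_url` and report whether it passes
    # the ELAN structure check expected for five-tier dictionaries.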
log = logging.getLogger(__name__)
try:
eaf_url = req.json_body['eaf_url']
result = False
eaffile = request.urlopen(eaf_url)
    except RemoteHTTPError as e:
        req.response.status = e.code
return {'error': str(e)}
except KeyError as e:
req.response.status = HTTPBadRequest.code
return {'error': str(e)}
with tempfile.NamedTemporaryFile() as temp:
temp.write(eaffile.read())
elan_check = elan_parser.ElanCheck(temp.name)
elan_check.parse()
if elan_check.check():
result = True
temp.flush()
req.response.status = HTTPOk.code
return {"is_valid": result}
| 27.025
| 94
| 0.677151
|
4a1326460562d52affeb8d4c16668ddc248a50b3
| 881
|
py
|
Python
|
venv/lib/python3.9/site-packages/markdown/__version__.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | 182
|
2017-03-05T07:43:13.000Z
|
2022-03-15T13:09:07.000Z
|
venv/lib/python3.9/site-packages/markdown/__version__.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | 15
|
2018-05-02T11:05:30.000Z
|
2018-05-11T20:51:27.000Z
|
env/lib/python3.6/site-packages/markdown/__version__.py
|
bcss-pm/incidents
|
927a102104b5718fe118bceb307d3cd633d6699b
|
[
"MIT"
] | 38
|
2017-04-26T14:13:37.000Z
|
2021-06-24T11:36:38.000Z
|
#
# markdown/__version__.py
#
# version_info should conform to PEP 386
# (major, minor, micro, alpha/beta/rc/final, #)
# (1, 1, 2, 'alpha', 0) => "1.1.2.dev"
# (1, 2, 0, 'beta', 2) => "1.2b2"
version_info = (2, 6, 11, 'final', 0)
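# e.g. (2, 6, 11, 'final', 0) -> "2.6.11" via _get_version() below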
def _get_version():
" Returns a PEP 386-compliant version number from version_info. "
assert len(version_info) == 5
assert version_info[3] in ('alpha', 'beta', 'rc', 'final')
parts = 2 if version_info[2] == 0 else 3
main = '.'.join(map(str, version_info[:parts]))
sub = ''
if version_info[3] == 'alpha' and version_info[4] == 0:
# TODO: maybe append some sort of git info here??
sub = '.dev'
elif version_info[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[version_info[3]] + str(version_info[4])
return str(main + sub)
version = _get_version()
| 28.419355
| 69
| 0.586833
|
4a1327320f9094127947c47b92be759a493920d2
| 12,651
|
py
|
Python
|
tests/test_read_pdf_table.py
|
cjotade/tabula-py
|
dc55064b6c71bc1bcc010f76c397e2256abd9d7b
|
[
"MIT"
] | null | null | null |
tests/test_read_pdf_table.py
|
cjotade/tabula-py
|
dc55064b6c71bc1bcc010f76c397e2256abd9d7b
|
[
"MIT"
] | null | null | null |
tests/test_read_pdf_table.py
|
cjotade/tabula-py
|
dc55064b6c71bc1bcc010f76c397e2256abd9d7b
|
[
"MIT"
] | null | null | null |
import filecmp
import json
import os
import platform
import shutil
import subprocess
import tempfile
import unittest
import uuid
from unittest.mock import patch
import pandas as pd
import tabula
class TestReadPdfTable(unittest.TestCase):
def setUp(self):
self.uri = (
"https://github.com/chezou/tabula-py/raw/"
"master/tests/resources/12s0324.pdf"
)
self.pdf_path = "tests/resources/data.pdf"
self.expected_csv1 = "tests/resources/data_1.csv"
def test_read_pdf(self):
df = tabula.read_pdf(self.pdf_path, stream=True)
        self.assertEqual(len(df), 1)
self.assertTrue(isinstance(df[0], pd.DataFrame))
self.assertTrue(df[0].equals(pd.read_csv(self.expected_csv1)))
def test_read_remote_pdf(self):
df = tabula.read_pdf(self.uri)
        self.assertEqual(len(df), 1)
self.assertTrue(isinstance(df[0], pd.DataFrame))
def test_read_remote_pdf_with_custom_user_agent(self):
df = tabula.read_pdf(self.uri, user_agent="Mozilla/5.0", stream=True)
        self.assertEqual(len(df), 1)
self.assertTrue(isinstance(df[0], pd.DataFrame))
def test_read_pdf_into_json(self):
expected_json = "tests/resources/data_1.json"
json_data = tabula.read_pdf(
self.pdf_path, output_format="json", stream=True, multiple_tables=False
)
self.assertTrue(isinstance(json_data, list))
with open(expected_json) as json_file:
data = json.load(json_file)
self.assertEqual(json_data, data)
def test_read_pdf_with_option(self):
expected_csv2 = "tests/resources/data_2-3.csv"
expected_df2 = pd.read_csv(expected_csv2)
self.assertTrue(
tabula.read_pdf(self.pdf_path, pages=1, stream=True)[0].equals(
pd.read_csv(self.expected_csv1)
)
)
self.assertTrue(
tabula.read_pdf(
self.pdf_path,
pages="2-3",
stream=True,
guess=False,
multiple_tables=False,
)[0].equals(expected_df2)
)
self.assertTrue(
tabula.read_pdf(
self.pdf_path,
pages=(2, 3),
stream=True,
guess=False,
multiple_tables=False,
)[0].equals(expected_df2)
)
def test_read_pdf_with_columns(self):
pdf_path = "tests/resources/campaign_donors.pdf"
expected_csv = "tests/resources/campaign_donors.csv"
self.assertTrue(
tabula.read_pdf(
pdf_path, columns=[47, 147, 256, 310, 375, 431, 504], guess=False
)[0].equals(pd.read_csv(expected_csv))
)
def test_read_pdf_file_like_obj(self):
with open(self.pdf_path, "rb") as f:
df = tabula.read_pdf(f, stream=True)
            self.assertEqual(len(df), 1)
self.assertTrue(isinstance(df[0], pd.DataFrame))
self.assertTrue(df[0].equals(pd.read_csv(self.expected_csv1)))
def test_read_pdf_pathlib(self):
from pathlib import Path
df = tabula.read_pdf(Path(self.pdf_path), stream=True)
        self.assertEqual(len(df), 1)
self.assertTrue(isinstance(df[0], pd.DataFrame))
self.assertTrue(df[0].equals(pd.read_csv(self.expected_csv1)))
def test_read_pdf_with_multiple_areas(self):
# Original files are taken from
# https://github.com/tabulapdf/tabula-java/pull/213
pdf_path = "tests/resources/MultiColumn.pdf"
expected_csv = "tests/resources/MultiColumn.csv"
expected_df = pd.read_csv(expected_csv)
self.assertTrue(
tabula.read_pdf(
pdf_path,
pages=1,
area=[[0, 0, 100, 50], [0, 50, 100, 100]],
relative_area=True,
multiple_tables=False,
)[0].equals(expected_df)
)
self.assertTrue(
tabula.read_pdf(
pdf_path,
pages=1,
area=[[0, 0, 451, 212], [0, 212, 451, 425]],
multiple_tables=False,
)[0].equals(expected_df)
)
def test_read_pdf_with_java_option(self):
self.assertTrue(
tabula.read_pdf(
self.pdf_path, pages=1, stream=True, java_options=["-Xmx256m"]
)[0].equals(pd.read_csv(self.expected_csv1))
)
def test_read_pdf_with_pandas_option(self):
column_name = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
self.assertTrue(
tabula.read_pdf(
self.pdf_path, pages=1, stream=True, pandas_options={"header": None}
)[0].equals(pd.read_csv(self.expected_csv1, header=None))
)
self.assertTrue(
tabula.read_pdf(
self.pdf_path, pages=1, stream=True, pandas_options={"header": 0}
)[0].equals(pd.read_csv(self.expected_csv1, header=0))
)
self.assertTrue(
tabula.read_pdf(
self.pdf_path, pages=1, stream=True, pandas_options={"header": "infer"}
)[0].equals(pd.read_csv(self.expected_csv1, header="infer"))
)
self.assertTrue(
tabula.read_pdf(
self.pdf_path,
pages=1,
stream=True,
pandas_options={"header": "infer", "names": column_name},
)[0].equals(
pd.read_csv(self.expected_csv1, header="infer", names=column_name)
)
)
self.assertTrue(
tabula.read_pdf(
self.pdf_path,
pages=1,
stream=True,
multiple_tables=True,
pandas_options={"header": "infer", "names": column_name},
)[0].equals(
pd.read_csv(self.expected_csv1, header="infer", names=column_name)
)
)
self.assertTrue(
tabula.read_pdf(
self.pdf_path,
pages=1,
stream=True,
multiple_tables=True,
pandas_options={"header": "infer", "columns": column_name},
)[0].equals(
pd.read_csv(self.expected_csv1, header="infer", names=column_name)
)
)
def test_read_pdf_for_multiple_tables(self):
self.assertEqual(
len(
tabula.read_pdf(
self.pdf_path, pages=2, multiple_tables=True, stream=True
)
),
2,
)
self.assertTrue(
tabula.read_pdf(self.pdf_path, pages=1, multiple_tables=True, stream=True)[
0
].equals(pd.read_csv(self.expected_csv1))
)
with self.assertRaises(tabula.errors.CSVParseError):
tabula.read_pdf(self.pdf_path, pages=2, multiple_tables=False)
def test_read_pdf_exception(self):
invalid_pdf_path = "notexist.pdf"
with self.assertRaises(FileNotFoundError):
tabula.read_pdf(invalid_pdf_path)
with self.assertRaises(TypeError):
tabula.read_pdf(self.pdf_path, unknown_option="foo")
with self.assertRaises(ValueError):
tabula.read_pdf(self.pdf_path, output_format="unknown")
def test_convert_from(self):
expected_tsv = "tests/resources/data_1.tsv"
expected_json = "tests/resources/data_1.json"
with tempfile.TemporaryDirectory() as tempdir:
temp = os.path.join(tempdir, str(uuid.uuid4()))
tabula.convert_into(self.pdf_path, temp, output_format="csv", stream=True)
self.assertTrue(filecmp.cmp(temp, self.expected_csv1))
tabula.convert_into(self.pdf_path, temp, output_format="tsv", stream=True)
self.assertTrue(filecmp.cmp(temp, expected_tsv))
tabula.convert_into(self.pdf_path, temp, output_format="json", stream=True)
self.assertTrue(filecmp.cmp(temp, expected_json))
def test_convert_into_by_batch(self):
temp_dir = tempfile.mkdtemp()
temp_pdf = temp_dir + "/data.pdf"
converted_csv = temp_dir + "/data.csv"
shutil.copyfile(self.pdf_path, temp_pdf)
try:
tabula.convert_into_by_batch(temp_dir, output_format="csv", stream=True)
self.assertTrue(filecmp.cmp(converted_csv, self.expected_csv1))
finally:
shutil.rmtree(temp_dir)
with self.assertRaises(ValueError):
tabula.convert_into_by_batch(None, output_format="csv")
def test_convert_remote_file(self):
with tempfile.TemporaryDirectory() as tempdir:
temp = os.path.join(tempdir, str(uuid.uuid4()))
tabula.convert_into(self.uri, temp, output_format="csv")
self.assertTrue(os.path.exists(temp))
def test_convert_into_exception(self):
with self.assertRaises(ValueError):
tabula.convert_into(self.pdf_path, "test.csv", output_format="dataframe")
with self.assertRaises(ValueError):
tabula.convert_into(self.pdf_path, None)
with self.assertRaises(ValueError):
tabula.convert_into(self.pdf_path, "")
def test_read_pdf_with_template(self):
template_path = "tests/resources/data.tabula-template.json"
dfs = tabula.read_pdf_with_template(self.pdf_path, template_path)
self.assertEqual(len(dfs), 4)
self.assertTrue(dfs[0].equals(pd.read_csv(self.expected_csv1)))
def test_read_pdf_with_remote_template(self):
template_path = (
"https://github.com/chezou/tabula-py/raw/master/"
"tests/resources/data.tabula-template.json"
)
dfs = tabula.read_pdf_with_template(self.pdf_path, template_path)
self.assertEqual(len(dfs), 4)
self.assertTrue(dfs[0].equals(pd.read_csv(self.expected_csv1)))
@patch("subprocess.run")
@patch("tabula.io._jar_path")
def test_read_pdf_with_jar_path(self, jar_func, mock_fun):
jar_func.return_value = "/tmp/tabula-java.jar"
tabula.read_pdf(self.pdf_path, encoding="utf-8")
target_args = ["java"]
if platform.system() == "Darwin":
target_args += ["-Djava.awt.headless=true"]
target_args += [
"-Dfile.encoding=UTF8",
"-jar",
"/tmp/tabula-java.jar",
"--guess",
"--format",
"JSON",
"tests/resources/data.pdf",
]
subp_args = {
"stdout": subprocess.PIPE,
"stderr": subprocess.PIPE,
"stdin": subprocess.DEVNULL,
"check": True,
}
mock_fun.assert_called_with(target_args, **subp_args)
def test_read_pdf_with_dtype_string(self):
pdf_path = "tests/resources/data_dtype.pdf"
expected_csv = "tests/resources/data_dtype_expected.csv"
expected_csv2 = "tests/resources/data_2-3.csv"
template_path = "tests/resources/data_dtype.tabula-template.json"
template_expected_csv = "tests/resources/data_dtype_template_expected.csv"
pandas_options = {'dtype': str}
self.assertTrue(
tabula.read_pdf(
self.pdf_path,
stream=True,
pages=1,
multiple_tables=False,
pandas_options=pandas_options.copy()
).equals(pd.read_csv(self.expected_csv1, **pandas_options))
)
self.assertTrue(
tabula.read_pdf(
self.pdf_path,
pages="2-3",
stream=True,
guess=False,
multiple_tables=False,
pandas_options=pandas_options.copy()
).equals(pd.read_csv(expected_csv2, **pandas_options))
)
pandas_options = {'header': None, 'dtype': str}
dfs = tabula.read_pdf(
pdf_path,
multiple_tables=True,
pandas_options=pandas_options.copy()
)
self.assertEqual(len(dfs), 4)
self.assertTrue(
dfs[0].equals(pd.read_csv(expected_csv, **pandas_options))
)
dfs_template = tabula.read_pdf_with_template(
pdf_path,
template_path,
stream=True,
pages='all',
pandas_options=pandas_options.copy()
)
self.assertEqual(len(dfs_template), 5)
self.assertTrue(
dfs_template[0].equals(pd.read_csv(template_expected_csv, **pandas_options))
)
if __name__ == "__main__":
unittest.main()
| 36.353448
| 88
| 0.587226
|
4a1327cfbeaf8198876a6929a3a66872c137d46c
| 5,598
|
py
|
Python
|
Sources/AlphaBot2/python/prgm.py
|
maroneal/SmartC
|
515502d69832b5acf427715b87f0cc17d10e7987
|
[
"BSD-2-Clause"
] | null | null | null |
Sources/AlphaBot2/python/prgm.py
|
maroneal/SmartC
|
515502d69832b5acf427715b87f0cc17d10e7987
|
[
"BSD-2-Clause"
] | null | null | null |
Sources/AlphaBot2/python/prgm.py
|
maroneal/SmartC
|
515502d69832b5acf427715b87f0cc17d10e7987
|
[
"BSD-2-Clause"
] | 2
|
2019-03-04T08:26:39.000Z
|
2019-04-15T09:40:31.000Z
|
import time
import os
import RPi.GPIO as GPIO
from adafruit_servokit import ServoKit
import math
from picamera import PiCamera
from AlphaBot2 import AlphaBot2
kit = ServoKit(channels=16)
Ab = AlphaBot2()
#camera = PiCamera()
BUZ = 4
IR = 17 #Remote controller
DR = 16
DL = 19
PWM = 50
n = 0
TRIG = 22
ECHO = 27
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(IR,GPIO.IN)
GPIO.setup(DR,GPIO.IN,GPIO.PUD_UP)
GPIO.setup(DL,GPIO.IN,GPIO.PUD_UP)
GPIO.setup(BUZ,GPIO.OUT)
GPIO.setup(TRIG,GPIO.OUT,initial=GPIO.LOW)
GPIO.setup(ECHO,GPIO.IN)
def getkey():
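    # Bit-bang decoder for an NEC-style IR remote: ~9ms leader burst, ~4.5ms
    # space, then 32 bits where the high-time distinguishes 0 (~0.56ms) from
    # 1 (~1.69ms); data[2]/data[3] carry the command byte and its complement
    # as a checksum.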
if GPIO.input(IR) == 0:
count = 0
while GPIO.input(IR) == 0 and count < 200: #9ms
count += 1
time.sleep(0.00006)
if(count < 10):
            return
count = 0
while GPIO.input(IR) == 1 and count < 80: #4.5ms
count += 1
time.sleep(0.00006)
idx = 0
cnt = 0
data = [0,0,0,0]
for i in range(0,32):
count = 0
while GPIO.input(IR) == 0 and count < 15: #0.56ms
count += 1
time.sleep(0.00006)
count = 0
while GPIO.input(IR) == 1 and count < 40: #0: 0.56mx
count += 1 #1: 1.69ms
time.sleep(0.00006)
if count > 7:
data[idx] |= 1<<cnt
if cnt == 7:
cnt = 0
idx += 1
else:
cnt += 1
# print data
if data[0]+data[1] == 0xFF and data[2]+data[3] == 0xFF: #check
return data[2]
else:
print("repeat")
return "repeat"
def dist():
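    # HC-SR04-style ultrasonic ranging: fire a ~15us TRIG pulse, time the ECHO
    # pulse, and convert to centimeters (34000 cm/s speed of sound, halved for
    # the round trip).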
GPIO.output(TRIG,GPIO.HIGH)
time.sleep(0.000015)
GPIO.output(TRIG,GPIO.LOW)
while not GPIO.input(ECHO):
pass
t1 = time.time()
while GPIO.input(ECHO):
pass
t2 = time.time()
return (t2-t1)*34000/2
def stop_servos():
kit.servo[0].set_pulse_width_range(0,0)
kit.servo[1].set_pulse_width_range(0,0)
kit.servo[0].fraction = 0
kit.servo[1].fraction = 0
#camera.start_preview()
kit.servo[0].actuation_range = 180
kit.servo[1].actuation_range = 180
kit.servo[0].set_pulse_width_range(500,2500)
kit.servo[1].set_pulse_width_range(500,2500)
kit.servo[0].angle = 90
kit.servo[1].angle = 90
Ab.stop()
try:
while True:
DR_status = GPIO.input(DR)
DL_status = GPIO.input(DL)
#print(DR_status,DL_status)
if((DL_status == 1) and (DR_status == 0)):
#GPIO.output(BUZ,GPIO.HIGH)
print("obstacle on the left")
Ab.right()
time.sleep(0.002)
Ab.stop()
elif((DL_status == 0) and (DR_status == 1)):
#GPIO.output(BUZ,GPIO.HIGH)
print("obstacle on the right")
Ab.left()
time.sleep(0.002)
Ab.stop()
elif((DL_status == 0) and (DR_status == 0)):
#GPIO.output(BUZ,GPIO.HIGH)
Ab.backward()
time.sleep(0.002)
Ab.stop()
GPIO.output(BUZ,GPIO.LOW)
key = getkey()
if(key != None):
n = 0
i = 0
if key == 0x18: #2
if((DL_status == 1) and (DR_status == 1)):
Ab.forward()
print("forward")
if key == 0x08: #4
Ab.left()
print("left")
if key == 0x1c: #5
Ab.stop()
print("stop")
if key == 0x5a: #6
Ab.right()
print("right")
if key == 0x52: #8
Ab.backward()
print("backward")
if key == 0x15: #+
if(PWM + 10 < 101):
PWM = PWM + 10
Ab.setPWMA(PWM)
Ab.setPWMB(PWM)
print(PWM)
if key == 0x07: #-
if(PWM - 10 > -1):
PWM = PWM - 10
Ab.setPWMA(PWM)
Ab.setPWMB(PWM)
print(PWM)
if key == 0x09: #EQ
kit.servo[0].angle = 90
kit.servo[1].angle = 90
if key == 0x44: #<<
print("servo left")
for i in range(0,400,1):
if kit.servo[0].angle < 179:
kit.servo[0].angle += 0.002*i
time.sleep(0.002)
if key == 0x40: #>>
print("servo right")
for i in range(0,250,1):
if kit.servo[0].angle > 1:
kit.servo[0].angle -= 0.001*i
time.sleep(0.002)
if key == 0x47: #CH+
print("servo up")
for i in range(0,250,1):
if kit.servo[1].angle > 1:
kit.servo[1].angle -= 0.001*i
time.sleep(0.002)
if key == 0x45: #CH-
for i in range(0,400,1):
if kit.servo[1].angle < 179:
kit.servo[1].angle += 0.002*i
time.sleep(0.002)
if key == 0x16: #0
stop_servos()
os.sys.exit()
else:
n += 1
if n > 20000:
n = 0
Ab.stop()
except KeyboardInterrupt:
    GPIO.cleanup()
| 28.85567
| 71
| 0.435334
|
4a132862707004f44e3168b4c3953ddf92017152
| 2,388
|
py
|
Python
|
examples/example_jumping_robot/src/jr_graph_builder.py
|
danbarla/GTDynamics
|
0448b359aff9e0e784832666e4048ee01c8b082d
|
[
"BSD-2-Clause"
] | null | null | null |
examples/example_jumping_robot/src/jr_graph_builder.py
|
danbarla/GTDynamics
|
0448b359aff9e0e784832666e4048ee01c8b082d
|
[
"BSD-2-Clause"
] | null | null | null |
examples/example_jumping_robot/src/jr_graph_builder.py
|
danbarla/GTDynamics
|
0448b359aff9e0e784832666e4048ee01c8b082d
|
[
"BSD-2-Clause"
] | null | null | null |
"""
* GTDynamics Copyright 2020, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* See LICENSE for the license information
*
* @file jr_graph_builder.py
* @brief Create factor graphs for the jumping robot.
* @author Yetong Zhang
"""
import os, sys, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
sys.path.insert(0,currentdir)
import gtdynamics as gtd
import gtsam
from gtsam import noiseModel, NonlinearFactorGraph
import numpy as np
from jumping_robot import Actuator, JumpingRobot
from actuation_graph_builder import ActuationGraphBuilder
from robot_graph_builder import RobotGraphBuilder
class JRGraphBuilder:
""" Class that constructs factor graphs for a jumping robot. """
def __init__(self):
"""Initialize the graph builder, specify all noise models."""
self.robot_graph_builder = RobotGraphBuilder()
self.actuation_graph_builder = ActuationGraphBuilder()
def collocation_graph(self, jr: JumpingRobot, step_phases: list):
""" Create a factor graph containing collocation constraints. """
graph = self.actuation_graph_builder.collocation_graph(jr, step_phases)
graph.push_back(self.robot_graph_builder.collocation_graph(jr, step_phases))
# add collocation factors for time
for time_step in range(len(step_phases)):
phase = step_phases[time_step]
k_prev = time_step
k_curr = time_step+1
dt_key = gtd.PhaseKey(phase).key()
time_prev_key = gtd.TimeKey(k_prev).key()
time_curr_key = gtd.TimeKey(k_curr).key()
time_col_cost_model = self.robot_graph_builder.graph_builder.opt().time_cost_model
gtd.AddTimeCollocationFactor(graph, time_prev_key, time_curr_key,
dt_key, time_col_cost_model)
return graph
def dynamics_graph(self, jr: JumpingRobot, k: int) -> NonlinearFactorGraph:
""" Create a factor graph containing dynamcis constraints for
the robot, actuators and source tank at a certain time step
"""
graph = self.actuation_graph_builder.dynamics_graph(jr, k)
graph.add(self.robot_graph_builder.dynamics_graph(jr, k))
return graph
| 37.3125
| 94
| 0.708961
|
4a132a12f3ef5c06f148afe7df0c4e323db0979a
| 1,079
|
py
|
Python
|
source/pkgsrc/games/unknown-horizons/patches/patch-setup.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1
|
2021-11-20T22:46:39.000Z
|
2021-11-20T22:46:39.000Z
|
source/pkgsrc/games/unknown-horizons/patches/patch-setup.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
source/pkgsrc/games/unknown-horizons/patches/patch-setup.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
$NetBSD: patch-setup.py,v 1.2 2021/03/09 09:39:11 nia Exp $
- On NetBSD platform.dist() is not defined. Always install to
the same binary directory anyway, for consistency.
- Install man pages to PKGMANDIR.
--- setup.py.orig 2019-01-12 15:15:42.000000000 +0000
+++ setup.py
@@ -39,10 +39,7 @@ from horizons.ext import polib
# Ensure we are in the correct directory
os.chdir(os.path.realpath(os.path.dirname(__file__)))
-if platform.dist()[0].lower() in ('debian', 'ubuntu'):
- executable_path = 'games'
-else:
- executable_path = 'bin'
+executable_path = 'bin'
# this trick is for setting RELEASE_VERSION if the code is cloned from git repository
@@ -54,7 +51,7 @@ data = [
(executable_path, ('unknown-horizons', )),
('share/pixmaps', ('content/packages/unknown-horizons.xpm', )),
('share/unknown-horizons', ('content/settings-template.xml', )),
- ('share/man/man6', ('content/packages/unknown-horizons.6', )),
+ ('@PKGMANDIR@/man6', ('content/packages/unknown-horizons.6', )),
]
for root, dirs, files in [x for x in os.walk('content') if len(x[2])]:
| 35.966667
| 86
| 0.681186
|
4a132bf9bdfdad5bd4a529b79a9343c8a9bd4db6
| 32,111
|
py
|
Python
|
openbook_auth/tests/views/test_authenticated_user.py
|
TamaraAbells/okuna-api
|
f87d8e80d2f182c01dbce68155ded0078ee707e4
|
[
"MIT"
] | 164
|
2019-07-29T17:59:06.000Z
|
2022-03-19T21:36:01.000Z
|
openbook_auth/tests/views/test_authenticated_user.py
|
TamaraAbells/okuna-api
|
f87d8e80d2f182c01dbce68155ded0078ee707e4
|
[
"MIT"
] | 188
|
2019-03-16T09:53:25.000Z
|
2019-07-25T14:57:24.000Z
|
openbook_auth/tests/views/test_authenticated_user.py
|
TamaraAbells/okuna-api
|
f87d8e80d2f182c01dbce68155ded0078ee707e4
|
[
"MIT"
] | 80
|
2019-08-03T17:49:08.000Z
|
2022-02-28T16:56:33.000Z
|
from unittest import mock
from urllib.parse import urlsplit
from django.urls import reverse
from faker import Faker
from rest_framework import status
from mixer.backend.django import mixer
from openbook_circles.models import Circle
from openbook_common.tests.models import OpenbookAPITestCase
from openbook_auth.models import User
import logging
import json
from openbook_auth.views.authenticated_user.views import AuthenticatedUserSettings
from openbook_common.tests.helpers import make_user, make_authentication_headers_for_user, make_user_bio, \
make_user_location, make_user_avatar, make_user_cover, make_random_language
fake = Faker()
logger = logging.getLogger(__name__)
class AuthenticatedUserAPITests(OpenbookAPITestCase):
"""
AuthenticatedUserAPI
"""
fixtures = [
'openbook_circles/fixtures/circles.json'
]
def test_can_retrieve_user(self):
"""
should return 200 and the data of the authenticated user
"""
user = make_user()
auth_token = user.auth_token.key
header = {'HTTP_AUTHORIZATION': 'Token %s' % auth_token}
url = self._get_url()
response = self.client.get(url, **header)
self.assertEqual(response.status_code, status.HTTP_200_OK)
parsed_response = json.loads(response.content)
self.assertIn('username', parsed_response)
response_username = parsed_response['username']
self.assertEqual(response_username, user.username)
def test_can_update_user_username(self):
"""
should be able to update the authenticated user username and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
new_username = fake.user_name()
data = {
'username': new_username
}
url = self._get_url()
response = self.client.patch(url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user.refresh_from_db()
self.assertEqual(user.username, new_username)
def test_can_update_user_username_to_same_username(self):
"""
should be able to update the authenticated user username to the same it already has and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
data = {
'username': user.username
}
url = self._get_url()
response = self.client.patch(url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user.refresh_from_db()
        self.assertEqual(user.username, data['username'])
def test_cannot_update_user_username_to_taken_username(self):
"""
        should not be able to update the authenticated user username to a taken username and return 400
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
user_b = make_user()
data = {
'username': user_b.username
}
url = self._get_url()
response = self.client.patch(url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
user.refresh_from_db()
self.assertNotEqual(user.username, user_b.username)
def test_can_update_user_name(self):
"""
should be able to update the authenticated user name and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
new_name = fake.name()
data = {
'name': new_name
}
url = self._get_url()
response = self.client.patch(url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user.refresh_from_db()
self.assertEqual(user.profile.name, new_name)
def test_can_update_user_bio(self):
"""
should be able to update the authenticated user bio and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
new_bio = make_user_bio()
data = {
'bio': new_bio
}
url = self._get_url()
response = self.client.patch(url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user.refresh_from_db()
self.assertEqual(user.profile.bio, new_bio)
def test_can_update_user_location(self):
"""
should be able to update the authenticated user location and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
new_location = make_user_location()
data = {
'location': new_location
}
url = self._get_url()
response = self.client.patch(url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user.refresh_from_db()
self.assertEqual(user.profile.location, new_location)
def test_can_update_user_followers_count_visible(self):
"""
should be able to update the authenticated user followers_count_visible and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
new_followers_count_visible = not user.profile.followers_count_visible
data = {
'followers_count_visible': new_followers_count_visible
}
url = self._get_url()
response = self.client.patch(url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user.refresh_from_db()
self.assertEqual(user.profile.followers_count_visible, new_followers_count_visible)
def test_can_update_user_community_posts_visible(self):
"""
should be able to update the authenticated user community_posts_visible and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
new_community_posts_visible = not user.profile.community_posts_visible
data = {
'community_posts_visible': new_community_posts_visible
}
url = self._get_url()
response = self.client.patch(url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user.refresh_from_db()
self.assertEqual(user.profile.community_posts_visible, new_community_posts_visible)
def test_can_update_user_avatar(self):
"""
should be able to update the authenticated user avatar and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
new_avatar = make_user_avatar()
data = {
'avatar': new_avatar
}
url = self._get_url()
response = self.client.patch(url, data, **headers, format='multipart')
self.assertEqual(response.status_code, status.HTTP_200_OK)
user.refresh_from_db()
self.assertIsNotNone(user.profile.avatar)
def test_can_update_user_avatar_plus_username(self):
"""
should be able to update the authenticated user avatar and username and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
new_avatar = make_user_avatar()
new_username = 'paulyd97'
data = {
'avatar': new_avatar,
'username': new_username
}
url = self._get_url()
response = self.client.patch(url, data, **headers, format='multipart')
self.assertEqual(response.status_code, status.HTTP_200_OK)
user.refresh_from_db()
self.assertIsNotNone(user.profile.avatar)
self.assertEqual(user.username, new_username)
def test_can_delete_user_avatar(self):
"""
should be able to delete the authenticated user avatar and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
user.profile.avatar = make_user_avatar()
user.save()
data = {
'avatar': ''
}
url = self._get_url()
response = self.client.patch(url, data, **headers, format='multipart')
self.assertEqual(response.status_code, status.HTTP_200_OK)
user.refresh_from_db()
self.assertTrue(not user.profile.avatar)
def test_can_update_user_cover(self):
"""
should be able to update the authenticated user cover and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
new_cover = make_user_cover()
data = {
'cover': new_cover
}
url = self._get_url()
response = self.client.patch(url, data, **headers, format='multipart')
self.assertEqual(response.status_code, status.HTTP_200_OK)
user.refresh_from_db()
self.assertIsNotNone(user.profile.cover)
def test_can_delete_user_cover(self):
"""
should be able to delete the authenticated user cover and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
user.profile.cover = make_user_cover()
user.save()
data = {
'cover': ''
}
url = self._get_url()
response = self.client.patch(url, data, **headers, format='multipart')
self.assertEqual(response.status_code, status.HTTP_200_OK)
user.refresh_from_db()
self.assertTrue(not user.profile.cover)
def test_can_delete_user_bio(self):
"""
should be able to delete the authenticated user bio and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
user.profile.bio = make_user_bio()
user.save()
data = {
'bio': ''
}
url = self._get_url()
response = self.client.patch(url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user.refresh_from_db()
self.assertTrue(not user.profile.bio)
def test_can_delete_user_location(self):
"""
should be able to delete the authenticated user location and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
user.profile.location = make_user_location()
user.save()
data = {
'location': ''
}
url = self._get_url()
response = self.client.patch(url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user.refresh_from_db()
self.assertTrue(not user.profile.location)
def test_can_delete_user_url(self):
"""
should be able to delete the authenticated user url and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
user.profile.url = fake.url()
user.save()
data = {
'url': ''
}
url = self._get_url()
response = self.client.patch(url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user.refresh_from_db()
self.assertTrue(not user.profile.url)
def test_can_update_user_url(self):
"""
should be able to update the authenticated user url and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
new_url = fake.url()
data = {
'url': new_url
}
url = self._get_url()
response = self.client.patch(url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user.refresh_from_db()
self.assertEqual(new_url, user.profile.url)
def test_can_update_user_url_with_not_fully_qualified_urls(self):
"""
should be able to update the authenticated user url with not fully qualified urls and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
new_url = fake.url()
parsed_url = urlsplit(new_url)
unfully_qualified_url = parsed_url.netloc
data = {
'url': unfully_qualified_url
}
url = self._get_url()
response = self.client.patch(url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user.refresh_from_db()
self.assertEqual('https://' + unfully_qualified_url, user.profile.url)
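        # Note on the check above: urlsplit('https://example.com/path').netloc
        # is just 'example.com', so the PATCHed value is a bare host and this
        # test encodes the expectation that the API prepends 'https://' before
        # storing it.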
def test_can_update_user_visibility(self):
"""
should be able to update the authenticated user visibility and return 200
"""
        for initial_visibility, initial_visibility_name in User.VISIBILITY_TYPES:
            for new_visibility, new_visibility_name in User.VISIBILITY_TYPES:
                if new_visibility == initial_visibility:
                    continue
user = make_user(visibility=initial_visibility)
headers = make_authentication_headers_for_user(user)
data = {
'visibility': new_visibility
}
url = self._get_url()
response = self.client.patch(url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user.refresh_from_db()
self.assertEqual(user.visibility, new_visibility)
def test_when_updating_visibility_to_public_existing_follow_requests_get_deleted(self):
"""
when updating the visibility to public, existing follow requests should be deleted
"""
initial_visibility = User.VISIBILITY_TYPE_PRIVATE
new_visibility = User.VISIBILITY_TYPE_PUBLIC
user = make_user(visibility=initial_visibility)
headers = make_authentication_headers_for_user(user)
user_requesting_to_follow = make_user()
user_requesting_to_follow.create_follow_request_for_user(user=user)
data = {
'visibility': new_visibility
}
url = self._get_url()
response = self.client.patch(url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertFalse(user.has_follow_request_from_user(user_requesting_to_follow))
def test_when_updating_visibility_to_private_existing_connection_requests_get_deleted(self):
"""
when updating the visibility to private, existing connection requests should be deleted
"""
initial_visibility = User.VISIBILITY_TYPE_PUBLIC
new_visibility = User.VISIBILITY_TYPE_PRIVATE
user = make_user(visibility=initial_visibility)
headers = make_authentication_headers_for_user(user)
number_of_connection_requests = 3
for i in range(number_of_connection_requests):
user_requesting_to_connect = make_user()
circle_to_connect = mixer.blend(Circle, creator=user_requesting_to_connect)
user_requesting_to_connect.connect_with_user_with_id(user.pk, circles_ids=[circle_to_connect.pk])
data = {
'visibility': new_visibility
}
url = self._get_url()
response = self.client.patch(url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(user.targeted_connections.count(), 0)
def test_when_updating_visibility_to_non_private_from_non_private_existing_connection_requests_dont_get_deleted(self):
"""
when updating the visibility from non private, to another non private, connection requests should not be deleted
"""
for initial_visibility, name in User.VISIBILITY_TYPES:
for new_visibility, n_name in User.VISIBILITY_TYPES:
                if initial_visibility == User.VISIBILITY_TYPE_PRIVATE or new_visibility == User.VISIBILITY_TYPE_PRIVATE:
                    continue
user = make_user(visibility=initial_visibility)
headers = make_authentication_headers_for_user(user)
number_of_connection_requests = 3
for i in range(number_of_connection_requests):
user_requesting_to_connect = make_user()
circle_to_connect = mixer.blend(Circle, creator=user_requesting_to_connect)
user_requesting_to_connect.connect_with_user_with_id(user.pk, circles_ids=[circle_to_connect.pk])
data = {
'visibility': new_visibility
}
url = self._get_url()
response = self.client.patch(url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(user.targeted_connections.count(), number_of_connection_requests)
def _get_url(self):
return reverse('authenticated-user')
class AuthenticatedUserDeleteTests(OpenbookAPITestCase):
fixtures = [
'openbook_circles/fixtures/circles.json'
]
def test_can_delete_user_with_password(self):
"""
should be able to delete the authenticated user with his password and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
user_password = fake.password()
user.set_password(user_password)
user.save()
data = {
'password': user_password
}
url = self._get_url()
response = self.client.post(url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertFalse(User.objects.filter(pk=user.pk).exists())
def test_cant_delete_user_with_wrong_password(self):
"""
should not be able to delete the authenticated user with a wrong password and return 401
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
        wrong_password = fake.password()
        user.save()
        data = {
            'password': wrong_password
        }
url = self._get_url()
response = self.client.post(url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertTrue(User.objects.filter(pk=user.pk).exists())
def test_cant_delete_user_without_password(self):
"""
should not be able to delete the authenticated user without his password and return 400
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
user.save()
url = self._get_url()
response = self.client.post(url, **headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue(User.objects.filter(pk=user.pk).exists())
def _get_url(self):
return reverse('delete-authenticated-user')
class AuthenticatedUserNotificationsSettingsTests(OpenbookAPITestCase):
"""
AuthenticatedUserNotificationsSettings
"""
def test_can_retrieve_notifications_settings(self):
"""
should be able to retrieve own notifications settings and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
url = self._get_url()
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
parsed_response = json.loads(response.content)
self.assertIn('id', parsed_response)
response_id = parsed_response['id']
self.assertEqual(response_id, user.notifications_settings.pk)
def test_can_update_notifications_settings(self):
"""
should be able to update notifications settings and return 200
"""
user = make_user()
notifications_settings = user.notifications_settings
notifications_settings.post_comment_notifications = fake.boolean()
notifications_settings.post_reaction_notifications = fake.boolean()
notifications_settings.follow_notifications = fake.boolean()
notifications_settings.follow_request_notifications = fake.boolean()
notifications_settings.follow_request_approved_notifications = fake.boolean()
notifications_settings.connection_request_notifications = fake.boolean()
notifications_settings.connection_confirmed_notifications = fake.boolean()
notifications_settings.community_invite_notifications = fake.boolean()
notifications_settings.community_new_post_notifications = fake.boolean()
notifications_settings.user_new_post_notifications = fake.boolean()
notifications_settings.post_comment_reply_notifications = fake.boolean()
notifications_settings.post_comment_reaction_notifications = fake.boolean()
notifications_settings.post_comment_user_mention_notifications = fake.boolean()
notifications_settings.post_user_mention_notifications = fake.boolean()
notifications_settings.save()
headers = make_authentication_headers_for_user(user)
new_post_comment_notifications = not notifications_settings.post_comment_notifications
new_post_reaction_notifications = not notifications_settings.post_reaction_notifications
new_follow_notifications = not notifications_settings.follow_notifications
new_follow_request_notifications = not notifications_settings.follow_request_notifications
new_follow_request_approved_notifications = not notifications_settings.follow_request_approved_notifications
new_connection_request_notifications = not notifications_settings.connection_request_notifications
new_connection_confirmed_notifications = not notifications_settings.connection_confirmed_notifications
new_community_invite_notifications = not notifications_settings.community_invite_notifications
new_community_new_post_notifications = not notifications_settings.community_new_post_notifications
new_user_new_post_notifications = not notifications_settings.user_new_post_notifications
new_post_comment_reaction_notifications = not notifications_settings.post_comment_reaction_notifications
new_post_comment_reply_notifications = not notifications_settings.post_comment_reply_notifications
new_post_comment_user_mention_notifications = not notifications_settings.post_comment_user_mention_notifications
new_post_user_mention_notifications = not notifications_settings.post_user_mention_notifications
data = {
'post_comment_notifications': new_post_comment_notifications,
'post_reaction_notifications': new_post_reaction_notifications,
'follow_notifications': new_follow_notifications,
'follow_request_notifications': new_follow_request_notifications,
'follow_request_approved_notifications': new_follow_request_approved_notifications,
'connection_request_notifications': new_connection_request_notifications,
'connection_confirmed_notifications': new_connection_confirmed_notifications,
'community_invite_notifications': new_community_invite_notifications,
'community_new_post_notifications': new_community_new_post_notifications,
'user_new_post_notifications': new_user_new_post_notifications,
'post_comment_reply_notifications': new_post_comment_reply_notifications,
'post_comment_reaction_notifications': new_post_comment_reaction_notifications,
'post_comment_user_mention_notifications': new_post_comment_user_mention_notifications,
'post_user_mention_notifications': new_post_user_mention_notifications
}
url = self._get_url()
response = self.client.patch(url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
notifications_settings.refresh_from_db()
self.assertEqual(notifications_settings.post_comment_notifications, new_post_comment_notifications)
self.assertEqual(notifications_settings.post_reaction_notifications, new_post_reaction_notifications)
self.assertEqual(notifications_settings.follow_notifications, new_follow_notifications)
self.assertEqual(notifications_settings.follow_request_notifications, new_follow_request_notifications)
self.assertEqual(notifications_settings.follow_request_approved_notifications,
new_follow_request_approved_notifications)
self.assertEqual(notifications_settings.connection_request_notifications, new_connection_request_notifications)
self.assertEqual(notifications_settings.community_invite_notifications, new_community_invite_notifications)
self.assertEqual(notifications_settings.community_new_post_notifications, new_community_new_post_notifications)
self.assertEqual(notifications_settings.user_new_post_notifications, new_user_new_post_notifications)
self.assertEqual(notifications_settings.connection_confirmed_notifications,
new_connection_confirmed_notifications)
        self.assertEqual(notifications_settings.post_comment_reply_notifications,
                         new_post_comment_reply_notifications)
        self.assertEqual(notifications_settings.post_comment_reaction_notifications,
                         new_post_comment_reaction_notifications)
        self.assertEqual(notifications_settings.post_comment_user_mention_notifications,
                         new_post_comment_user_mention_notifications)
        self.assertEqual(notifications_settings.post_user_mention_notifications,
                         new_post_user_mention_notifications)
def _get_url(self):
return reverse('authenticated-user-notifications-settings')
class AuthenticatedUserSettingsAPITests(OpenbookAPITestCase):
"""
User Settings API
"""
url = reverse('authenticated-user-settings')
def test_can_change_password_successfully(self):
"""
should be able to update the authenticated user password and return 200
"""
user = make_user()
current_raw_password = user.password
user.update_password(user.password) # make sure hashed password is stored
headers = make_authentication_headers_for_user(user)
new_password = fake.password()
data = {
'new_password': new_password,
'current_password': current_raw_password
}
response = self.client.patch(self.url, data, **headers)
        parsed_response = json.loads(response.content)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(parsed_response['username'], user.username)
def test_cannot_change_password_without_current_password(self):
"""
should not be able to update the user password without supplying the current password
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
new_password = fake.password()
data = {
'new_password': new_password
}
response = self.client.patch(self.url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_cannot_change_password_without_correct_password(self):
"""
should not be able to update the authenticated user password without the correct password
"""
user = make_user()
user.update_password(user.password) # make sure hashed password is stored
headers = make_authentication_headers_for_user(user)
new_password = fake.password()
data = {
'new_password': new_password,
'current_password': fake.password() # use another fake password
}
response = self.client.patch(self.url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_cannot_change_password_without_new_password(self):
"""
should not be able to update the authenticated user password without the new password
"""
user = make_user()
current_raw_password = user.password
user.update_password(user.password) # make sure hashed password is stored
headers = make_authentication_headers_for_user(user)
data = {
'current_password': current_raw_password
}
response = self.client.patch(self.url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_cannot_change_email_to_existing_email(self):
"""
should not be able to update the authenticated user email to existing email
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
data = {
'email': user.email
}
with mock.patch.object(AuthenticatedUserSettings, 'send_confirmation_email', return_value=None):
response = self.client.patch(self.url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class AuthenticatedUserAcceptGuidelines(OpenbookAPITestCase):
"""
AuthenticatedUserAcceptGuidelines API
"""
url = reverse('authenticated-user-accept-guidelines')
def test_can_accept_guidelines(self):
"""
should be able to accept the guidelines and return 200
"""
user = make_user()
user.are_guidelines_accepted = False
user.save()
headers = make_authentication_headers_for_user(user)
response = self.client.post(self.url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user.refresh_from_db()
self.assertTrue(user.are_guidelines_accepted)
    def test_cant_accept_guidelines_if_already_accepted(self):
"""
should not be able to accept the guidelines if already accepted and return 400
"""
user = make_user()
user.are_guidelines_accepted = True
user.save()
headers = make_authentication_headers_for_user(user)
response = self.client.post(self.url, **headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
user.refresh_from_db()
self.assertTrue(user.are_guidelines_accepted)
class AuthenticatedUserLanguageAPI(OpenbookAPITestCase):
"""
AuthenticatedUserLanguageAPI API
"""
    fixtures = [
        'openbook_common/fixtures/languages.json'
    ]
    url = reverse('user-language')
def test_can_get_all_languages(self):
"""
        should be able to retrieve the list of supported languages and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
response = self.client.get(self.url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
parsed_response = json.loads(response.content)
        self.assertEqual(len(parsed_response), 25)
def test_can_set_language(self):
"""
should be able to set user language and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
language = make_random_language()
response = self.client.post(self.url, {
'language_id': language.id
}, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user.refresh_from_db()
        self.assertEqual(user.language.id, language.id)
def test_cannot_set_invalid_language(self):
"""
        should not be able to set an invalid language and return 400
"""
user = make_user()
language = make_random_language()
user.language = language
user.save()
headers = make_authentication_headers_for_user(user)
response = self.client.post(self.url, {
'language_id': 99999
}, **headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
user.refresh_from_db()
        self.assertEqual(user.language.id, language.id)
| 32.766327
| 122
| 0.677712
|
4a132dfee586102f9a09b6165079113f880dd4f3
| 1,124
|
py
|
Python
|
python/test/test_stop_order_request.py
|
KoenBal/OANDA_V20_Client
|
e67b9dbaddff6ed23e355d3ce7f9c9972799c702
|
[
"MIT"
] | 1
|
2018-10-25T03:57:32.000Z
|
2018-10-25T03:57:32.000Z
|
python/test/test_stop_order_request.py
|
KoenBal/OANDA_V20_Client
|
e67b9dbaddff6ed23e355d3ce7f9c9972799c702
|
[
"MIT"
] | null | null | null |
python/test/test_stop_order_request.py
|
KoenBal/OANDA_V20_Client
|
e67b9dbaddff6ed23e355d3ce7f9c9972799c702
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
OANDA v20 REST API
The full OANDA v20 REST API Specification. This specification defines how to interact with v20 Accounts, Trades, Orders, Pricing and more. To authenticate use the string 'Bearer ' followed by the token which can be obtained at https://www.oanda.com/demo-account/tpa/personal_token # noqa: E501
OpenAPI spec version: 3.0.23
Contact: api@oanda.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import oanda
from oanda.models.stop_order_request import StopOrderRequest # noqa: E501
from oanda.rest import ApiException
class TestStopOrderRequest(unittest.TestCase):
"""StopOrderRequest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testStopOrderRequest(self):
"""Test StopOrderRequest"""
# FIXME: construct object with mandatory attributes with example values
# model = oanda.models.stop_order_request.StopOrderRequest() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 27.414634
| 298
| 0.720641
|
4a132e03be79dabfaf1ac43e2c37823b4dd65cf5
| 2,277
|
py
|
Python
|
rssfly/extractor/comic_walker.py
|
lidavidm/rssfly
|
1cfb893a249e4095412b966a1bf50fc3de7744e7
|
[
"Apache-2.0"
] | 1
|
2021-02-14T03:44:35.000Z
|
2021-02-14T03:44:35.000Z
|
rssfly/extractor/comic_walker.py
|
lidavidm/rssfly
|
1cfb893a249e4095412b966a1bf50fc3de7744e7
|
[
"Apache-2.0"
] | 6
|
2021-07-15T13:03:19.000Z
|
2022-03-26T14:14:14.000Z
|
rssfly/extractor/comic_walker.py
|
lidavidm/rssfly
|
1cfb893a249e4095412b966a1bf50fc3de7744e7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 David Li
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib.parse
import structlog
from bs4 import BeautifulSoup
from rssfly.extractor.common import Chapter, Comic, Context, Extractor
logger = structlog.get_logger(__name__)
class ComicWalkerExtractor(Extractor):
@property
def name(self):
return "comic_walker"
@property
def publisher(self):
return "Kadokawa"
def extract(self, context: Context, comic_id: str) -> Comic:
url = f"https://comic-walker.com/contents/detail/{comic_id}"
logger.info("Fetching from comic-walker.com", url=url)
raw_text = context.get_text(url)
root = BeautifulSoup(raw_text, features="html.parser")
chapter_els = []
chapters = {}
for list_el in root.find_all(class_="acBacknumber-list"):
chapter_els.extend(list_el.find_all("li"))
for chapter_el in chapter_els:
link_el = chapter_el.find("a")
chapter_title = link_el.attrs["title"]
chapter_url = urllib.parse.urljoin(url, link_el.attrs["href"])
chapter_id = chapter_el.find(class_="acBacknumber-title").text.strip()
# Deduplicate by URL
chapters[chapter_url] = Chapter(
chapter_id=chapter_id,
name=chapter_title,
url=chapter_url,
)
chapter_list = list(
sorted(chapters.values(), key=lambda chapter: chapter.chapter_id)
)
comic_name = root.find("div", class_="comicIndex-box").find("h1").text.strip()
return Comic(
publisher=self.publisher,
comic_id=comic_id,
name=comic_name,
url=url,
chapters=chapter_list,
)
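# Rough usage of the extractor above (the `context` object and the comic id
# are assumptions for illustration; Context comes from rssfly.extractor.common):
#
#   extractor = ComicWalkerExtractor()
#   comic = extractor.extract(context, "KC_9999")  # hypothetical comic id
#   for chapter in comic.chapters:
#       print(chapter.chapter_id, chapter.name, chapter.url)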
| 35.030769
| 86
| 0.651296
|
4a132ed2c1af9ded142ea0f932d3b576726fe1ef
| 461
|
py
|
Python
|
mayan/apps/linking/tests/literals.py
|
CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons
|
0e4e919fd2e1ded6711354a0330135283e87f8c7
|
[
"Apache-2.0"
] | 2
|
2021-09-12T19:41:19.000Z
|
2021-09-12T19:41:20.000Z
|
mayan/apps/linking/tests/literals.py
|
CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons
|
0e4e919fd2e1ded6711354a0330135283e87f8c7
|
[
"Apache-2.0"
] | 37
|
2021-09-13T01:00:12.000Z
|
2021-10-02T03:54:30.000Z
|
mayan/apps/linking/tests/literals.py
|
CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons
|
0e4e919fd2e1ded6711354a0330135283e87f8c7
|
[
"Apache-2.0"
] | 1
|
2021-09-22T13:17:30.000Z
|
2021-09-22T13:17:30.000Z
|
from ..literals import INCLUSION_AND
TEST_SMART_LINK_CONDITION_FOREIGN_DOCUMENT_DATA = 'label'
TEST_SMART_LINK_CONDITION_EXPRESSION = 'title'
TEST_SMART_LINK_CONDITION_EXPRESSION_EDITED = '\'test edited\''
TEST_SMART_LINK_CONDITION_INCLUSION = INCLUSION_AND
TEST_SMART_LINK_CONDITION_OPERATOR = 'icontains'
TEST_SMART_LINK_DYNAMIC_LABEL = '{{ document.label }}'
TEST_SMART_LINK_LABEL_EDITED = 'test edited label'
TEST_SMART_LINK_LABEL = 'test label'
| 41.909091
| 64
| 0.830803
|
4a133026c32306bebcbbf568209e677798d91294
| 1,387
|
py
|
Python
|
synapse/server_notices/worker_server_notices_sender.py
|
zauguin/synapse
|
ea00f18135ce30e8415526ce68585ea90da5b856
|
[
"Apache-2.0"
] | 1
|
2019-08-29T05:52:15.000Z
|
2019-08-29T05:52:15.000Z
|
synapse/server_notices/worker_server_notices_sender.py
|
zauguin/synapse
|
ea00f18135ce30e8415526ce68585ea90da5b856
|
[
"Apache-2.0"
] | null | null | null |
synapse/server_notices/worker_server_notices_sender.py
|
zauguin/synapse
|
ea00f18135ce30e8415526ce68585ea90da5b856
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
class WorkerServerNoticesSender(object):
"""Stub impl of ServerNoticesSender which does nothing"""
def __init__(self, hs):
"""
Args:
hs (synapse.server.HomeServer):
"""
def on_user_syncing(self, user_id):
"""Called when the user performs a sync operation.
Args:
user_id (str): mxid of user who synced
Returns:
Deferred
"""
return defer.succeed(None)
def on_user_ip(self, user_id):
"""Called on the master when a worker process saw a client request.
Args:
user_id (str): mxid
Returns:
Deferred
"""
raise AssertionError("on_user_ip unexpectedly called on worker")
| 29.510638
| 75
| 0.656092
|
4a133057f9802a38b89d26865d0eae962fe7947a
| 7,686
|
py
|
Python
|
landavailability/api/ranking.py
|
alphagov/land-avilability-api
|
048d4eed4caedb7b9f41caa5d69025506b2eb57d
|
[
"MIT"
] | 1
|
2017-07-24T17:00:34.000Z
|
2017-07-24T17:00:34.000Z
|
landavailability/api/ranking.py
|
alphagov/land-availability-api
|
048d4eed4caedb7b9f41caa5d69025506b2eb57d
|
[
"MIT"
] | 23
|
2016-11-21T15:00:11.000Z
|
2019-06-04T07:07:55.000Z
|
landavailability/api/ranking.py
|
alphagov/land-avilability-api
|
048d4eed4caedb7b9f41caa5d69025506b2eb57d
|
[
"MIT"
] | 4
|
2017-03-23T16:42:40.000Z
|
2021-12-01T07:27:30.000Z
|
import pandas as pd
import numpy as np
log = __import__('logging').getLogger(__file__)
def school_site_size_range(**kwargs):
'''Returns the floor space (m^2), as a range, for a school with the
given characteristics.
'''
# size_req on ola
size = school_site_size(**kwargs)
# upper_site_req, lower_site_req on ola
size_range = (size * 0.95, size * 1.5)
return size_range
def school_site_size(num_pupils=0,
num_pupils_post16=0,
school_type='primary_school'):
'''Return the expected floor space (m^2) for the given parameters.
NB pupils post-16 should be included both figures 'num_pupils' and
'num_pupils_post16'.
'''
if school_type == 'secondary_school':
# Deal with sixth form additional space
if num_pupils_post16 > 0:
under16 = num_pupils - num_pupils_post16
return (1050.0 + (6.3 * under16)) + \
(350 + (7 * float(num_pupils_post16)))
else:
return 1050 + (6.3 * float(num_pupils))
elif school_type == 'primary_school':
return 350.0 + (4.1 * float(num_pupils))
    else:  # default to primary_school
        return 350.0 + (4.1 * float(num_pupils))
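# A quick arithmetic check of the sizing formulas above (illustrative pupil
# numbers, not values taken from the source):
#
#   school_site_size(num_pupils=420, school_type='primary_school')
#   # -> 2072.0, i.e. 350.0 + 4.1 * 420
#   school_site_size_range(num_pupils=420, school_type='primary_school')
#   # -> roughly (1968.4, 3108.0), i.e. (size * 0.95, size * 1.5)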
class SchoolRankingConfig(object):
'''The attributes of the location ranking for building
schools, that can be plugged into the more general z-values algorithm.
i.e. the extraction of features from the location and query
and the 'ideal' values (whether high is better or not)
'''
def __init__(self, lower_site_req, upper_site_req, school_type):
self.lower_site_req = lower_site_req
self.upper_site_req = upper_site_req
self.school_type = school_type
self.ideal_values = dict([
('area_suitable', 1),
('geoattributes.BROADBAND', 1),
('greenbelt overlap', 0),
('geoattributes.DISTANCE TO BUS STOP', 0),
('geoattributes.DISTANCE TO METRO STATION', 0),
('geoattributes.DISTANCE TO MOTORWAY JUNCTION', 1),
('geoattributes.DISTANCE TO OVERHEAD LINE', 1),
('geoattributes.DISTANCE TO PRIMARY SCHOOL',
0 if school_type == 'secondary_school' else 1),
('geoattributes.DISTANCE TO RAIL STATION', 0),
('geoattributes.DISTANCE TO SECONDARY SCHOOL',
0 if school_type == 'primary_school' else 1),
('geoattributes.DISTANCE TO SUBSTATION', 1),
])
def locations_to_dataframe(self, locations):
'''Converts location objects (as a list or ResultSet) to a DataFrame.
The fields kept are exactly the attributes needed for the scoring.
'''
# TODO
# Check that distances are correctly either euclidean or network.
# Network:
# 'geoattributes.DISTANCE TO BUS STOP_zscore',
# 'geoattributes.DISTANCE TO METRO STATION_zscore',
# 'geoattributes.DISTANCE TO PRIMARY SCHOOL_zscore',
# 'geoattributes.DISTANCE TO RAIL STATION_zscore',
# 'geoattributes.DISTANCE TO SECONDARY SCHOOL_zscore'
# Euclidean:
# 'geoattributes.DISTANCE TO MOTORWAY JUNCTION',
# 'geoattributes.DISTANCE TO OVERHEAD LINE',
# 'geoattributes.DISTANCE TO SUBSTATION'
df = pd.DataFrame([
{
'estimated_floor_space': l.estimated_floor_space,
'geoattributes.BROADBAND': 1.0 if l.nearest_broadband_fast else 0.0,
'greenbelt overlap': l.greenbelt_overlap,
'geoattributes.DISTANCE TO BUS STOP': l.nearest_busstop_distance,
'geoattributes.DISTANCE TO METRO STATION': l.nearest_metrotube_distance,
'geoattributes.DISTANCE TO MOTORWAY JUNCTION': l.nearest_motorway_distance,
'geoattributes.DISTANCE TO OVERHEAD LINE': l.nearest_ohl_distance,
'geoattributes.DISTANCE TO PRIMARY SCHOOL': l.nearest_primary_school_distance,
'geoattributes.DISTANCE TO RAIL STATION': l.nearest_trainstop_distance,
'geoattributes.DISTANCE TO SECONDARY SCHOOL': l.nearest_secondary_school_distance,
'geoattributes.DISTANCE TO SUBSTATION': l.nearest_substation_distance,
}
for l in locations
],
index=[l.id for l in locations])
df = df.apply(lambda x: pd.to_numeric(x, errors='ignore'))
return df
def extract_features(self, df):
'''Create further features, based on the location data and the query.
Inserts them into the df (in-place).
'''
# work out if the site size is suitable
df['area_suitable'] = is_area_suitable(
df['estimated_floor_space'], self.lower_site_req, self.upper_site_req)
def is_area_suitable(area, lower_site_req, upper_site_req):
return (area > lower_site_req) & \
(area < upper_site_req)
def score_results_dataframe(results_dataframe, ranking_config):
'''Given search results (locations) as rows of a dataframe (with columns
roughly scoring_columns), return another dataframe with those rows and
a column 'score'. A higher score means a higher suitability for building
the specified school.
'''
df = results_dataframe
# filter to only the columns that we'll score against
scoring_columns = ranking_config.ideal_values.keys()
df2 = pd.concat([df[col] for col in scoring_columns], axis=1)
# z-score scaling
# (not really necessary because we scale it again, but useful for
# analysis)
if False:
z_score_scaling(df2)
# Rescale minimum = 0 and maximum = 1 for each column
df3 = rescale_columns_0_to_1(df2)
flip_columns_so_1_is_always_best(df3, ranking_config)
# Assume gaps in the data score 0
# NaN -> 0
df3 = df3.fillna(0)
calculate_score(df3)
return df3
def z_score_scaling(df):
'''Given inputs as rows of a dataframe, for every given column (apart from
'area_suitable'), this function scales the values to a z-score and stores
them in new columns '<column>_zscore'.
'''
for col in df.columns:
if col == 'area_suitable':
continue
col_zscore = col + '_zscore'
# zscore calculation: x = (x - column_mean)/column_stdev
col_mean_normalized = df[col] - df[col].mean()
standard_deviation = df[col].std(ddof=0)
if standard_deviation == 0.0:
# can't divide by zero
df[col_zscore] = col_mean_normalized
else:
df[col_zscore] = col_mean_normalized / standard_deviation
def rescale_columns_0_to_1(df):
'''Rescale values in each column so that they are between 0 and 1.'''
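    # Note: the `or 0.1` fallback below guards against division by zero when a
    # column is constant (max(x) == min(x)); such a column rescales to all zeros.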
return df.apply(
lambda x: (x.astype(float) - min(x)) / ((max(x) - min(x)) or 0.1),
axis=0)
def flip_columns_so_1_is_always_best(df, ranking_config):
'''Given inputs as rows of a dataframe, scaled 0 to 1, flip value of
particular columns, so that 1 is always means a positive thing and 0
negative. Changes the df in-place.'''
missing_ideal_values = set(df.columns) - set(ranking_config.ideal_values)
assert not missing_ideal_values
columns_to_flip = [
col for col, ideal_value in ranking_config.ideal_values.items()
if ideal_value == 0]
for col in columns_to_flip:
df[col] = df[col].map(lambda x: 1.0 - x)
def calculate_score(df):
'''Given inputs as rows of a dataframe, that are scaled 0 to 1, this
function appends a column 'score' for ranking. (Score: bigger=better)
'''
df['score'] = np.linalg.norm(df, axis=1)
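# End-to-end sketch of the ranking pipeline above (the `locations` list and the
# pupil numbers are assumptions for illustration; in practice locations come
# from a search against the API):
#
#   lower, upper = school_site_size_range(num_pupils=420,
#                                         school_type='primary_school')
#   config = SchoolRankingConfig(lower, upper, 'primary_school')
#   df = config.locations_to_dataframe(locations)
#   config.extract_features(df)
#   scored = score_results_dataframe(df, config)
#   best = scored.sort_values('score', ascending=False).head(10)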
| 39.618557
| 98
| 0.647281
|
4a1330bb356730c273398a46882c1f5fad0d8b47
| 5,144
|
py
|
Python
|
long_range_conv/lrc_layers/nufft_layers_1d.py
|
Forgotten/Efficient_Long-Range_Convolutions_for_Point_Clouds
|
1fe364052eca9330edeaeb32c59d0ec5195c12c4
|
[
"MIT"
] | 4
|
2020-10-10T19:45:49.000Z
|
2021-09-24T09:45:38.000Z
|
long_range_conv/lrc_layers/nufft_layers_1d.py
|
Forgotten/Efficient_Long-Range_Convolutions_for_Point_Clouds
|
1fe364052eca9330edeaeb32c59d0ec5195c12c4
|
[
"MIT"
] | null | null | null |
long_range_conv/lrc_layers/nufft_layers_1d.py
|
Forgotten/Efficient_Long-Range_Convolutions_for_Point_Clouds
|
1fe364052eca9330edeaeb32c59d0ec5195c12c4
|
[
"MIT"
] | 1
|
2020-10-22T02:21:31.000Z
|
2020-10-22T02:21:31.000Z
|
import tensorflow as tf
import numpy as np
@tf.function
def gaussianPer(x, tau, L = 2*np.pi):
return tf.exp( -tf.square(x )/(4*tau)) + \
tf.exp( -tf.square(x-L)/(4*tau)) + \
tf.exp( -tf.square(x+L)/(4*tau))
@tf.function
def gaussianDeconv(k, tau):
return tf.sqrt(np.pi/tau)*tf.exp(tf.square(k)*tau)
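# How the pair above fits together: gaussianPer smears each point onto the
# periodic grid (period L) with a Gaussian of width ~sqrt(tau), and
# gaussianDeconv is the matching Fourier-space correction. The transform of
# the Gaussian decays like exp(-k^2 * tau), so multiplying by
# sqrt(pi/tau) * exp(k^2 * tau) undoes the smearing up to a constant factor,
# which is the standard NUFFT mollify-then-deconvolve trick.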
class NUFFTLayerMultiChannelInitMixed(tf.keras.layers.Layer):
def __init__(self, nChannels, NpointsMesh, xLims, mu1 = 1.0, mu2=0.5):
super(NUFFTLayerMultiChannelInitMixed, self).__init__()
self.nChannels = nChannels
self.NpointsMesh = NpointsMesh
self.mu1 = tf.constant(mu1, dtype=tf.float32)
self.mu2 = tf.constant(mu2, dtype=tf.float32)
# we need the number of points to be odd
assert NpointsMesh % 2 == 1
self.xLims = xLims
self.L = np.abs(xLims[1] - xLims[0])
self.tau = tf.constant(12*(self.L/(2*np.pi*NpointsMesh))**2,
dtype = tf.float32)# the size of the mollifications
self.kGrid = tf.constant((2*np.pi/self.L)*\
np.linspace(-(NpointsMesh//2),
NpointsMesh//2,
NpointsMesh),
dtype = tf.float32)
# we need to define a mesh betwen xLims[0] and xLims[1]
self.xGrid = tf.constant(np.linspace(xLims[0],
xLims[1],
NpointsMesh+1)[:-1],
dtype = tf.float32)
def build(self, input_shape):
print("building the channels")
# we initialize the channel multipliers
self.shift = []
for ii in range(2):
self.shift.append(self.add_weight("std_"+str(ii),
initializer=tf.initializers.ones(),
shape=[1,]))
self.amplitud = []
for ii in range(2):
self.amplitud.append(self.add_weight("bias_"+str(ii),
initializer=tf.initializers.ones(),
shape=[1,]))
@tf.function
def call(self, input):
# we need to add an iterpolation step
Npoints = input.shape[-1]
batch_size = input.shape[0]
diff = tf.expand_dims(input, -1) - tf.reshape(self.xGrid, (1,1, self.NpointsMesh))
# (batch_size, Np*Ncells, NpointsMesh)
array_gaussian = gaussianPer(diff, self.tau, self.L)
# (batch_size, Np*Ncells, NpointsMesh)
array_Gaussian_complex = tf.complex(array_gaussian, 0.0)
# (batch_size, Np*Ncells, NpointsMesh)
fftGauss = tf.signal.fftshift(tf.signal.fft(array_Gaussian_complex),axes=-1)
# (batch_size, Np*Ncells, NpointsMesh)
Deconv = tf.complex(tf.expand_dims(tf.expand_dims(gaussianDeconv(self.kGrid, self.tau), 0),0),0.0)
rfft = tf.multiply(fftGauss, Deconv)
#(batch_size, Np*Ncells,NpointsMesh)
Rerfft = tf.math.real(rfft)
Imrfft = tf.math.imag(rfft)
multiplier1 = tf.expand_dims(tf.expand_dims(self.amplitud[0]*4*np.pi*\
tf.math.reciprocal( tf.square(self.kGrid) + \
tf.square(self.mu1*self.shift[0])), 0),0)
multiplierRe1 = tf.math.real(multiplier1)
multReRefft = tf.multiply(multiplierRe1,Rerfft)
multImRefft = tf.multiply(multiplierRe1,Imrfft)
multfft = tf.complex(multReRefft,multImRefft)
##(batch_size, Np*Ncells, NpointsMesh)
# an alternative method:
# fft = tf.complex(self.multipliersRe[0],self.multipliersIm[0])
# multFFT = tf.multiply(rfft,fft)
multiplier2 = tf.expand_dims(tf.expand_dims(self.amplitud[1]*4*np.pi*\
tf.math.reciprocal( tf.square(self.kGrid) + \
tf.square(self.mu2*self.shift[1])), 0),0)
multiplierRe2 = tf.math.real(multiplier2)
multReRefft2 = tf.multiply(multiplierRe2,Rerfft)
multImRefft2 = tf.multiply(multiplierRe2,Imrfft)
multfft2 = tf.complex(multReRefft2, multImRefft2)
multfftDeconv1 = tf.multiply(multfft, Deconv)
multfftDeconv2 = tf.multiply(multfft2, Deconv)
irfft1 = tf.math.real(tf.signal.ifft(tf.signal.ifftshift(multfftDeconv1,axes=-1)))/(2*np.pi*self.NpointsMesh/self.L)/(2*np.pi)
irfft2 = tf.math.real(tf.signal.ifft(tf.signal.ifftshift(multfftDeconv2,axes=-1)))/(2*np.pi*self.NpointsMesh/self.L)/(2*np.pi)
##(batch_size, Np*Ncells, NpointsMesh)
diag_sum1 = tf.reduce_sum(irfft1*array_gaussian,axis=-1)
##(batch_size,Np*Ncells) part energy
total1 = tf.reduce_sum(tf.reduce_sum(irfft1,axis=1,keepdims=True)*array_gaussian,axis=-1)
##(batch_size,Np*Ncells)
energy1 = total1 - diag_sum1
diag_sum2 = tf.reduce_sum(irfft2*array_gaussian,axis=-1)
##(batch_size,Np*Ncells) part energy
total2 = tf.reduce_sum(tf.reduce_sum(irfft2,axis=1,keepdims=True)*array_gaussian,axis=-1)
##(batch_size,Np*Ncells)
energy2 = total2 - diag_sum2
energy = tf.concat([tf.expand_dims(energy1,axis=-1),tf.expand_dims(energy2,axis=-1)],axis=-1)
return energy
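# Minimal usage sketch for the layer above (shapes and limits are assumptions,
# not taken from the module):
#
#   layer = NUFFTLayerMultiChannelInitMixed(
#       nChannels=2, NpointsMesh=101, xLims=[0.0, 2 * np.pi])
#   x = tf.random.uniform((8, 32), maxval=2 * np.pi)  # (batch, n_points)
#   energy = layer(x)                                 # (batch, n_points, 2)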
| 43.965812
| 131
| 0.602449
|
4a1330e7acf911e2749c60c567ae406d1471e9ec
| 443
|
py
|
Python
|
73.py
|
thaisNY/GuanabaraPy
|
a0a3acbd9242a39491a365b07562037d7a936bba
|
[
"MIT"
] | null | null | null |
73.py
|
thaisNY/GuanabaraPy
|
a0a3acbd9242a39491a365b07562037d7a936bba
|
[
"MIT"
] | null | null | null |
73.py
|
thaisNY/GuanabaraPy
|
a0a3acbd9242a39491a365b07562037d7a936bba
|
[
"MIT"
] | null | null | null |
campeoes = ('Palmeiras','Cruzeiro','Grêmio','Santos','Corintias','Flamengo','Atlético Mineiro','Atlético Paranaense',
'Internacional','Chapecoense','Botafogo','São Paulo','Fluminense','Vasco da Gama','Bahia',
'Sport','Vitória','Ponta Preta','America','Coritiba')
print(campeoes[: 5])
print(campeoes[16:])
print(sorted(campeoes))
pos = campeoes.index('Chapecoense')
print(f'The position of Chapecoense was {pos + 1}th place')
| 55.375
| 117
| 0.68623
|
4a13311b836867169cfd1ae45ba4a4fd91ed01e9
| 229
|
py
|
Python
|
splinter/__init__.py
|
schurma/splinter
|
521556670097cf189c7ad271663e967cbd9c11df
|
[
"BSD-3-Clause"
] | 2,049
|
2015-01-02T00:54:57.000Z
|
2022-03-25T20:58:09.000Z
|
splinter/__init__.py
|
schurma/splinter
|
521556670097cf189c7ad271663e967cbd9c11df
|
[
"BSD-3-Clause"
] | 557
|
2015-01-09T23:13:11.000Z
|
2022-03-31T08:03:08.000Z
|
splinter/__init__.py
|
jsfehler/splinter
|
3131074686255569ba14a6d342f6ac9593529181
|
[
"BSD-3-Clause"
] | 464
|
2015-01-02T15:56:04.000Z
|
2022-03-19T16:31:30.000Z
|
# Copyright 2016 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from splinter.browser import Browser # NOQA
__version__ = "0.16.0"
| 25.444444
| 55
| 0.755459
|
4a13319d92033112d39967336a68e450a96eb319
| 1,650
|
py
|
Python
|
example/testssh.py
|
hubo1016/vlcp-ssh
|
39001f92375f34f52cd711aa5adbd5b181fdcd05
|
[
"Apache-2.0"
] | null | null | null |
example/testssh.py
|
hubo1016/vlcp-ssh
|
39001f92375f34f52cd711aa5adbd5b181fdcd05
|
[
"Apache-2.0"
] | 1
|
2016-01-15T04:05:12.000Z
|
2016-01-15T07:35:45.000Z
|
example/testssh.py
|
hubo1016/vlcp-ssh
|
39001f92375f34f52cd711aa5adbd5b181fdcd05
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on 2015/12/31
:author: think
'''
from __future__ import print_function
from vlcp.server import main
from vlcp.server.module import Module
from vlcp.event.runnable import RoutineContainer
from vlcpssh.sshclient import SSHFactory
from vlcp.utils.connector import TaskPool
from vlcp.config.config import manager
import sys
# Modify following parameters before executing
TARGET = 'localhost'
USERNAME = 'root'
PASSWORD = ''
class MainRoutine(RoutineContainer):
def printall(self, stream):
while True:
for m in stream.prepareRead(self):
yield m
try:
print(stream.readonce())
except EOFError:
break
def main(self):
for m in self.sshfactory.connect(TARGET, username=USERNAME, password=PASSWORD):
yield m
for m in self.sshfactory.execute_command(self.retvalue, 'ls'):
yield m
chan = self.retvalue
self.subroutine(self.printall(chan.stdout))
self.subroutine(self.printall(chan.stderr))
for m in chan.wait(self):
yield m
print(self.retvalue)
class MainModule(Module):
def __init__(self, server):
Module.__init__(self, server)
self.mainroutine = MainRoutine(self.scheduler)
self.taskpool = TaskPool(self.scheduler)
self.sshfactory = SSHFactory(self.taskpool, self.mainroutine)
self.mainroutine.sshfactory = self.sshfactory
self.routines.append(self.mainroutine)
self.routines.append(self.taskpool)
if __name__ == '__main__':
#manager['server.debugging'] = True
main(None, ())
| 30.555556
| 87
| 0.663636
|
4a1332079349afe0641b27c6130090718cb40152
| 7,164
|
py
|
Python
|
gs_quant/analytics/datagrid/data_row.py
|
skyquant2/gs-quant
|
b7e648fa7912b13ad1fd503b643389e34587aa1e
|
[
"Apache-2.0"
] | 4
|
2021-05-11T14:35:53.000Z
|
2022-03-14T03:52:34.000Z
|
gs_quant/analytics/datagrid/data_row.py
|
skyquant2/gs-quant
|
b7e648fa7912b13ad1fd503b643389e34587aa1e
|
[
"Apache-2.0"
] | null | null | null |
gs_quant/analytics/datagrid/data_row.py
|
skyquant2/gs-quant
|
b7e648fa7912b13ad1fd503b643389e34587aa1e
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2019 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from abc import ABC
from enum import Enum
from typing import Dict, List, Optional, Union
from gs_quant.analytics.core import BaseProcessor
from gs_quant.data import DataCoordinate
from gs_quant.data.fields import DataDimension
from gs_quant.entities.entity import Entity
DataDimensions = Dict[Union[DataDimension, str], Union[str, float]]
# Override Types
DIMENSIONS_OVERRIDE = 'dimensionsOverride'
PROCESSOR_OVERRIDE = 'processorOverride'
VALUE_OVERRIDE = 'valueOverride'
# Row Types
DATA_ROW = 'dataRow'
ROW_SEPARATOR = 'rowSeparator'
class Override(ABC):
"""Base class for a DataGrid row override"""
def __init__(self,
column_names: List[str]):
""" Abstract Row Override
:param column_names: column names to override with the specified dimensions
"""
self.column_names = column_names
super().__init__()
def as_dict(self) -> Dict:
return {
'columnNames': self.column_names
}
@classmethod
def from_dict(cls, obj, reference_list):
pass
class ValueOverride(Override):
def __init__(self, column_names: List[str], value: Union[float, str, bool]):
"""
Allows the ability to set a cell to a specific value.
:param column_names: Name of columns to apply the value override.
:param value: Value to set to the row and column intersections.
"""
super().__init__(column_names)
self.value = value
def as_dict(self):
override = super().as_dict()
override['type'] = VALUE_OVERRIDE
override['value'] = self.value
return override
@classmethod
def from_dict(cls, obj, ref):
return ValueOverride(column_names=obj.get('columnNames', []), value=obj['value'])
class DimensionsOverride(Override):
def __init__(self,
column_names: List[str],
dimensions: DataDimensions,
coordinate: DataCoordinate):
""" Override dimensions for the given coordinate
:param column_names: column names to override with the specified dimensions
:param dimensions: dict of dimensions to override columns when fetching data
"""
super().__init__(column_names)
# Following coordinate model, convert override dimensions to match coordinate dimension
self.dimensions = {k.value if isinstance(k, Enum) else k: v for k, v in dimensions.items()}
self.coordinate = coordinate
def as_dict(self):
override = super().as_dict()
override['type'] = DIMENSIONS_OVERRIDE
override['dimensions'] = self.dimensions
override['coordinate'] = self.coordinate.as_dict()
return override
@classmethod
def from_dict(cls, obj, reference_list):
parsed_dimensions = {}
data_dimension_map = DataDimension._value2member_map_
for key, value in obj.get('dimensions', {}).items():
if key in data_dimension_map:
parsed_dimensions[DataDimension(key)] = value
else:
parsed_dimensions[key] = value
return DimensionsOverride(column_names=obj.get('columnNames', []),
dimensions=parsed_dimensions,
coordinate=DataCoordinate.from_dict(obj.get('coordinate', {})))
class ProcessorOverride(Override):
def __init__(self,
column_names: List[str],
processor: BaseProcessor):
""" Abstract Row Override
:param column_names: column names to override with the specified dimensions
:param processor: processor to override
"""
super().__init__(column_names=column_names)
self.processor = processor
def as_dict(self):
override = super().as_dict()
override['type'] = PROCESSOR_OVERRIDE
if self.processor:
override['processor'] = self.processor.as_dict()
override['processor']['processorName'] = self.processor.__class__.__name__
        else:
            # Assigning a key into None would raise a TypeError, so a missing
            # processor is simply recorded as None.
            override['processor'] = None
return override
@classmethod
def from_dict(cls, obj, reference_list):
return ProcessorOverride(column_names=obj.get('columnNames', []),
processor=BaseProcessor.from_dict(obj.get('processor', {}), reference_list))
class RowSeparator:
def __init__(self, name: str):
""" Row Separator
:param name: name of the row separator
"""
self.name = name
def as_dict(self):
return {
'type': ROW_SEPARATOR,
'name': self.name
}
@classmethod
def from_dict(cls, obj):
return RowSeparator(obj['name'])
class DataRow:
"""Row object for DataGrid"""
def __init__(self,
entity: Entity,
overrides: Optional[List[Override]] = None):
""" Data row
:param entity: Specified entity for the DataRow
:param overrides: Optional List of DataRowOverride's for retrieving data
"""
self.entity = entity
self.overrides: List[Override] = overrides or []
def as_dict(self):
data_row = {
'type': DATA_ROW,
'entityId': self.entity.get_marquee_id() if isinstance(self.entity, Entity) else self.entity,
'entityType': self.entity.entity_type().value if isinstance(self.entity, Entity) else ''
}
if len(self.overrides):
data_row['overrides'] = [override.as_dict() for override in self.overrides]
return data_row
@classmethod
def from_dict(cls, obj, reference_list):
overrides = []
for override_dict in obj.get('overrides', []):
override_type = override_dict.get('type')
if override_type == PROCESSOR_OVERRIDE:
override = ProcessorOverride.from_dict(override_dict, reference_list)
elif override_type == DIMENSIONS_OVERRIDE:
override = DimensionsOverride.from_dict(override_dict, reference_list)
else:
override = ValueOverride.from_dict(override_dict, reference_list)
overrides.append(override)
data_row = DataRow(entity=None, overrides=overrides) # Entity gets resolved later
reference_list.append({
'type': DATA_ROW,
'entityId': obj.get('entityId', ''),
'entityType': obj.get('entityType', ''),
'reference': data_row
})
return data_row
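# Illustrative construction of a row (the entity and column names here are
# assumptions, not part of this module):
#
#   row = DataRow(
#       entity=my_asset,  # any gs_quant Entity resolved elsewhere
#       overrides=[ValueOverride(column_names=['Price'], value=42.0)])
#   row.as_dict()  # -> {'type': 'dataRow', 'entityId': ..., 'overrides': [...]}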
| 33.633803
| 109
| 0.63847
|
4a1332b067a53bc252d061d7330f2b48a51e147c
| 17,033
|
py
|
Python
|
kilt/labrinth.py
|
Jefaxe/kilt
|
36885faecc410d7bd7d0248892b37992dbf2a839
|
[
"MIT"
] | 3
|
2021-04-02T19:14:56.000Z
|
2021-04-13T11:37:40.000Z
|
kilt/labrinth.py
|
Jefaxe/kilt
|
36885faecc410d7bd7d0248892b37992dbf2a839
|
[
"MIT"
] | null | null | null |
kilt/labrinth.py
|
Jefaxe/kilt
|
36885faecc410d7bd7d0248892b37992dbf2a839
|
[
"MIT"
] | null | null | null |
import json
import logging
import os
import traceback
import urllib.error
import urllib.request
import webbrowser
from kilt import error, config, version
from PIL import Image
labrinth_mod = "https://api.modrinth.com/api/v1/mod"
kilt_doc = "https://github.com/Jefaxe/Kilt/wiki"
labrinth_doc = "https://github.com/modrinth/labrinth/wiki/API-Documentation"
# sets up logging
logging.basicConfig(format="%(levelname)s: %(message)s [%(lineno)d]", level=config.global_level)
logger = logging.getLogger()
class Mod(object):
def define_page(self, mod_struct):
self.name = mod_struct["title"]
self.body = self.long_desc = mod_struct["body"]
self.desc = self.description = mod_struct["description"]
self.id = mod_struct["id"]
def define_stats(self, mod_struct, author="unknown"):
self.date_published = mod_struct["published"]
self.last_updated = mod_struct["updated"]
self.author = author
self.author_url = "https://modrinth.com/user/" + self.author
self.icon_link = mod_struct["icon_url"]
self.license = mod_struct["license"] # this is a dict
self.downloads = mod_struct["downloads"]
self.followers = mod_struct["followers"]
self.discord = mod_struct["discord_url"]
self.donations = mod_struct["donation_urls"]
self.home = "https://modrinth.com/mod/{}".format(mod_struct["slug"])
self.source = mod_struct["source_url"]
self.issues = mod_struct["issues_url"]
def define_categories(self, mod_struct):
self.categories = mod_struct["categories"] # this is a list.
self.mc_versions = mod_struct["versions"] # list again
        self.client_req = mod_struct["client_side"] == "required"
        self.server_req = mod_struct["server_side"] == "required"
        self.client_opt = mod_struct["client_side"] == "optional"
        self.server_opt = mod_struct["server_side"] == "optional"
        self.plugin = self.server_req and not self.client_req
        self.client_only = self.client_req and not self.server_req
        self.content_mod = self.client_req and self.server_req
def init_version(self, mod_struct, spec_version, mcversion=None):
_localSite = labrinth_mod + "/"
http_response = urllib.request.urlopen
try:
mod_version_data = json.loads(
http_response(_localSite + "{}/version".format(self.id)).read())[
0]
if spec_version is not None:
found = False
versions = json.loads(http_response(_localSite + self.id + "/version").read())
for index_value in versions:
if index_value["version_number"] == spec_version:
mod_version_data = \
json.loads(http_response(_localSite + self.id + "/version").read())[
versions.index(index_value)]
found = True
if not found:
                    raise error.SpecificVersionNotFound(
                        "{} is not a version of '{}'".format(spec_version, self.name))
self.version = mod_version_data["version_number"]
self.loaders = \
mod_version_data["loaders"]
self.latest_mcversion = \
mod_version_data["game_versions"][-1] if mcversion is None else mcversion
except IndexError: # there is no version
self.version = None
self.loaders = []
self.latest_mcversion = None
        self.isFabric = self.is_fabric = "fabric" in self.loaders
        self.isForge = self.is_forge = "forge" in self.loaders
def __init__(self, mod_struct, author="unknown", spec_version=None, mcversion=None):
self.define_page(mod_struct)
self.define_stats(mod_struct, author=author)
self.init_version(mod_struct, spec_version=spec_version, mcversion=mcversion)
self.define_categories(mod_struct)
self.sha1 = None
self.downloaded = False
def save_icon(self, path=None, createTree=True, resolution=512):
if path is None:
path = "icons/" + self.name + ".png"
if createTree:
os.makedirs("".join(path.rsplit("/", 1)[:-1]), exist_ok=True)
with open(path, "wb") as file:
file.write(urllib.request.urlopen(self.icon_link).read())
if resolution != 512:
img = Image.open(path)
wpercent = (resolution / float(img.size[0]))
hsize = int((float(img.size[1]) * float(wpercent)))
img = img.resize((resolution, hsize), Image.ANTIALIAS)
img.save(path)
def web_open(self, siteType="home", index_of_donation=0, open_new_tab=False):
new_window = 1 if open_new_tab else 0
if siteType == "home":
webbrowser.open(self.home, new=new_window)
return True
elif siteType == "discord":
webbrowser.open(self.discord, new=new_window)
return True
elif siteType == "donation":
webbrowser.open(self.donations[index_of_donation], new=new_window)
elif siteType == "source":
webbrowser.open(self.source, new=new_window)
return True
elif siteType == "issues":
webbrowser.open(self.issues, new=new_window)
return True
else:
return False
def download(self, download_folder="mods", specific_version="will default to self.version"):
specific_version = self.version if specific_version == "will default to self.version" else specific_version
# downloads
http_response = urllib.request.urlopen
_localSite = labrinth_mod + "/"
try:
os.makedirs(download_folder, exist_ok=True)
try:
if specific_version is not None:
found = False
versions = json.loads(http_response(_localSite + self.id + "/version").read())
for index_value in versions:
if index_value["version_number"] == specific_version:
mod_version = \
json.loads(http_response(_localSite + self.id + "/version").read())[
versions.index(index_value)][
"files"][0]
found = True
self.version = specific_version
if not found:
raise error.SpecificVersionNotFound(
"{} is not a version of '{}'".format(specific_version, self.name))
else:
mod_version = json.loads(http_response(_localSite + self.id + "/version").read())[0][
"files"][0]
filename = mod_version["filename"]
downloadLink = mod_version[
"url"]
self.sha1 = mod_version["hashes"][
"sha1"]
except IndexError:
raise error.NoVersionFound("mod '{}' has no versions".format(self.name))
            try:
                if filename in os.listdir(download_folder):
                    logging.debug(
                        "[Kilt] {} is already downloaded (note: only the filename was "
                        "checked, not the SHA1 hash)".format(filename))
                    self.downloaded = True
                else:
                    logging.debug(
                        "[Kilt] Downloading {mod} from {url}".format(mod=self.name,
                                                                     url=downloadLink))
                    # Save under the API-reported filename so the existence
                    # check above matches on the next run.
                    with open(download_folder + "/" + filename, "wb") as modsave:
                        modsave.write(http_response(downloadLink).read())
                    self.downloaded = True
            except UnboundLocalError:
                pass
except urllib.error.HTTPError:
logging.critical(
"[Labrinth] COULD NOT DOWNLOAD MOD {} because: {}".format(self.name,
traceback.format_exc()))
return self.downloaded
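# Illustrative Mod usage (a sketch; `mod` stands for a Mod instance as
# returned by get()/search() below):
#
#     mod.save_icon(resolution=128)
#     mod.web_open("source")
#     mod.download(download_folder="mods")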
def removekey(d, key):
r = dict(d)
del r[key]
return r
def get_number_of_mods():
return json.loads(urllib.request.urlopen(labrinth_mod + "?").read())[
"total_hits"]
# alias
number_of_mods = get_number_of_mods
def get(search="", mod_id=None, logging_level=config.global_level, modlist=config.modlist_default,
index="relevance",
offset=0,
limit=10, saveDescriptionToFile=config.description_default, search_array=None,
repeat=1, mod_versions=None, categories_meilisearch="", license_=None, mcversions=None, client_side=None,
server_side=None):
# note mod_versions MUST be indexed 1-1 with search_array!!
# create local variables for CPython optimized lookup
if mod_versions is None:
mod_versions = []
if search_array is None:
search_array = []
if mcversions is None:
mcversions = []
http_response = urllib.request.urlopen
_localSite = labrinth_mod + "/"
MOD_OBJECTS = []
logger = logging.getLogger()
logger.setLevel(logging_level)
# make sure arguments are correct
side_dict = {"True": "required",
"False": "unsupported",
"required": "required",
"unsupported": "unsupported",
"None": None}
client_side = side_dict[str(client_side)]
server_side = side_dict[str(server_side)]
logging.debug("Server side: {} | Client Side: {}".format(server_side, client_side))
if index not in {"newest", "updated", "downloads", "relevance"}:
raise error.InvalidArgument(
"{} (index/sort) needs to be either 'newest', 'updated', 'downloads', or 'relevance'".format(index))
    if type(limit) is not int or not 0 <= limit <= 100:
        raise error.InvalidArgument("{} (limit) is not an integer in the range 0-100.".format(limit))
    if type(offset) is not int or not 0 <= offset <= 100:
        raise error.InvalidArgument("{} (offset) is not an integer in the range 0-100.".format(offset))
    if type(repeat) is not int or repeat <= 0:
        raise error.InvalidArgument("{} (repeat) is not a positive integer.".format(repeat))
    if client_side not in {"required", "unsupported", None}:
        raise error.InvalidArgument(
            "{} (client_side) needs to be either `required` or `unsupported`".format(client_side))
    if server_side not in {"required", "unsupported", None}:
        raise error.InvalidArgument(
            "{} (server_side) needs to be either `required` or `unsupported`".format(server_side))
# patch arguments
if search != "":
logging.info(
"Using `search` completely disables `search_array. Also note that one element in search_array is faster than using search itself.")
search_array = [search]
search_array = list(map(lambda st: str.replace(st, " ", "%20"), search_array))
if not search_array:
search_array = [""]
categories_meilisearch = categories_meilisearch.replace(" ", "%20")
logging.debug("Mods to search for: {}".format(", ".join(search_array)))
    if saveDescriptionToFile:  # any truthy value
if type(saveDescriptionToFile) is bool:
saveDescriptionToFile = "descriptions.txt"
with open(saveDescriptionToFile, "w") as file:
file.write("Mod Descriptions\n")
    if modlist:  # any truthy value
if type(modlist) is bool:
modlist = "modlist.html"
with open(modlist, "w") as file:
file.write("""<!DOCTYPE html>
<html>
<head>
<title>Modlist</title>
</head>
<body>""")
for offset in range(offset, repeat):
mod_ver = mod_versions[offset] if mod_versions else None
if mod_id:
try:
mod_struct = json.loads(http_response(_localSite + mod_id).read())
except urllib.error.HTTPError:
raise error.InvalidModId("{} is not a valid modrinth mod id".format(mod_id))
mod_object = Mod(mod_struct, spec_version=mod_ver, mcversion=mcversions[0] if mcversions else None)
MOD_OBJECTS.append(mod_object)
for this_search in search_array:
logging.debug("Searching for {}".format(this_search))
facets_bool = False
facets_string = "["
            if license_ is not None:
                facets_string += '["license:{}"],'.format(license_)
                facets_bool = True
            if mcversions:
                for mcv in mcversions:
                    facets_string += '["versions:{}"],'.format(mcv)
                facets_bool = True
            if client_side is not None:
                facets_string += '["client_side:{}"],'.format(client_side)
                facets_bool = True
            if server_side is not None:
                facets_string += '["server_side:{}"],'.format(server_side)
                facets_bool = True
            if facets_bool:
                logging.debug("Fancy! Using facets i see!")
                # Strip the trailing comma before closing the JSON array.
                facets_string = facets_string[:-1]
                facets_string += "]"
facets = urllib.parse.quote(facets_string)
modSearch = labrinth_mod + "?query={}&limit={}&index={}&offset={}&filters={}&facets={f}".format(
this_search, limit, index, offset, categories_meilisearch, f=facets)
else:
modSearch = labrinth_mod + "?query={}&limit={}&index={}&offset={}&filters={}".format(
this_search, limit, index, offset, categories_meilisearch)
logging.debug("Using {}".format(modSearch))
modSearchJson = json.loads(http_response(modSearch).read())
try:
logging.debug("{} is the {} in search_array".format(this_search, search_array.index(this_search)))
logging.debug(modSearchJson)
mod_response = modSearchJson["hits"][0]
logging.debug("{} is the mod_response of {}".format(mod_response, this_search))
            except IndexError:
                if offset == 0 and repeat == 1:
                    logging.info("There were no results for your search")
                    raise error.EndOfSearch("No results found for your query")
                elif offset == 0 and repeat != 1:
                    logging.info("You hit the end of your search!")
                    raise error.EndOfSearch(
                        "You attempted to access search result {} but {} was the max".format(offset + 1, offset))
                else:
                    logging.info(traceback.format_exc())
                    # No hit for this term; move on instead of falling through
                    # with mod_response unbound.
                    continue
mod_struct = json.loads(
http_response(_localSite + str(mod_response["mod_id"].replace("local-", ""))).read())
mod_struct_minus_body = removekey(mod_struct, "body")
mod_ver = mod_versions[search_array.index(this_search)] if len(mod_versions) == len(search_array) else None
mod_object = Mod(mod_struct, author=mod_response["author"], spec_version=mod_ver)
MOD_OBJECTS.append(mod_object)
logging.debug("[Kilt] Mod Objects are: {}".format(MOD_OBJECTS))
logging.debug(
"[Labrinth] Requested mod json(minus body): {json}".format(json=mod_struct_minus_body))
# logging.debug("[Modrinth]: {json}".format(json=modSearchJson)
# output events
if saveDescriptionToFile:
with open(saveDescriptionToFile, "a") as desc:
desc.write(mod_struct_minus_body["title"] + ": " + mod_struct_minus_body["description"] + "\n")
if modlist:
with open(modlist, "a") as file:
                    file.write(
                        '<img src="{}" width="64" height="64" alt="{}"><a href="{}">{} (by {})</a><p></p>'.format(
                            mod_struct_minus_body["icon_url"],
                            mod_struct_minus_body["title"],
                            mod_response["page_url"],
                            mod_struct_minus_body["title"],
                            mod_response["author"]))
    # close the modlist HTML document
if modlist:
with open(modlist, "a") as file:
file.write(""" </body>
</html>""")
return MOD_OBJECTS
# alias
search = get
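# Illustrative search (a sketch; requires network access to the Modrinth
# labrinth API):
#
#     mods = get(search="sodium", limit=5, index="downloads")
#     for mod in mods:
#         print(mod.name, mod.version)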
if __name__ == "__main__":
print("don't run this")
avg_line_length: 47.845506 | max_line_length: 143 | alphanum_fraction: 0.573651
4a13336112c6e9da6a175dd29d1d7240b8e42a1b | 135 | py | Python | never_saiddit/reddit/tests/utils.py | Damgaard/Never-Saiddit | d2b0bac0a39da0f21d8a0e5ed46094786615c41f | ["MIT"] | stars: null | issues: null | forks: null
class FakeReddit(object):
    """A faked reddit instance"""

    class auth(object):
        # Static so it works whether accessed on the class or an instance.
        @staticmethod
        def authorize(code):
            return "1234"
avg_line_length: 16.875 | max_line_length: 33 | alphanum_fraction: 0.562963
4a13350bd71824ce81e11c55db45a28987fb795c | 3,763 | py | Python | stonesoup/hypothesiser/distance.py | JPompeus/Stone-Soup | 030c60aaf5ff92d7bb53f06e350c0bf58c9af037 | ["MIT"] | stars: null | issues: 4 (2020-03-10 to 2020-03-23) | forks: 1 (2019-12-09)
# -*- coding: utf-8 -*-
from .base import Hypothesiser
from ..base import Property
from ..measures import Measure
from ..predictor import Predictor
from ..types.multihypothesis import MultipleHypothesis
from ..types.hypothesis import SingleDistanceHypothesis
from ..types.detection import MissedDetection
from ..updater import Updater
class DistanceHypothesiser(Hypothesiser):
"""Prediction Hypothesiser based on a Measure
Generate track predictions at detection times and score each hypothesised
prediction-detection pair using the distance of the supplied
:class:`Measure` class.
"""
predictor = Property(
Predictor,
doc="Predict tracks to detection times")
updater = Property(
Updater,
doc="Updater used to get measurement prediction")
measure = Property(
Measure,
doc="Measure class used to calculate the distance between two states.")
missed_distance = Property(
float,
default=float('inf'),
doc="Distance for a missed detection. Default is set to infinity")
include_all = Property(
bool,
default=False,
doc="If `True`, hypotheses beyond missed distance will be returned. "
"Default `False`")
def hypothesise(self, track, detections, timestamp):
""" Evaluate and return all track association hypotheses.
For a given track and a set of N available detections, return a
MultipleHypothesis object with N+1 detections (first detection is
        a 'MissedDetection'), each with an associated distance measure.
Parameters
----------
track: :class:`~.Track`
The track object to hypothesise on
detections: :class:`list`
A list of :class:`~Detection` objects, representing the available
detections.
timestamp: :class:`datetime.datetime`
A timestamp used when evaluating the state and measurement
            predictions. Note that if a given detection has a non-empty
timestamp, then prediction will be performed according to
the timestamp of the detection.
Returns
-------
: :class:`~.MultipleHypothesis`
A container of :class:`~SingleDistanceHypothesis` objects
"""
hypotheses = list()
# Common state & measurement prediction
prediction = self.predictor.predict(track.state, timestamp=timestamp)
measurement_prediction = self.updater.predict_measurement(
prediction)
# Missed detection hypothesis with distance as 'missed_distance'
hypotheses.append(
SingleDistanceHypothesis(
prediction,
MissedDetection(timestamp=timestamp),
self.missed_distance,
measurement_prediction))
# True detection hypotheses
for detection in detections:
# Re-evaluate prediction
prediction = self.predictor.predict(
track.state, timestamp=detection.timestamp)
# Compute measurement prediction and distance measure
measurement_prediction = self.updater.predict_measurement(
prediction, detection.measurement_model)
distance = self.measure(measurement_prediction, detection)
if self.include_all or distance < self.missed_distance:
# True detection hypothesis
hypotheses.append(
SingleDistanceHypothesis(
prediction,
detection,
distance,
measurement_prediction))
return MultipleHypothesis(sorted(hypotheses, reverse=True))
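# Illustrative construction (a sketch; the Kalman classes and Mahalanobis
# measure live elsewhere in Stone Soup and are not imported by this module):
#
#     from stonesoup.predictor.kalman import KalmanPredictor
#     from stonesoup.updater.kalman import KalmanUpdater
#     from stonesoup.measures import Mahalanobis
#
#     hypothesiser = DistanceHypothesiser(
#         predictor=KalmanPredictor(transition_model),
#         updater=KalmanUpdater(measurement_model),
#         measure=Mahalanobis(),
#         missed_distance=4)
#     hypotheses = hypothesiser.hypothesise(track, detections, timestamp)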
avg_line_length: 36.533981 | max_line_length: 79 | alphanum_fraction: 0.638852
4a1335c495cf401791752eb42cfd2d8b580c9e40 | 2,381 | py | Python | driver/options.py | koltenfluckiger/pyseleniummanagement | 46403adb98d0495b61f8273da326ba117178043f | ["MIT", "Unlicense"] | stars: null | issues: null | forks: null
try:
from abc import ABC
    from typing import List, Optional
except ImportError as err:
print("Unable to import: {}".format(err))
exit()
from selenium.webdriver.firefox.options import Options as FirefoxOpts
from selenium.webdriver.chrome.options import Options as ChromeOpts
from selenium.webdriver.safari.service import Service as SafariService
class BrowserOptions(ABC):
def factory(self) -> object:
"""Factory function returning options object"""
class ChromeOptions(BrowserOptions):
    def __init__(self, arguments: Optional[List[str]] = None,
                 extension_paths: Optional[List[str]] = None, binary_path: Optional[str] = None) -> None:
        # Use None defaults to avoid sharing mutable default lists.
        self.arguments = arguments or []
        self.extension_paths = extension_paths or []
        self.binary_path = binary_path
def factory(self) -> object:
try:
options = ChromeOpts()
for arg in self.arguments:
options.add_argument(arg)
for ext_path in self.extension_paths:
options.add_extension(ext_path)
if self.binary_path:
options.binary_location = self.binary_path
self.options = options
return options
except Exception as err:
print(err)
class FirefoxOptions(BrowserOptions):
    def __init__(self, arguments: Optional[List[str]] = None,
                 extension_paths: Optional[List[str]] = None) -> None:
        # Use None defaults to avoid sharing mutable default lists.
        self.arguments = arguments or []
        self.extension_paths = extension_paths or []
def factory(self) -> object:
try:
options = FirefoxOpts()
for arg in self.arguments:
options.add_argument(arg)
for ext_path in self.extension_paths:
options.add_extension(ext_path)
self.options = options
return options
except Exception as err:
print(err)
class SafariOptions(BrowserOptions):
    def __init__(self, executable_path: str, arguments: Optional[List[str]] = None) -> None:
        self.executable_path = executable_path
        self.arguments = arguments or []
def factory(self) -> List:
try:
opts = []
for arg in self.arguments:
opts.append(arg)
self.opts = opts
            service = SafariService(executable_path=self.executable_path, service_args=opts)
return service
except Exception as err:
print(err)
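# Illustrative usage (a sketch; how the returned options object is consumed
# depends on the selenium version in use):
#
#     from selenium import webdriver
#     chrome_opts = ChromeOptions(arguments=["--headless"]).factory()
#     driver = webdriver.Chrome(options=chrome_opts)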
avg_line_length: 30.922078 | max_line_length: 86 | alphanum_fraction: 0.614028
4a13361900f9564ebea3412ece91ae5f77ce4bda | 6,958 | py | Python | commands/role.py | Vepnar/UserAnalyzer | 7059eaf5eb37ae46ede60d688f3733e7cf372f7b | ["Apache-2.0"] | stars: null | issues: null | forks: 2 (2018-09-10 to 2018-09-28)
from bot import dbot as bot
import discordutil as du
#Add role command
@bot.command(pass_context=True)
async def addrole(ctx):
#Get the author
author = ctx.message.author
#Parse the args from the command
args = du.splitCmd(ctx.message.content)
#Check if the user has permission to add roles
if du.hasPermissions(author) < 1:
#Send a message if they dont
await ctx.send(du.get('role.nopermission',author=author.name,level=2))
#Stop everything
return
#Check if there is 1 role mention
    if len(ctx.message.role_mentions) != 1:
        #Send a message if there isn't exactly one
await ctx.send(du.get('role.onemention',author=author.name))
#Stop everything
return
#Store the role in a easier to use way
role = ctx.message.role_mentions[0]
#Check how the command is used
    if len(args) == 1:
#Create the role
if not du.createRole(role.id,args[0],0):
#Send a message if the creation failed
await ctx.send(du.get('role.createfailed',author=author.name))
else:
#Send a message if it was successfull
await ctx.send(du.get('role.createsuccessfull',author=author.name,role=args[0]))
    elif len(args) == 2:
#Turn second argument into a int
group = du.getInt(args[1])
#Check if the int is nothing
if group is None:
            #Send an error message that it is not an int
await ctx.send(du.get('role.notanumber',author=author.name))
#Stop everything
return
#Try to create to role
if not du.createRole(role.id,args[0],group):
#Send a message that it failed
await ctx.send(du.get('role.createfailed',author=author.name))
else:
#Send a message that it was successfull
await ctx.send(du.get('role.createsuccessfull',author=author.name,role=args[0]))
else:
#Send the help messgae
await ctx.send(du.get('role.addhelp'))
#Get a role command
# Groups lower than 0 are only obtainable through events
# Groups higher than 0 can conflict with other roles in the same group
# Roles with group 0 are freely selectable by users
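# Illustrative role rows (hypothetical data): ("red-team", 1) and
# ("blue-team", 1) conflict, so picking one removes the other;
# ("member", 0) can be toggled freely; ("event-winner", -1) is event-only.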
@bot.command(pass_context=True)
async def role(ctx):
#Get the author
author = ctx.message.author
#Get the server
server = author.guild
#Parse the args from the command
args = du.splitCmd(ctx.message.content)
#Check if the args are right
if len(args) == 1:
#Get the id of the role and the group
roleId,group = du.getRole(args[0])
#Check if the role exists
if roleId is None:
#Send a not found message if the role is not found
await ctx.send(du.get('role.notfound',author=author.name))
#Check if the role is selectable
        elif group < 0:
#Send error if it is not
await ctx.send(du.get('role.notavailable',author=author.name))
#Check if it is a group role
        elif group > 0:
#Get all the other roles in the group from the database
ids = du.getRolesByGroup(group)
#Make an empty array for the roles
noroles = []
#Loop thru the data from the database
for norole in ids:
#Parse the id of the role and add it to the new array
noroles.append(norole[0])
#Loop thru all the roles of the author
for role in author.roles:
#Check if the role of the author conflicts with the new one
if role.id in noroles:
#Remove the conflicting one
await author.remove_roles(role)
#Look for the new role
for role in server.roles:
#Check if it is the new role
if roleId == role.id:
#Add the new role
await author.add_roles(role)
#Send a message if the new role is added
await ctx.send(du.get('role.added',author=author.name))
#Stop this
return
#Send a message that the new role is not found
await ctx.send(du.get('role.notfound',author=author.name))
#Check if the role is group type 0
elif group == 0:
#Check if the user already has this role
for role in author.roles:
                #Check if the id is the same
if roleId == role.id:
#Remove it
await author.remove_roles(role)
#Show a message that it is deleted
await ctx.send(du.get('role.removed',author=author.name))
#Stop everything
return
#Look for the new roles
for role in server.roles:
#Check if it is the same
if roleId == role.id:
#Add it
await author.add_roles(role)
#Send a message that it is added
await ctx.send(du.get('role.added',author=author.name))
#Stop this
return
#Send a message that the new role is not found
await ctx.send(du.get('role.notfound',author=author.name))
else:
#Send a help message
await ctx.send(du.get('role.gethelp'))
@bot.command(pass_context=True)
async def roles(ctx):
#Get all roles from the database
roles = du.getRoles()
#Get the author from the message
author = ctx.message.author
#Check if there are roles in the database
if not roles:
#Return that there are no roles
await ctx.send(du.get('role.noroles',author=author.name))
#Stop this event
return
#Get a nice title for the message
msg = du.get('role.title')
#Loop thru the roles
for role in roles:
#Add the role to the message with formatting
        msg += '\n{:10s} {:3d}'.format(role[0].title(), role[1])
    #Close the message
    msg += '```'
#And send it
await ctx.send(msg)
@bot.command(pass_context=True)
async def delrole(ctx):
#Get the author
author = ctx.message.author
#Parse the args from the command
args = du.splitCmd(ctx.message.content)
#Check if the user has permission to add roles
if du.hasPermissions(author) < 1:
#Send a message if they dont
await ctx.send(du.get('role.nopermission',author=author.name,level=2))
#Stop everything
return
#Check the args
    if len(args) != 1:
        #Send a message if they aren't
await ctx.send(du.get('role.onemention',author=author.name))
#Stop everything
return
#Execute a sql command to delete the role
du.voidExecute('DELETE FROM Roles WHERE name=?',[args[0]])
#Send a message that it was successful
    await ctx.send(du.get('role.deleted',author=author.name))
avg_line_length: 39.089888 | max_line_length: 92 | alphanum_fraction: 0.590112
4a13367132bd19014188cc7d7307dd2b744bc160 | 1,140 | py | Python | xlsxwriter/test/comparison/test_page_breaks05.py | dthadi3/XlsxWriter | f1801e82240aa9c746ce14948ef95990b83162cf | ["BSD-2-Clause-FreeBSD"] | stars: 1 (2020-07-01) | issues: null | forks: null
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2020, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('page_breaks05.xlsx')
self.ignore_files = ['xl/printerSettings/printerSettings1.bin',
'xl/worksheets/_rels/sheet1.xml.rels']
self.ignore_elements = {'[Content_Types].xml': ['<Default Extension="bin"'],
'xl/worksheets/sheet1.xml': ['<pageMargins', '<pageSetup']}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with page breaks."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_v_pagebreaks([8, 3, 1, 0])
worksheet.write('A1', 'Foo')
workbook.close()
self.assertExcelEqual()
avg_line_length: 27.804878 | max_line_length: 91 | alphanum_fraction: 0.59386
4a133851cf2b4c858e76127d0574928ac3588a4a | 1,693 | py | Python | sentiment_model.py | GongCQ/pytorch-sentiment-analysis | 0850c2dc1884a71e1b2a27bcf5b186020c9b3dd7 | ["MIT"] | stars: null | issues: null | forks: null
import torch
import torch.nn as nn
class BERTGRUSentiment(nn.Module):
def __init__(self,
bert,
hidden_dim,
output_dim,
n_layers,
bidirectional,
dropout,
use_mask,
use_ppb):
super().__init__()
self.bert = bert
embedding_dim = bert.config.to_dict()['hidden_size']
self.rnn = nn.GRU(embedding_dim,
hidden_dim,
num_layers=n_layers,
bidirectional=bidirectional,
batch_first=True,
dropout=0 if n_layers < 2 else dropout)
self.out = nn.Linear(hidden_dim * 2 if bidirectional else hidden_dim, output_dim)
self.dropout = nn.Dropout(dropout)
self.use_mask = use_mask
self.use_ppb = use_ppb
def forward(self, text):
# text = [batch size, sent len]
        with torch.no_grad():
            # Mask out padding tokens (id 0) so BERT's attention ignores them.
            attention_mask = (text != 0).long()
            embedded = self.bert(text, attention_mask=attention_mask, output_all_encoded_layers=False)[0]
# embedded = [batch size, sent len, emb dim]
_, hidden = self.rnn(embedded)
        # hidden = [n layers * n directions, batch size, hid dim]
if self.rnn.bidirectional:
hidden = self.dropout(torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1))
else:
hidden = self.dropout(hidden[-1, :, :])
# hidden = [batch size, hid dim]
output = self.out(hidden)
# output = [batch size, out dim]
return output
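# Illustrative construction (a sketch; the output_all_encoded_layers kwarg in
# forward() implies the older pytorch-pretrained-bert BertModel API):
#
#     from pytorch_pretrained_bert import BertModel
#     bert = BertModel.from_pretrained('bert-base-uncased')
#     model = BERTGRUSentiment(bert, hidden_dim=256, output_dim=1, n_layers=2,
#                              bidirectional=True, dropout=0.25,
#                              use_mask=True, use_ppb=True)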
avg_line_length: 27.306452 | max_line_length: 105 | alphanum_fraction: 0.517425
4a13389a0ffe20ca9b2981a36dda9e935cf39ee7 | 18,063 | py | Python | rest_framework/tests/generics.py | forgingdestiny/django-rest-framework | f7fdcd55e451e4a37c518e1916dc2be513edbab5 | ["Unlicense"] | stars: 1 (2015-02-26) | issues: null | forks: null
from __future__ import unicode_literals
from django.db import models
from django.shortcuts import get_object_or_404
from django.test import TestCase
from rest_framework import generics, serializers, status
from rest_framework.tests.utils import RequestFactory
from rest_framework.tests.models import BasicModel, Comment, SlugBasedModel
from rest_framework.compat import six
import json
factory = RequestFactory()
class RootView(generics.ListCreateAPIView):
"""
Example description for OPTIONS.
"""
model = BasicModel
class InstanceView(generics.RetrieveUpdateDestroyAPIView):
"""
Example description for OPTIONS.
"""
model = BasicModel
class SlugSerializer(serializers.ModelSerializer):
slug = serializers.Field() # read only
class Meta:
model = SlugBasedModel
exclude = ('id',)
class SlugBasedInstanceView(InstanceView):
"""
A model with a slug-field.
"""
model = SlugBasedModel
serializer_class = SlugSerializer
class TestRootView(TestCase):
def setUp(self):
"""
Create 3 BasicModel instances.
"""
items = ['foo', 'bar', 'baz']
for item in items:
BasicModel(text=item).save()
self.objects = BasicModel.objects
self.data = [
{'id': obj.id, 'text': obj.text}
for obj in self.objects.all()
]
self.view = RootView.as_view()
def test_get_root_view(self):
"""
GET requests to ListCreateAPIView should return list of objects.
"""
request = factory.get('/')
with self.assertNumQueries(1):
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, self.data)
def test_post_root_view(self):
"""
POST requests to ListCreateAPIView should create a new object.
"""
content = {'text': 'foobar'}
request = factory.post('/', json.dumps(content),
content_type='application/json')
with self.assertNumQueries(1):
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data, {'id': 4, 'text': 'foobar'})
created = self.objects.get(id=4)
self.assertEqual(created.text, 'foobar')
def test_put_root_view(self):
"""
PUT requests to ListCreateAPIView should not be allowed
"""
content = {'text': 'foobar'}
request = factory.put('/', json.dumps(content),
content_type='application/json')
with self.assertNumQueries(0):
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
self.assertEqual(response.data, {"detail": "Method 'PUT' not allowed."})
def test_delete_root_view(self):
"""
DELETE requests to ListCreateAPIView should not be allowed
"""
request = factory.delete('/')
with self.assertNumQueries(0):
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
self.assertEqual(response.data, {"detail": "Method 'DELETE' not allowed."})
def test_options_root_view(self):
"""
OPTIONS requests to ListCreateAPIView should return metadata
"""
request = factory.options('/')
with self.assertNumQueries(0):
response = self.view(request).render()
expected = {
'parses': [
'application/json',
'application/x-www-form-urlencoded',
'multipart/form-data'
],
'renders': [
'application/json',
'text/html'
],
'name': 'Root',
'description': 'Example description for OPTIONS.'
}
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, expected)
def test_post_cannot_set_id(self):
"""
POST requests to create a new object should not be able to set the id.
"""
content = {'id': 999, 'text': 'foobar'}
request = factory.post('/', json.dumps(content),
content_type='application/json')
with self.assertNumQueries(1):
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data, {'id': 4, 'text': 'foobar'})
created = self.objects.get(id=4)
self.assertEqual(created.text, 'foobar')
class TestInstanceView(TestCase):
def setUp(self):
"""
        Create 3 BasicModel instances.
"""
items = ['foo', 'bar', 'baz']
for item in items:
BasicModel(text=item).save()
self.objects = BasicModel.objects
self.data = [
{'id': obj.id, 'text': obj.text}
for obj in self.objects.all()
]
self.view = InstanceView.as_view()
self.slug_based_view = SlugBasedInstanceView.as_view()
def test_get_instance_view(self):
"""
GET requests to RetrieveUpdateDestroyAPIView should return a single object.
"""
request = factory.get('/1')
with self.assertNumQueries(1):
response = self.view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, self.data[0])
def test_post_instance_view(self):
"""
POST requests to RetrieveUpdateDestroyAPIView should not be allowed
"""
content = {'text': 'foobar'}
request = factory.post('/', json.dumps(content),
content_type='application/json')
with self.assertNumQueries(0):
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
self.assertEqual(response.data, {"detail": "Method 'POST' not allowed."})
def test_put_instance_view(self):
"""
PUT requests to RetrieveUpdateDestroyAPIView should update an object.
"""
content = {'text': 'foobar'}
request = factory.put('/1', json.dumps(content),
content_type='application/json')
with self.assertNumQueries(2):
response = self.view(request, pk='1').render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {'id': 1, 'text': 'foobar'})
updated = self.objects.get(id=1)
self.assertEqual(updated.text, 'foobar')
def test_patch_instance_view(self):
"""
PATCH requests to RetrieveUpdateDestroyAPIView should update an object.
"""
content = {'text': 'foobar'}
request = factory.patch('/1', json.dumps(content),
content_type='application/json')
with self.assertNumQueries(2):
response = self.view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {'id': 1, 'text': 'foobar'})
updated = self.objects.get(id=1)
self.assertEqual(updated.text, 'foobar')
def test_delete_instance_view(self):
"""
DELETE requests to RetrieveUpdateDestroyAPIView should delete an object.
"""
request = factory.delete('/1')
with self.assertNumQueries(2):
response = self.view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(response.content, six.b(''))
ids = [obj.id for obj in self.objects.all()]
self.assertEqual(ids, [2, 3])
def test_options_instance_view(self):
"""
OPTIONS requests to RetrieveUpdateDestroyAPIView should return metadata
"""
request = factory.options('/')
with self.assertNumQueries(0):
response = self.view(request).render()
expected = {
'parses': [
'application/json',
'application/x-www-form-urlencoded',
'multipart/form-data'
],
'renders': [
'application/json',
'text/html'
],
'name': 'Instance',
'description': 'Example description for OPTIONS.'
}
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, expected)
def test_put_cannot_set_id(self):
"""
PUT requests to create a new object should not be able to set the id.
"""
content = {'id': 999, 'text': 'foobar'}
request = factory.put('/1', json.dumps(content),
content_type='application/json')
with self.assertNumQueries(2):
response = self.view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {'id': 1, 'text': 'foobar'})
updated = self.objects.get(id=1)
self.assertEqual(updated.text, 'foobar')
def test_put_to_deleted_instance(self):
"""
PUT requests to RetrieveUpdateDestroyAPIView should create an object
if it does not currently exist.
"""
self.objects.get(id=1).delete()
content = {'text': 'foobar'}
request = factory.put('/1', json.dumps(content),
content_type='application/json')
with self.assertNumQueries(3):
response = self.view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data, {'id': 1, 'text': 'foobar'})
updated = self.objects.get(id=1)
self.assertEqual(updated.text, 'foobar')
def test_put_as_create_on_id_based_url(self):
"""
PUT requests to RetrieveUpdateDestroyAPIView should create an object
at the requested url if it doesn't exist.
"""
content = {'text': 'foobar'}
# pk fields can not be created on demand, only the database can set the pk for a new object
request = factory.put('/5', json.dumps(content),
content_type='application/json')
with self.assertNumQueries(3):
response = self.view(request, pk=5).render()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
new_obj = self.objects.get(pk=5)
self.assertEqual(new_obj.text, 'foobar')
def test_put_as_create_on_slug_based_url(self):
"""
PUT requests to RetrieveUpdateDestroyAPIView should create an object
at the requested url if possible, else return HTTP_403_FORBIDDEN error-response.
"""
content = {'text': 'foobar'}
request = factory.put('/test_slug', json.dumps(content),
content_type='application/json')
with self.assertNumQueries(2):
response = self.slug_based_view(request, slug='test_slug').render()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data, {'slug': 'test_slug', 'text': 'foobar'})
new_obj = SlugBasedModel.objects.get(slug='test_slug')
self.assertEqual(new_obj.text, 'foobar')
class TestOverriddenGetObject(TestCase):
"""
Test cases for a RetrieveUpdateDestroyAPIView that does NOT use the
queryset/model mechanism but instead overrides get_object()
"""
def setUp(self):
"""
        Create 3 BasicModel instances.
"""
items = ['foo', 'bar', 'baz']
for item in items:
BasicModel(text=item).save()
self.objects = BasicModel.objects
self.data = [
{'id': obj.id, 'text': obj.text}
for obj in self.objects.all()
]
class OverriddenGetObjectView(generics.RetrieveUpdateDestroyAPIView):
"""
Example detail view for override of get_object().
"""
model = BasicModel
def get_object(self):
pk = int(self.kwargs['pk'])
return get_object_or_404(BasicModel.objects.all(), id=pk)
self.view = OverriddenGetObjectView.as_view()
def test_overridden_get_object_view(self):
"""
GET requests to RetrieveUpdateDestroyAPIView should return a single object.
"""
request = factory.get('/1')
with self.assertNumQueries(1):
response = self.view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, self.data[0])
# Regression test for #285
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
exclude = ('created',)
class CommentView(generics.ListCreateAPIView):
serializer_class = CommentSerializer
model = Comment
class TestCreateModelWithAutoNowAddField(TestCase):
def setUp(self):
self.objects = Comment.objects
self.view = CommentView.as_view()
def test_create_model_with_auto_now_add_field(self):
"""
Regression test for #285
https://github.com/tomchristie/django-rest-framework/issues/285
"""
content = {'email': 'foobar@example.com', 'content': 'foobar'}
request = factory.post('/', json.dumps(content),
content_type='application/json')
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
created = self.objects.get(id=1)
self.assertEqual(created.content, 'foobar')
# Test for particularly ugly regression with m2m in browseable API
class ClassB(models.Model):
name = models.CharField(max_length=255)
class ClassA(models.Model):
name = models.CharField(max_length=255)
childs = models.ManyToManyField(ClassB, blank=True, null=True)
class ClassASerializer(serializers.ModelSerializer):
childs = serializers.PrimaryKeyRelatedField(many=True, source='childs')
class Meta:
model = ClassA
class ExampleView(generics.ListCreateAPIView):
serializer_class = ClassASerializer
model = ClassA
class TestM2MBrowseableAPI(TestCase):
def test_m2m_in_browseable_api(self):
"""
Test for particularly ugly regression with m2m in browseable API
"""
request = factory.get('/', HTTP_ACCEPT='text/html')
        view = ExampleView.as_view()
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
class InclusiveFilterBackend(object):
def filter_queryset(self, request, queryset, view):
return queryset.filter(text='foo')
class ExclusiveFilterBackend(object):
def filter_queryset(self, request, queryset, view):
return queryset.filter(text='other')
class TestFilterBackendAppliedToViews(TestCase):
def setUp(self):
"""
Create 3 BasicModel instances to filter on.
"""
items = ['foo', 'bar', 'baz']
for item in items:
BasicModel(text=item).save()
self.objects = BasicModel.objects
self.data = [
{'id': obj.id, 'text': obj.text}
for obj in self.objects.all()
]
self.root_view = RootView.as_view()
self.instance_view = InstanceView.as_view()
self.original_root_backend = getattr(RootView, 'filter_backend')
self.original_instance_backend = getattr(InstanceView, 'filter_backend')
def tearDown(self):
setattr(RootView, 'filter_backend', self.original_root_backend)
setattr(InstanceView, 'filter_backend', self.original_instance_backend)
def test_get_root_view_filters_by_name_with_filter_backend(self):
"""
GET requests to ListCreateAPIView should return filtered list.
"""
setattr(RootView, 'filter_backend', InclusiveFilterBackend)
request = factory.get('/')
response = self.root_view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 1)
self.assertEqual(response.data, [{'id': 1, 'text': 'foo'}])
def test_get_root_view_filters_out_all_models_with_exclusive_filter_backend(self):
"""
GET requests to ListCreateAPIView should return empty list when all models are filtered out.
"""
setattr(RootView, 'filter_backend', ExclusiveFilterBackend)
request = factory.get('/')
response = self.root_view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, [])
def test_get_instance_view_filters_out_name_with_filter_backend(self):
"""
GET requests to RetrieveUpdateDestroyAPIView should raise 404 when model filtered out.
"""
setattr(InstanceView, 'filter_backend', ExclusiveFilterBackend)
request = factory.get('/1')
response = self.instance_view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(response.data, {'detail': 'Not found'})
def test_get_instance_view_will_return_single_object_when_filter_does_not_exclude_it(self):
"""
GET requests to RetrieveUpdateDestroyAPIView should return a single object when not excluded
"""
setattr(InstanceView, 'filter_backend', InclusiveFilterBackend)
request = factory.get('/1')
response = self.instance_view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {'id': 1, 'text': 'foo'})
avg_line_length: 37.166667 | max_line_length: 100 | alphanum_fraction: 0.627415
4a1338d9177bb33163c28b3ced027c4a5a94d2e5 | 9,729 | py | Python | src/autograd_hacks.py | rohancalum/Federated-Learning-PyTorch | 6785ea90df26ca2d4d5cefc3d08957bc7b807461 | ["MIT"] | stars: null | issues: null | forks: null
"""
Library for extracting interesting quantites from autograd, see README.md
Not thread-safe because of module-level variables
Notation:
o: number of output classes (exact Hessian), number of Hessian samples (sampled Hessian)
n: batch-size
do: output dimension (output channels for convolution)
di: input dimension (input channels for convolution)
Hi: per-example Hessian of matmul, shaped as matrix of [dim, dim], indices have been row-vectorized
Hi_bias: per-example Hessian of bias
Oh, Ow: output height, output width (convolution)
Kh, Kw: kernel height, kernel width (convolution)
Jb: batch output Jacobian of matmul, output sensitivity for example,class pair, [o, n, ....]
Jb_bias: as above, but for bias
A, activations: inputs into current layer
B, backprops: backprop values (aka Lop aka Jacobian-vector product) observed at current layer
"""
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
_supported_layers = ['Linear', 'Conv2d'] # Supported layer class types
_hooks_disabled: bool = False # work-around for https://github.com/pytorch/pytorch/issues/25723
_enforce_fresh_backprop: bool = False # global switch to catch double backprop errors on Hessian computation
def add_hooks(model: nn.Module) -> None:
"""
Adds hooks to model to save activations and backprop values.
The hooks will
1. save activations into param.activations during forward pass
2. append backprops to params.backprops_list during backward pass.
Call "remove_hooks(model)" to disable this.
Args:
model:
"""
global _hooks_disabled
_hooks_disabled = False
handles = []
for layer in model.modules():
if _layer_type(layer) in _supported_layers:
handles.append(layer.register_forward_hook(_capture_activations))
handles.append(layer.register_backward_hook(_capture_backprops))
model.__dict__.setdefault('autograd_hacks_hooks', []).extend(handles)
def remove_hooks(model: nn.Module) -> None:
"""
Remove hooks added by add_hooks(model)
"""
assert model == 0, "not working, remove this after fix to https://github.com/pytorch/pytorch/issues/25723"
if not hasattr(model, 'autograd_hacks_hooks'):
print("Warning, asked to remove hooks, but no hooks found")
else:
for handle in model.autograd_hacks_hooks:
handle.remove()
del model.autograd_hacks_hooks
def disable_hooks() -> None:
"""
Globally disable all hooks installed by this library.
"""
global _hooks_disabled
_hooks_disabled = True
def enable_hooks() -> None:
"""the opposite of disable_hooks()"""
global _hooks_disabled
_hooks_disabled = False
def is_supported(layer: nn.Module) -> bool:
"""Check if this layer is supported"""
return _layer_type(layer) in _supported_layers
def _layer_type(layer: nn.Module) -> str:
return layer.__class__.__name__
def _capture_activations(layer: nn.Module, input: List[torch.Tensor], output: torch.Tensor):
"""Save activations into layer.activations in forward pass"""
if _hooks_disabled:
return
assert _layer_type(layer) in _supported_layers, "Hook installed on unsupported layer, this shouldn't happen"
setattr(layer, "activations", input[0].detach())
def _capture_backprops(layer: nn.Module, _input, output):
"""Append backprop to layer.backprops_list in backward pass."""
global _enforce_fresh_backprop
if _hooks_disabled:
return
if _enforce_fresh_backprop:
assert not hasattr(layer, 'backprops_list'), "Seeing result of previous backprop, use clear_backprops(model) to clear"
_enforce_fresh_backprop = False
if not hasattr(layer, 'backprops_list'):
setattr(layer, 'backprops_list', [])
layer.backprops_list.append(output[0].detach())
def clear_backprops(model: nn.Module) -> None:
"""Delete layer.backprops_list in every layer."""
for layer in model.modules():
if hasattr(layer, 'backprops_list'):
del layer.backprops_list
def compute_grad1(model: nn.Module, loss_type: str = 'mean') -> None:
"""
    Compute per-example gradients and save them under 'param.grad1'. Must be called after loss.backward()
    Args:
        model:
        loss_type: either "mean" or "sum" depending on whether backpropped loss was averaged or summed over batch
"""
assert loss_type in ('sum', 'mean')
for layer in model.modules():
layer_type = _layer_type(layer)
if layer_type not in _supported_layers:
continue
assert hasattr(layer, 'activations'), "No activations detected, run forward after add_hooks(model)"
assert hasattr(layer, 'backprops_list'), "No backprops detected, run backward after add_hooks(model)"
assert len(layer.backprops_list) == 1, "Multiple backprops detected, make sure to call clear_backprops(model)"
A = layer.activations
n = A.shape[0]
if loss_type == 'mean':
B = layer.backprops_list[0] * n
else: # loss_type == 'sum':
B = layer.backprops_list[0]
if layer_type == 'Linear':
setattr(layer.weight, 'grad1', torch.einsum('ni,nj->nij', B, A))
if layer.bias is not None:
setattr(layer.bias, 'grad1', B)
elif layer_type == 'Conv2d':
A = torch.nn.functional.unfold(A, layer.kernel_size)
B = B.reshape(n, -1, A.shape[-1])
grad1 = torch.einsum('ijk,ilk->ijl', B, A)
shape = [n] + list(layer.weight.shape)
setattr(layer.weight, 'grad1', grad1.reshape(shape))
if layer.bias is not None:
setattr(layer.bias, 'grad1', torch.sum(B, dim=2))
def compute_hess(model: nn.Module) -> None:
"""Save Hessian under param.hess for each param in the model"""
for layer in model.modules():
layer_type = _layer_type(layer)
if layer_type not in _supported_layers:
continue
assert hasattr(layer, 'activations'), "No activations detected, run forward after add_hooks(model)"
assert hasattr(layer, 'backprops_list'), "No backprops detected, run backward after add_hooks(model)"
if layer_type == 'Linear':
A = layer.activations
B = torch.stack(layer.backprops_list)
n = A.shape[0]
o = B.shape[0]
A = torch.stack([A] * o)
Jb = torch.einsum("oni,onj->onij", B, A).reshape(n*o, -1)
H = torch.einsum('ni,nj->ij', Jb, Jb) / n
setattr(layer.weight, 'hess', H)
if layer.bias is not None:
setattr(layer.bias, 'hess', torch.einsum('oni,onj->ij', B, B)/n)
elif layer_type == 'Conv2d':
Kh, Kw = layer.kernel_size
di, do = layer.in_channels, layer.out_channels
A = layer.activations.detach()
A = torch.nn.functional.unfold(A, (Kh, Kw)) # n, di * Kh * Kw, Oh * Ow
n = A.shape[0]
B = torch.stack([Bt.reshape(n, do, -1) for Bt in layer.backprops_list]) # o, n, do, Oh*Ow
o = B.shape[0]
A = torch.stack([A] * o) # o, n, di * Kh * Kw, Oh*Ow
Jb = torch.einsum('onij,onkj->onik', B, A) # o, n, do, di * Kh * Kw
Hi = torch.einsum('onij,onkl->nijkl', Jb, Jb) # n, do, di*Kh*Kw, do, di*Kh*Kw
Jb_bias = torch.einsum('onij->oni', B)
Hi_bias = torch.einsum('oni,onj->nij', Jb_bias, Jb_bias)
setattr(layer.weight, 'hess', Hi.mean(dim=0))
if layer.bias is not None:
setattr(layer.bias, 'hess', Hi_bias.mean(dim=0))
def backprop_hess(output: torch.Tensor, hess_type: str) -> None:
"""
Call backprop 1 or more times to get values needed for Hessian computation.
Args:
output: prediction of neural network (ie, input of nn.CrossEntropyLoss())
hess_type: type of Hessian propagation, "CrossEntropy" results in exact Hessian for CrossEntropy
Returns:
"""
assert hess_type in ('LeastSquares', 'CrossEntropy')
global _enforce_fresh_backprop
n, o = output.shape
_enforce_fresh_backprop = True
if hess_type == 'CrossEntropy':
batch = F.softmax(output, dim=1)
mask = torch.eye(o).expand(n, o, o)
diag_part = batch.unsqueeze(2).expand(n, o, o) * mask
outer_prod_part = torch.einsum('ij,ik->ijk', batch, batch)
hess = diag_part - outer_prod_part
assert hess.shape == (n, o, o)
for i in range(n):
hess[i, :, :] = symsqrt(hess[i, :, :])
hess = hess.transpose(0, 1)
elif hess_type == 'LeastSquares':
hess = []
assert len(output.shape) == 2
batch_size, output_size = output.shape
id_mat = torch.eye(output_size)
for out_idx in range(output_size):
hess.append(torch.stack([id_mat[out_idx]] * batch_size))
    for out_idx in range(o):
        output.backward(hess[out_idx], retain_graph=True)
def symsqrt(a, cond=None, return_rank=False, dtype=torch.float32):
"""Symmetric square root of a positive semi-definite matrix.
See https://github.com/pytorch/pytorch/issues/25481"""
s, u = torch.symeig(a, eigenvectors=True)
cond_dict = {torch.float32: 1e3 * 1.1920929e-07, torch.float64: 1E6 * 2.220446049250313e-16}
if cond in [None, -1]:
cond = cond_dict[dtype]
above_cutoff = (abs(s) > cond * torch.max(abs(s)))
psigma_diag = torch.sqrt(s[above_cutoff])
u = u[:, above_cutoff]
B = u @ torch.diag(psigma_diag) @ u.t()
if return_rank:
return B, len(psigma_diag)
else:
return B
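# Illustrative end-to-end usage (a sketch; `model`, `data` and `targets` are
# placeholders for a model built from supported layers and a labelled batch):
#
#     add_hooks(model)
#     output = model(data)
#     F.cross_entropy(output, targets).backward()
#     compute_grad1(model)            # per-example gradients in param.grad1
#     clear_backprops(model)
#     backprop_hess(model(data), hess_type='CrossEntropy')
#     compute_hess(model)             # curvature estimates in param.hess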
avg_line_length: 35.637363 | max_line_length: 126 | alphanum_fraction: 0.64282
4a1338f63a6e2a44612784726b94a7d8d944d507 | 2,077 | py | Python | setup.py | lehtiolab/msstitch | c497dfb4b76bfe1f69e162130739feb0df0c8888 | ["MIT"] | stars: 2 (2020-11-17 to 2022-03-31) | issues: 1 (2020-09-23) | forks: null
from setuptools import setup, find_packages
###################################################################
NAME = 'msstitch'
PACKAGES = find_packages(where='src')
KEYWORDS = ['mass spectrometry', 'proteomics', 'processing']
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Scientific/Engineering :: Bio-Informatics',
]
INSTALL_REQUIRES = ['numpy', 'lxml', 'biopython']
METADATA = {
'version': '3.8',
'title': 'msstitch',
'description': 'MS proteomics post processing utilities',
'uri': 'https://github.com/lehtiolab/msstitch',
'author': 'Jorrit Boekel',
'email': 'jorrit.boekel@scilifelab.se',
'license': 'MIT',
'copyright': 'Copyright (c) 2013 Jorrit Boekel',
}
CLI = {'console_scripts': ['msstitch=app.msstitch:main']}
###################################################################
from os import path
with open(path.join(path.abspath(path.dirname(__file__)), 'README.md'), encoding='utf-8') as fp:
long_description = fp.read()
if __name__ == '__main__':
setup(
name=NAME,
description=METADATA['description'],
license=METADATA['license'],
url=METADATA['uri'],
version=METADATA['version'],
author=METADATA['author'],
author_email=METADATA['email'],
maintainer=METADATA['author'],
maintainer_email=METADATA['email'],
keywords=KEYWORDS,
packages=PACKAGES,
package_dir={'': 'src'},
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
entry_points=CLI,
)
avg_line_length: 33.5 | max_line_length: 96 | alphanum_fraction: 0.605681
4a133a65af4183e26ba68715e8a277765e22fb64 | 54,076 | py | Python | indra/explanation/model_checker.py | RohitChattopadhyay/indra | a688e8cd46e876a299824c60cf4f6af8618f03da | ["BSD-2-Clause"] | stars: null | issues: null | forks: null
from __future__ import print_function, unicode_literals, absolute_import
from builtins import dict, str
from future.utils import python_2_unicode_compatible
import logging
import numbers
import textwrap
import networkx as nx
import itertools
import numpy as np
import scipy.stats
from copy import deepcopy
from collections import deque
import kappy
from pysb import WILD, export, Observable, ComponentSet
from pysb.core import as_complex_pattern, ComponentDuplicateNameError
from indra.statements import *
from indra.assemblers.pysb import assembler as pa
from collections import Counter
from indra.assemblers.pysb.kappa_util import im_json_to_graph
try:
import paths_graph as pg
has_pg = True
except ImportError:
has_pg = False
logger = logging.getLogger(__name__)
class PathMetric(object):
"""Describes results of simple path search (path existence).
Attributes
----------
source_node : str
The source node of the path
target_node : str
The target node of the path
polarity : int
The polarity of the path between source and target
length : int
The length of the path
"""
def __init__(self, source_node, target_node, polarity, length):
self.source_node = source_node
self.target_node = target_node
self.polarity = polarity
self.length = length
def __repr__(self):
return str(self)
@python_2_unicode_compatible
def __str__(self):
return ('source_node: %s, target_node: %s, polarity: %s, length: %d' %
(self.source_node, self.target_node, self.polarity,
self.length))
class PathResult(object):
"""Describes results of running the ModelChecker on a single Statement.
Attributes
----------
path_found : bool
True if a path was found, False otherwise.
result_code : string
- *STATEMENT_TYPE_NOT_HANDLED* - The provided statement type is not
handled
- *SUBJECT_MONOMERS_NOT_FOUND* - Statement subject not found in model
- *OBSERVABLES_NOT_FOUND* - Statement has no associated observable
- *NO_PATHS_FOUND* - Statement has no path for any observable
- *MAX_PATH_LENGTH_EXCEEDED* - Statement has no path len <=
MAX_PATH_LENGTH
- *PATHS_FOUND* - Statement has path len <= MAX_PATH_LENGTH
- *INPUT_RULES_NOT_FOUND* - No rules with Statement subject found
- *MAX_PATHS_ZERO* - Path found but MAX_PATHS is set to zero
max_paths : int
The maximum number of specific paths to return for each Statement
to be explained.
max_path_length : int
The maximum length of specific paths to return.
path_metrics : list[:py:class:`indra.explanation.model_checker.PathMetric`]
A list of PathMetric objects, each describing the results of a simple
path search (path existence).
paths : list[list[tuple[str, int]]]
A list of paths obtained from path finding. Each path is a list of
tuples (which are edges in the path), with the first element of the
tuple the name of a rule, and the second element its polarity in the
path.
"""
def __init__(self, path_found, result_code, max_paths, max_path_length):
self.path_found = path_found
self.result_code = result_code
self.max_paths = max_paths
self.max_path_length = max_path_length
self.path_metrics = []
self.paths = []
def add_path(self, path):
self.paths.append(path)
def add_metric(self, path_metric):
self.path_metrics.append(path_metric)
@python_2_unicode_compatible
def __str__(self):
summary = textwrap.dedent("""
PathResult:
path_found: {path_found}
result_code: {result_code}
path_metrics: {path_metrics}
paths: {paths}
max_paths: {max_paths}
max_path_length: {max_path_length}""")
ws = '\n '
# String representation of path metrics
if not self.path_metrics:
pm_str = str(self.path_metrics)
else:
pm_str = ws + ws.join(['%d: %s' % (pm_ix, pm) for pm_ix, pm in
enumerate(self.path_metrics)])
def format_path(path, num_spaces=11):
path_ws = '\n' + (' ' * num_spaces)
return path_ws.join([str(p) for p in path])
# String representation of paths
if not self.paths:
path_str = str(self.paths)
else:
path_str = ws + ws.join(['%d: %s' % (p_ix, format_path(p))
for p_ix, p in enumerate(self.paths)])
return summary.format(path_found=self.path_found,
result_code=self.result_code,
max_paths=self.max_paths,
max_path_length=self.max_path_length,
path_metrics=pm_str, paths=path_str)
def __repr__(self):
return str(self)
class ModelChecker(object):
"""Check a PySB model against a set of INDRA statements.
Parameters
----------
model : pysb.Model
A PySB model to check.
statements : Optional[list[indra.statements.Statement]]
A list of INDRA Statements to check the model against.
agent_obs: Optional[list[indra.statements.Agent]]
A list of INDRA Agents in a given state to be observed.
do_sampling : bool
Whether to use breadth-first search or weighted sampling to
generate paths. Default is False (breadth-first search).
seed : int
Random seed for sampling (optional, default is None).
"""
def __init__(self, model, statements=None, agent_obs=None,
do_sampling=False, seed=None):
self.model = model
if statements:
self.statements = statements
else:
self.statements = []
if agent_obs:
self.agent_obs = agent_obs
else:
self.agent_obs = []
if seed is not None:
np.random.seed(seed)
# Whether to do sampling
self.do_sampling = do_sampling
# Influence map
self._im = None
# Map from statements to associated observables
self.stmt_to_obs = {}
# Map from agents to associated observables
self.agent_to_obs = {}
# Map between rules and downstream observables
self.rule_obs_dict = {}
def add_statements(self, stmts):
"""Add to the list of statements to check against the model.
Parameters
----------
stmts : list[indra.statements.Statement]
The list of Statements to be added for checking.
"""
self.statements += stmts
def generate_im(self, model):
"""Return a graph representing the influence map generated by Kappa
Parameters
----------
model : pysb.Model
The PySB model whose influence map is to be generated
Returns
-------
graph : networkx.MultiDiGraph
A MultiDiGraph representing the influence map
"""
kappa = kappy.KappaStd()
model_str = export.export(model, 'kappa')
kappa.add_model_string(model_str)
kappa.project_parse()
imap = kappa.analyses_influence_map(accuracy='medium')
graph = im_json_to_graph(imap)
return graph
def draw_im(self, fname):
"""Draw and save the influence map in a file.
Parameters
----------
fname : str
The name of the file to save the influence map in.
The extension of the file will determine the file format,
typically png or pdf.
"""
im = self.get_im()
im_agraph = nx.nx_agraph.to_agraph(im)
im_agraph.draw(fname, prog='dot')
def get_im(self, force_update=False):
"""Get the influence map for the model, generating it if necessary.
Parameters
----------
force_update : bool
Whether to generate the influence map when the function is called.
If False, returns the previously generated influence map if
            available. Defaults to False.
Returns
-------
networkx MultiDiGraph object containing the influence map.
The influence map can be rendered as a pdf using the dot layout
program as follows::
im_agraph = nx.nx_agraph.to_agraph(influence_map)
im_agraph.draw('influence_map.pdf', prog='dot')
"""
if self._im and not force_update:
return self._im
if not self.model:
raise Exception("Cannot get influence map if there is no model.")
def add_obs_for_agent(agent):
obj_mps = list(pa.grounded_monomer_patterns(self.model, agent))
if not obj_mps:
logger.debug('No monomer patterns found in model for agent %s, '
'skipping' % agent)
return
obs_list = []
for obj_mp in obj_mps:
obs_name = _monomer_pattern_label(obj_mp) + '_obs'
# Add the observable
obj_obs = Observable(obs_name, obj_mp, _export=False)
obs_list.append(obs_name)
try:
self.model.add_component(obj_obs)
except ComponentDuplicateNameError:
    # An equivalent observable is already in the model; reuse it
    pass
return obs_list
# Create observables for all statements to check, and add to model
# Remove any existing observables in the model
self.model.observables = ComponentSet([])
for stmt in self.statements:
# Generate observables for Modification statements
if isinstance(stmt, Modification):
mod_condition_name = modclass_to_modtype[stmt.__class__]
if isinstance(stmt, RemoveModification):
mod_condition_name = modtype_to_inverse[mod_condition_name]
# Add modification to substrate agent
modified_sub = _add_modification_to_agent(stmt.sub,
mod_condition_name, stmt.residue,
stmt.position)
obs_list = add_obs_for_agent(modified_sub)
# Associate this statement with this observable
self.stmt_to_obs[stmt] = obs_list
# Generate observables for Activation/Inhibition statements
elif isinstance(stmt, RegulateActivity):
regulated_obj, polarity = \
_add_activity_to_agent(stmt.obj, stmt.obj_activity,
stmt.is_activation)
obs_list = add_obs_for_agent(regulated_obj)
# Associate this statement with this observable
self.stmt_to_obs[stmt] = obs_list
elif isinstance(stmt, RegulateAmount):
obs_list = add_obs_for_agent(stmt.obj)
self.stmt_to_obs[stmt] = obs_list
elif isinstance(stmt, Influence):
obs_list = add_obs_for_agent(stmt.obj.concept)
self.stmt_to_obs[stmt] = obs_list
# Add observables for each agent
for ag in self.agent_obs:
obs_list = add_obs_for_agent(ag)
self.agent_to_obs[ag] = obs_list
logger.info("Generating influence map")
self._im = self.generate_im(self.model)
#self._im.is_multigraph = lambda: False
# Now, for every rule in the model, check if there are any observables
# downstream; alternatively, for every observable in the model, get a
# list of rules.
# We'll need the dictionary to check if nodes are observables
node_attributes = nx.get_node_attributes(self._im, 'node_type')
for rule in self.model.rules:
obs_list = []
# Get successors of the rule node
for neighb in self._im.neighbors(rule.name):
# Check if the node is an observable
if node_attributes[neighb] != 'variable':
continue
# Get the edge and check the polarity
edge_sign = _get_edge_sign(self._im, (rule.name, neighb))
obs_list.append((neighb, edge_sign))
self.rule_obs_dict[rule.name] = obs_list
return self._im
def check_model(self, max_paths=1, max_path_length=5):
"""Check all the statements added to the ModelChecker.
Parameters
----------
max_paths : Optional[int]
The maximum number of specific paths to return for each Statement
to be explained. Default: 1
max_path_length : Optional[int]
The maximum length of specific paths to return. Default: 5
Returns
-------
list of (Statement, PathResult)
Each tuple contains the Statement checked against the model and
a PathResult object describing the results of model checking.
"""
results = []
for idx, stmt in enumerate(self.statements):
logger.info('---')
logger.info('Checking statement (%d/%d): %s' % \
(idx + 1, len(self.statements), stmt))
result = self.check_statement(stmt, max_paths, max_path_length)
results.append((stmt, result))
return results
def check_statement(self, stmt, max_paths=1, max_path_length=5):
"""Check a single Statement against the model.
Parameters
----------
stmt : indra.statements.Statement
The Statement to check.
max_paths : Optional[int]
The maximum number of specific paths to return for each Statement
to be explained. Default: 1
max_path_length : Optional[int]
The maximum length of specific paths to return. Default: 5
Returns
-------
PathResult
    A PathResult object indicating whether the model satisfies the
    Statement and, if so, the paths that were found.
"""
# Make sure the influence map is initialized
self.get_im()
# Check if this is one of the statement types that we can check
if not isinstance(stmt, (Modification, RegulateAmount,
RegulateActivity, Influence)):
logger.info('Statement type %s not handled' %
stmt.__class__.__name__)
return PathResult(False, 'STATEMENT_TYPE_NOT_HANDLED',
max_paths, max_path_length)
# Get the polarity for the statement
if isinstance(stmt, Modification):
target_polarity = -1 if isinstance(stmt, RemoveModification) else 1
elif isinstance(stmt, RegulateActivity):
target_polarity = 1 if stmt.is_activation else -1
elif isinstance(stmt, RegulateAmount):
target_polarity = -1 if isinstance(stmt, DecreaseAmount) else 1
elif isinstance(stmt, Influence):
target_polarity = -1 if stmt.overall_polarity() == -1 else 1
# Get the subject and object (works also for Modifications)
subj, obj = stmt.agent_list()
# Get a list of monomer patterns matching the subject.
# FIXME: Currently this will match rules with the corresponding monomer
# pattern on it. In the future, this should (possibly) also match rules
# in which 1) the agent is in its active form, or 2) the agent is tagged
# as the enzyme in a rule of the appropriate activity (e.g., a
# phosphorylation rule).
if subj is not None:
subj_mps = list(pa.grounded_monomer_patterns(self.model, subj,
ignore_activities=True))
if not subj_mps:
return PathResult(False, 'SUBJECT_MONOMERS_NOT_FOUND',
max_paths, max_path_length)
else:
subj_mps = [None]
# Observables may not be found for an activation since there may be no
# rule in the model activating the object, and the object may not have
# an "active" site of the appropriate type
obs_names = self.stmt_to_obs[stmt]
if not obs_names:
logger.info("No observables for stmt %s, returning False" % stmt)
return PathResult(False, 'OBSERVABLES_NOT_FOUND',
max_paths, max_path_length)
for subj_mp, obs_name in itertools.product(subj_mps, obs_names):
# NOTE: Returns on the path found for the first enz_mp/obs combo
result = self._find_im_paths(subj_mp, obs_name, target_polarity,
max_paths, max_path_length)
# If a path was found, then we return it; otherwise, that means
# there was no path for this observable, so we have to try the next
# one
if result.path_found:
logger.info('Found paths for %s' % stmt)
return result
# If we got here, then there was no path for any observable
logger.info('No paths found for %s' % stmt)
return PathResult(False, 'NO_PATHS_FOUND',
max_paths, max_path_length)
def _get_input_rules(self, subj_mp):
if subj_mp is None:
raise ValueError("Cannot take None as an argument for subj_mp.")
input_rules = _match_lhs(subj_mp, self.model.rules)
logger.debug('Found %s input rules matching %s' %
(len(input_rules), str(subj_mp)))
# Filter to include only rules where the subj_mp is actually the
# subject (i.e., don't pick up upstream rules where the subject
# is itself a substrate/object)
# FIXME: Note that this will eliminate rules where the subject
# being checked is included on the left hand side as
# a bound condition rather than as an enzyme.
subj_rules = pa.rules_with_annotation(self.model,
subj_mp.monomer.name,
'rule_has_subject')
logger.debug('%d rules with %s as subject' %
(len(subj_rules), subj_mp.monomer.name))
input_rule_set = set([r.name for r in input_rules]).intersection(
set([r.name for r in subj_rules]))
logger.debug('Final input rule set contains %d rules' %
len(input_rule_set))
return input_rule_set
def _sample_paths(self, input_rule_set, obs_name, target_polarity,
max_paths=1, max_path_length=5):
if max_paths == 0:
raise ValueError("max_paths cannot be 0 for path sampling.")
# Convert path polarity representation from 0/1 to 1/-1
def convert_polarities(path_list):
return [tuple((n[0], 1 if n[1] == 0 else -1)
        for n in path)
        for path in path_list]
pg_polarity = 0 if target_polarity > 0 else 1
nx_graph = _im_to_signed_digraph(self.get_im())
# Add edges from dummy node to input rules
source_node = 'SOURCE_NODE'
for rule in input_rule_set:
nx_graph.add_edge(source_node, rule, sign=0)
# -------------------------------------------------
# Create combined paths_graph
f_level, b_level = pg.get_reachable_sets(nx_graph, source_node,
obs_name, max_path_length,
signed=True)
pg_list = []
for path_length in range(1, max_path_length+1):
cfpg = pg.CFPG.from_graph(
nx_graph, source_node, obs_name, path_length, f_level,
b_level, signed=True, target_polarity=pg_polarity)
pg_list.append(cfpg)
combined_pg = pg.CombinedCFPG(pg_list)
# Make sure the combined paths graph is not empty
if not combined_pg.graph:
pr = PathResult(False, 'NO_PATHS_FOUND', max_paths, max_path_length)
pr.path_metrics = None
pr.paths = []
return pr
# Get a dict of rule objects
rule_obj_dict = {}
for ann in self.model.annotations:
if ann.predicate == 'rule_has_object':
rule_obj_dict[ann.subject] = ann.object
# Get monomer initial conditions
ic_dict = {}
for mon in self.model.monomers:
# FIXME: A hack that depends on the _0 convention
ic_name = '%s_0' % mon.name
# TODO: Wrap this in try/except?
ic_param = self.model.parameters[ic_name]
ic_value = ic_param.value
ic_dict[mon.name] = ic_value
# Set weights in PG based on model initial conditions
for cur_node in combined_pg.graph.nodes():
edge_weights = {}
rule_obj_list = []
edge_weights_by_gene = {}
for u, v in combined_pg.graph.out_edges(cur_node):
v_rule = v[1][0]
# Get the object of the rule (a monomer name)
rule_obj = rule_obj_dict.get(v_rule)
if rule_obj:
# Add to list so we can count instances by gene
rule_obj_list.append(rule_obj)
# Get the abundance of rule object from the initial
# conditions
# TODO: Wrap in try/except?
ic_value = ic_dict[rule_obj]
else:
ic_value = 1.0
edge_weights[(u, v)] = ic_value
edge_weights_by_gene[rule_obj] = ic_value
# Get frequency of different rule objects
rule_obj_ctr = Counter(rule_obj_list)
# Normalize results by weight sum and gene frequency at this level
edge_weight_sum = sum(edge_weights_by_gene.values())
edge_weights_norm = {}
for e, v in edge_weights.items():
v_rule = e[1][1][0]
rule_obj = rule_obj_dict.get(v_rule)
if rule_obj:
rule_obj_count = rule_obj_ctr[rule_obj]
else:
rule_obj_count = 1
edge_weights_norm[e] = ((v / float(edge_weight_sum)) /
float(rule_obj_count))
# Add edge weights to paths graph
nx.set_edge_attributes(combined_pg.graph, name='weight',
values=edge_weights_norm)
# Sample from the combined CFPG
paths = combined_pg.sample_paths(max_paths)
# -------------------------------------------------
if paths:
pr = PathResult(True, 'PATHS_FOUND', max_paths, max_path_length)
pr.path_metrics = None
# Convert path polarity representation from 0/1 to 1/-1
pr.paths = convert_polarities(paths)
# Strip off the SOURCE_NODE prefix
pr.paths = [p[1:] for p in pr.paths]
else:
# sample_paths should return paths on a non-empty graph; fall back to
# an empty result if it does not
pr = PathResult(False, 'NO_PATHS_FOUND', max_paths, max_path_length)
pr.path_metrics = None
pr.paths = []
return pr
def _find_im_paths(self, subj_mp, obs_name, target_polarity,
max_paths=1, max_path_length=5):
"""Check for a source/target path in the influence map.
Parameters
----------
subj_mp : pysb.MonomerPattern
MonomerPattern corresponding to the subject of the Statement
being checked.
obs_name : str
Name of the PySB model Observable corresponding to the
object/target of the Statement being checked.
target_polarity : int
Whether the influence in the Statement is positive (1) or negative
(-1).
Returns
-------
PathResult
PathResult object indicating the results of the attempt to find
a path.
"""
logger.info(('Running path finding with max_paths=%d,'
' max_path_length=%d') % (max_paths, max_path_length))
# Find rules in the model corresponding to the input
if subj_mp is None:
input_rule_set = None
else:
input_rule_set = self._get_input_rules(subj_mp)
if not input_rule_set:
logger.info('Input rules not found for %s' % subj_mp)
return PathResult(False, 'INPUT_RULES_NOT_FOUND',
max_paths, max_path_length)
logger.info('Checking path metrics between %s and %s with polarity %s' %
(subj_mp, obs_name, target_polarity))
# -- Route to the path sampling function --
if self.do_sampling:
if not has_pg:
raise Exception('The paths_graph package could not be '
'imported.')
return self._sample_paths(input_rule_set, obs_name, target_polarity,
max_paths, max_path_length)
# -- Do Breadth-First Enumeration --
# Generate the predecessors to our observable and count the paths
path_lengths = []
path_metrics = []
for source, polarity, path_length in \
_find_sources(self.get_im(), obs_name, input_rule_set,
target_polarity):
pm = PathMetric(source, obs_name, polarity, path_length)
path_metrics.append(pm)
path_lengths.append(path_length)
logger.info('Finding paths between %s and %s with polarity %s' %
(subj_mp, obs_name, target_polarity))
# Now, look for paths
paths = []
if path_metrics and max_paths == 0:
pr = PathResult(True, 'MAX_PATHS_ZERO',
max_paths, max_path_length)
pr.path_metrics = path_metrics
return pr
elif path_metrics:
if min(path_lengths) <= max_path_length:
pr = PathResult(True, 'PATHS_FOUND', max_paths, max_path_length)
pr.path_metrics = path_metrics
# Get the first path
path_iter = enumerate(_find_sources_with_paths(
self.get_im(), obs_name,
input_rule_set, target_polarity))
for path_ix, path in path_iter:
flipped = _flip(self.get_im(), path)
pr.add_path(flipped)
if len(pr.paths) >= max_paths:
break
return pr
# There are no paths shorter than the max path length, so we
# don't bother trying to get them
else:
pr = PathResult(True, 'MAX_PATH_LENGTH_EXCEEDED',
max_paths, max_path_length)
pr.path_metrics = path_metrics
return pr
else:
return PathResult(False, 'NO_PATHS_FOUND',
max_paths, max_path_length)
def score_paths(self, paths, agents_values, loss_of_function=False,
sigma=0.15, include_final_node=False):
"""Return scores associated with a given set of paths.
Parameters
----------
paths : list[list[tuple[str, int]]]
A list of paths obtained from path finding. Each path is a list
of tuples (which are edges in the path), with the first element
of the tuple the name of a rule, and the second element its
polarity in the path.
agents_values : dict[indra.statements.Agent, float]
A dictionary of INDRA Agents and their corresponding measured
value in a given experimental condition.
loss_of_function : Optional[boolean]
If True, flip the polarity of the path. For instance, if the effect
of an inhibitory drug is explained, set this to True.
Default: False
sigma : Optional[float]
The estimated standard deviation for the normally distributed
measurement error in the observation model used to score paths
with respect to data. Default: 0.15
include_final_node : Optional[boolean]
Determines whether the final node of the path is included in the
score. Default: False
"""
def obs_model(x):
    return scipy.stats.norm(x, sigma)
# Build up dict mapping observables to values
obs_dict = {}
for ag, val in agents_values.items():
obs_list = self.agent_to_obs[ag]
if obs_list is not None:
for obs in obs_list:
obs_dict[obs] = val
# For every path...
path_scores = []
for path in paths:
logger.info('------')
logger.info("Scoring path:")
logger.info(path)
# Look at every node in the path, excluding the final
# observable...
path_score = 0
last_path_node_index = -1 if include_final_node else -2
for node, sign in path[:last_path_node_index]:
# ...and for each node check the sign to see if it matches the
# data. So the first thing is to look at what's downstream
# of the rule
# rule_obs_dict[node] is a list of (observable name, sign) pairs
for affected_obs, rule_obs_sign in self.rule_obs_dict[node]:
flip_polarity = -1 if loss_of_function else 1
pred_sign = sign * rule_obs_sign * flip_polarity
# Check to see if this observable is in the data
logger.info('%s %s: effect %s %s' %
(node, sign, affected_obs, pred_sign))
measured_val = obs_dict.get(affected_obs)
if measured_val:
# For negative predictions use CDF (prob that given
# measured value, true value lies below 0)
if pred_sign <= 0:
prob_correct = obs_model(measured_val).logcdf(0)
# For positive predictions, use log survival function
# (SF = 1 - CDF, i.e., prob that true value is
# above 0)
else:
prob_correct = obs_model(measured_val).logsf(0)
logger.info('Actual: %s, Log Probability: %s' %
(measured_val, prob_correct))
path_score += prob_correct
if not self.rule_obs_dict[node]:
logger.info('%s %s' % (node, sign))
prob_correct = obs_model(0).logcdf(0)
logger.info('Unmeasured node, Log Probability: %s' %
(prob_correct))
path_score += prob_correct
# Normalized path
#path_score = path_score / len(path)
logger.info("Path score: %s" % path_score)
path_scores.append(path_score)
path_tuples = list(zip(paths, path_scores))
# Sort first by path length
sorted_by_length = sorted(path_tuples, key=lambda x: len(x[0]))
# Sort by probability; sort in reverse order so that larger values
# (higher probabilities) are ranked higher
scored_paths = sorted(sorted_by_length, key=lambda x: x[1],
reverse=True)
return scored_paths
def prune_influence_map(self):
"""Remove edges between rules causing problematic non-transitivity.
First, all self-loops are removed. After this initial step, edges are
removed between rules when they share *all* child nodes except for each
other; that is, they have a mutual relationship with each other and
share all of the same children.
Note that edges must be removed in batch at the end to prevent edge
removal from affecting the lists of rule children during the comparison
process.
"""
im = self.get_im()
# First, remove all self-loops
logger.info('Removing self loops')
edges_to_remove = []
for e in im.edges():
if e[0] == e[1]:
logger.info('Removing self loop: %s', e)
edges_to_remove.append((e[0], e[1]))
# Now remove all the edges to be removed with a single call
im.remove_edges_from(edges_to_remove)
# Remove parameter nodes from influence map
remove_im_params(self.model, im)
# Now compare nodes pairwise and look for overlap between child nodes
logger.info('Get successors of each node')
succ_dict = {}
for node in im.nodes():
succ_dict[node] = set(im.successors(node))
# Sort and then group nodes by number of successors
logger.info('Compare combinations of successors')
group_key_fun = lambda x: len(succ_dict[x])
nodes_sorted = sorted(im.nodes(), key=group_key_fun)
groups = itertools.groupby(nodes_sorted, key=group_key_fun)
# Now iterate over each group and then construct combinations
# within the group to check for shared successors
edges_to_remove = []
for gix, group in groups:
combos = itertools.combinations(group, 2)
for ix, (p1, p2) in enumerate(combos):
# Children are identical except for mutual relationship
if succ_dict[p1].difference(succ_dict[p2]) == set([p2]) and \
succ_dict[p2].difference(succ_dict[p1]) == set([p1]):
for u, v in ((p1, p2), (p2, p1)):
edges_to_remove.append((u, v))
logger.debug('Will remove edge (%s, %s)', u, v)
logger.info('Removing %d edges from influence map' %
len(edges_to_remove))
# Now remove all the edges to be removed with a single call
im.remove_edges_from(edges_to_remove)
def prune_influence_map_subj_obj(self):
"""Prune influence map to include only edges where the object of the
upstream rule matches the subject of the downstream rule."""
def get_rule_info(r):
result = {}
for ann in self.model.annotations:
if ann.subject == r:
if ann.predicate == 'rule_has_subject':
result['subject'] = ann.object
elif ann.predicate == 'rule_has_object':
result['object'] = ann.object
return result
im = self.get_im()
rules = im.nodes()
edges_to_prune = []
for r1, r2 in itertools.permutations(rules, 2):
if (r1, r2) not in im.edges():
continue
r1_info = get_rule_info(r1)
r2_info = get_rule_info(r2)
if 'object' not in r1_info or 'subject' not in r2_info:
continue
if r1_info['object'] != r2_info['subject']:
logger.info("Removing edge %s --> %s" % (r1, r2))
edges_to_prune.append((r1, r2))
logger.info('Removing %d edges from influence map' %
len(edges_to_prune))
im.remove_edges_from(edges_to_prune)
def prune_influence_map_degrade_bind_positive(self, model_stmts):
"""Prune positive edges between X degrading and X forming a
complex with Y."""
im = self.get_im()
edges_to_prune = []
for r1, r2, data in im.edges(data=True):
s1 = stmt_from_rule(r1, self.model, model_stmts)
s2 = stmt_from_rule(r2, self.model, model_stmts)
# Make sure this is a degradation/binding combo
s1_is_degrad = (s1 and isinstance(s1, DecreaseAmount))
s2_is_bind = (s2 and isinstance(s2, Complex) and 'bind' in r2)
if not s1_is_degrad or not s2_is_bind:
continue
# Make sure what is degraded is part of the complex
if s1.obj.name not in [m.name for m in s2.members]:
continue
# Make sure we're dealing with a positive influence
if data['sign'] == 1:
edges_to_prune.append((r1, r2))
logger.info('Removing %d edges from influence map' %
len(edges_to_prune))
im.remove_edges_from(edges_to_prune)
def _find_sources_sample(im, target, sources, polarity, rule_obs_dict,
agent_to_obs, agents_values):
# Build up dict mapping observables to values
obs_dict = {}
for ag, val in agents_values.items():
obs_list = agent_to_obs[ag]
for obs in obs_list:
obs_dict[obs] = val
sigma = 0.2
def obs_model(x):
return scipy.stats.norm(x, sigma)
def _sample_pred(im, target, rule_obs_dict, obs_model):
preds = list(_get_signed_predecessors(im, target, 1))
if not preds:
return None
pred_scores = []
for pred, sign in preds:
pred_score = 0
for affected_obs, rule_obs_sign in rule_obs_dict[pred]:
pred_sign = sign * rule_obs_sign
# Check to see if this observable is in the data
logger.info('%s %s: effect %s %s' %
(pred, sign, affected_obs, pred_sign))
measured_val = obs_dict.get(affected_obs)
if measured_val:
logger.info('Actual: %s' % measured_val)
# The tail probability of the real value being above 1
tail_prob = obs_model(measured_val).cdf(1)
pred_score += (tail_prob if pred_sign == 1 else
1-tail_prob)
pred_scores.append(pred_score)
# Normalize scores
pred_scores = np.array(pred_scores) / np.sum(pred_scores)
pred_idx = np.random.choice(range(len(preds)), p=pred_scores)
pred = preds[pred_idx]
return pred
preds = []
for i in range(100):
pred = _sample_pred(im, target, rule_obs_dict, obs_model)
preds.append(pred[0])
def _find_sources_with_paths(im, target, sources, polarity):
"""Get the subset of source nodes with paths to the target.
Given a target, a list of sources, and a path polarity, perform a
breadth-first search upstream from the target to find paths to any of the
upstream sources.
Parameters
----------
im : networkx.MultiDiGraph
Graph containing the influence map.
target : str
The node (rule name) in the influence map to start looking upstream for
matching sources.
sources : list of str
The nodes (rules) corresponding to the subject or upstream influence
being checked.
polarity : int
Required polarity of the path between source and target.
Returns
-------
generator of path
Yields paths as lists of nodes (rule names). If there are no paths
to any of the given source nodes, the generator is empty.
"""
# First, create a list of visited nodes
# Adapted from
# http://stackoverflow.com/questions/8922060/
# how-to-trace-the-path-in-a-breadth-first-search
# FIXME: the sign information for the target should be associated with
# the observable itself
queue = deque([[(target, 1)]])
while queue:
# Get the first path in the queue
path = queue.popleft()
node, node_sign = path[-1]
# If there's only one node in the path, it's the observable we're
# starting from, so the path is positive
# if len(path) == 1:
# sign = 1
# Because the path runs from target back to source, we have to reverse
# the path to calculate the overall polarity
#else:
# sign = _path_polarity(im, reversed(path))
# Don't allow trivial paths consisting only of the target observable
if (sources is None or node in sources) and node_sign == polarity \
and len(path) > 1:
logger.debug('Found path: %s' % str(_flip(im, path)))
yield tuple(path)
for predecessor, sign in _get_signed_predecessors(im, node, node_sign):
# Only add predecessors to the path if it's not already in the
# path--prevents loops
if (predecessor, sign) in path:
continue
# Otherwise, the new path is a copy of the old one plus the new
# predecessor
new_path = list(path)
new_path.append((predecessor, sign))
queue.append(new_path)
return
def remove_im_params(model, im):
"""Remove parameter nodes from the influence map.
Parameters
----------
model : pysb.core.Model
PySB model.
im : networkx.MultiDiGraph
Influence map.
Returns
-------
networkx.MultiDiGraph
Influence map with the parameter nodes removed.
"""
for param in model.parameters:
# If the node doesn't exist (e.g., it may have already been removed),
# skip over the parameter without error
try:
im.remove_node(param.name)
except nx.NetworkXError:
pass
def _find_sources(im, target, sources, polarity):
"""Get the subset of source nodes with paths to the target.
Given a target, a list of sources, and a path polarity, perform a
breadth-first search upstream from the target to determine whether any of
the queried sources have paths to the target with the appropriate polarity.
For efficiency, does not return the full path, but identifies the upstream
sources and the length of the path.
Parameters
----------
im : networkx.MultiDiGraph
Graph containing the influence map.
target : str
The node (rule name) in the influence map to start looking upstream for
matching sources.
sources : list of str
The nodes (rules) corresponding to the subject or upstream influence
being checked.
polarity : int
Required polarity of the path between source and target.
Returns
-------
generator of (source, polarity, path_length)
Yields tuples of source node (string), polarity (int) and path length
(int). If there are no paths to any of the given source nodes, the
generator is empty.
"""
# First, create a list of visited nodes
# Adapted from
# networkx.algorithms.traversal.breadth_first_search.bfs_edges
visited = set([(target, 1)])
# Generate list of predecessor nodes with a sign updated according to the
# sign of the target node
target_tuple = (target, 1)
# The queue holds tuples of "parents" (in this case downstream nodes) and
# their "children" (in this case their upstream influencers)
queue = deque([(target_tuple, _get_signed_predecessors(im, target, 1), 0)])
while queue:
parent, children, path_length = queue[0]
try:
# Get the next child in the list
(child, sign) = next(children)
# Is this child one of the source nodes we're looking for? If so,
# yield it along with path length.
if (sources is None or child in sources) and sign == polarity:
logger.debug("Found path to %s from %s with desired sign %s "
"with length %d" %
(target, child, polarity, path_length+1))
yield (child, sign, path_length+1)
# Check this child against the visited list. If we haven't visited
# it already (accounting for the path to the node), then add it
# to the queue.
if (child, sign) not in visited:
visited.add((child, sign))
queue.append(((child, sign),
_get_signed_predecessors(im, child, sign),
path_length + 1))
# Once we've finished iterating over the children of the current node,
# pop the node off and go to the next one in the queue
except StopIteration:
queue.popleft()
# There was no path; this will produce an empty generator
return
def _get_signed_predecessors(im, node, polarity):
"""Get upstream nodes in the influence map.
Return the upstream nodes along with the overall polarity of the path
to that node by accounting for the polarity of the path to the given node
and the polarity of the edge between the given node and its immediate
predecessors.
Parameters
----------
im : networkx.MultiDiGraph
Graph containing the influence map.
node : str
The node (rule name) in the influence map to get predecessors (upstream
nodes) for.
polarity : int
Polarity of the overall path to the given node.
Returns
-------
generator of tuples, (node, polarity)
Each tuple returned contains two elements, a node (string) and the
polarity of the overall path (int) to that node.
"""
for pred in im.predecessors(node):
pred_edge = (pred, node)
yield (pred, _get_edge_sign(im, pred_edge) * polarity)
def _get_edge_sign(im, edge):
"""Get the polarity of the influence by examining the edge sign."""
edge_data = im[edge[0]][edge[1]]
# Handle possible multiple edges between nodes
signs = list(set([v['sign'] for v in edge_data.values()
if v.get('sign')]))
if len(signs) > 1:
logger.warning("Edge %s has conflicting polarities; choosing "
"positive polarity by default" % str(edge))
sign = 1
else:
sign = signs[0]
if sign is None:
raise Exception('No sign attribute for edge.')
elif abs(sign) == 1:
return sign
else:
raise Exception('Unexpected edge sign: %s' % sign)
def _add_modification_to_agent(agent, mod_type, residue, position):
"""Add a modification condition to an Agent."""
new_mod = ModCondition(mod_type, residue, position)
# Check if this modification already exists
for old_mod in agent.mods:
if old_mod.equals(new_mod):
return agent
new_agent = deepcopy(agent)
new_agent.mods.append(new_mod)
return new_agent
def _add_activity_to_agent(agent, act_type, is_active):
# Default to active, and return polarity if it's an inhibition
new_act = ActivityCondition(act_type, True)
# Check if this state already exists
if agent.activity is not None and agent.activity.equals(new_act):
return agent
new_agent = deepcopy(agent)
new_agent.activity = new_act
polarity = 1 if is_active else -1
return (new_agent, polarity)
def _match_lhs(cp, rules):
"""Get rules with a left-hand side matching the given ComplexPattern."""
rule_matches = []
for rule in rules:
reactant_pattern = rule.rule_expression.reactant_pattern
for rule_cp in reactant_pattern.complex_patterns:
if _cp_embeds_into(rule_cp, cp):
rule_matches.append(rule)
break
return rule_matches
def _cp_embeds_into(cp1, cp2):
"""Check that any state in ComplexPattern2 is matched in ComplexPattern1.
"""
# Check that any state in cp2 is matched in cp1
# If the thing we're matching to is just a monomer pattern, that makes
# things easier--we just need to find the corresponding monomer pattern
# in cp1
if cp1 is None or cp2 is None:
return False
cp1 = as_complex_pattern(cp1)
cp2 = as_complex_pattern(cp2)
if len(cp2.monomer_patterns) == 1:
mp2 = cp2.monomer_patterns[0]
# Iterate over the monomer patterns in cp1 and see if there is one
# that has the same name
for mp1 in cp1.monomer_patterns:
if _mp_embeds_into(mp1, mp2):
return True
return False
def _mp_embeds_into(mp1, mp2):
"""Check that conditions in MonomerPattern2 are met in MonomerPattern1."""
sc_matches = []
if mp1.monomer.name != mp2.monomer.name:
return False
# Check that all conditions in mp2 are met in mp1
for site_name, site_state in mp2.site_conditions.items():
if site_name not in mp1.site_conditions or \
site_state != mp1.site_conditions[site_name]:
return False
return True
"""
# NOTE: This code is currently "deprecated" because it has been replaced by the
# use of Observables for the Statement objects.
def match_rhs(cp, rules):
rule_matches = []
for rule in rules:
product_pattern = rule.rule_expression.product_pattern
for rule_cp in product_pattern.complex_patterns:
if _cp_embeds_into(rule_cp, cp):
rule_matches.append(rule)
break
return rule_matches
def find_production_rules(cp, rules):
# Find rules where the CP matches the left hand side
lhs_rule_set = set(_match_lhs(cp, rules))
# Now find rules where the CP matches the right hand side
rhs_rule_set = set(match_rhs(cp, rules))
# Production rules are rules where there is a match on the right hand
# side but not on the left hand side
prod_rules = list(rhs_rule_set.difference(lhs_rule_set))
return prod_rules
def find_consumption_rules(cp, rules):
# Find rules where the CP matches the left hand side
lhs_rule_set = set(_match_lhs(cp, rules))
# Now find rules where the CP matches the right hand side
rhs_rule_set = set(match_rhs(cp, rules))
# Consumption rules are rules where there is a match on the left hand
# side but not on the right hand side
cons_rules = list(lhs_rule_set.difference(rhs_rule_set))
return cons_rules
"""
def _flip(im, path):
# Reverse the path and the polarities associated with each node
rev = tuple(reversed(path))
return _path_with_polarities(im, rev)
def _path_with_polarities(im, path):
# This doesn't address the effect of the rules themselves on the
# observables of interest--just the effects of the rules on each other
edge_polarities = []
path_list = list(path)
edges = zip(path_list[0:-1], path_list[1:])
for from_tup, to_tup in edges:
from_rule = from_tup[0]
to_rule = to_tup[0]
edge = (from_rule, to_rule)
edge_polarities.append(_get_edge_sign(im, edge))
# Compute and return the overall path polarity
#path_polarity = np.prod(edge_polarities)
# Calculate the left (cumulative) product of edge polarities
polarities_lprod = [1]
for ep_ix, ep in enumerate(edge_polarities):
polarities_lprod.append(polarities_lprod[-1] * ep)
assert len(path) == len(polarities_lprod)
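# Worked example (illustrative): edge polarities [1, -1, -1] give left
# products [1, 1, -1, 1], pairing each node in the path with the overall
# sign of the prefix that ends at it.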
return tuple(zip([node for node, sign in path], polarities_lprod))
#assert path_polarity == 1 or path_polarity == -1
#return True if path_polarity == 1 else False
#return path_polarity
def stmt_from_rule(rule_name, model, stmts):
"""Return the source INDRA Statement corresponding to a rule in a model.
Parameters
----------
rule_name : str
The name of a rule in the given PySB model.
model : pysb.core.Model
A PySB model which contains the given rule.
stmts : list[indra.statements.Statement]
A list of INDRA Statements from which the model was assembled.
Returns
-------
stmt : indra.statements.Statement
The Statement from which the given rule in the model was obtained.
"""
stmt_uuid = None
for ann in model.annotations:
if ann.subject == rule_name:
if ann.predicate == 'from_indra_statement':
stmt_uuid = ann.object
break
if stmt_uuid:
for stmt in stmts:
if stmt.uuid == stmt_uuid:
return stmt
def _monomer_pattern_label(mp):
"""Return a string label for a MonomerPattern."""
site_strs = []
for site, cond in mp.site_conditions.items():
if isinstance(cond, (tuple, list)):
assert len(cond) == 2
if cond[1] == WILD:
site_str = '%s_%s' % (site, cond[0])
else:
site_str = '%s_%s%s' % (site, cond[0], cond[1])
elif isinstance(cond, numbers.Real):
continue
else:
site_str = '%s_%s' % (site, cond)
site_strs.append(site_str)
return '%s_%s' % (mp.monomer.name, '_'.join(site_strs))
def _im_to_signed_digraph(im):
edges = []
for e in im.edges():
edge_sign = _get_edge_sign(im, e)
polarity = 0 if edge_sign > 0 else 1
edges.append((e[0], e[1], {'sign': polarity}))
dg = nx.DiGraph()
dg.add_edges_from(edges)
return dg
def stmts_for_path(path, model, stmts):
path_stmts = []
for path_rule, sign in path:
for rule in model.rules:
if rule.name == path_rule:
stmt = _stmt_from_rule(model, path_rule, stmts)
path_stmts.append(stmt)
return path_stmts
def _stmt_from_rule(model, rule_name, stmts):
"""Return the INDRA Statement corresponding to a given rule by name."""
stmt_uuid = None
for ann in model.annotations:
if ann.predicate == 'from_indra_statement':
if ann.subject == rule_name:
stmt_uuid = ann.object
break
if stmt_uuid:
for stmt in stmts:
if stmt.uuid == stmt_uuid:
return stmt
| 41.405819
| 80
| 0.595477
|
4a133a73e9d95b5c090999e0a07b0293dfbbe2f3
| 561
|
py
|
Python
|
discovery-infra/test_infra/utils/cluster_name.py
|
mkowalski/assisted-test-infra
|
7584c25dd96db54653026a271738c97bca1ab4cc
|
[
"Apache-2.0"
] | null | null | null |
discovery-infra/test_infra/utils/cluster_name.py
|
mkowalski/assisted-test-infra
|
7584c25dd96db54653026a271738c97bca1ab4cc
|
[
"Apache-2.0"
] | null | null | null |
discovery-infra/test_infra/utils/cluster_name.py
|
mkowalski/assisted-test-infra
|
7584c25dd96db54653026a271738c97bca1ab4cc
|
[
"Apache-2.0"
] | null | null | null |
import uuid
from dataclasses import dataclass
from test_infra import consts
from test_infra.utils import get_env
@dataclass
class ClusterName:
suffix: str = str(uuid.uuid4())[: consts.SUFFIX_LENGTH]
prefix: str = get_env("CLUSTER_NAME", f"{consts.CLUSTER_PREFIX}")
def __str__(self):
return self.get()
def __repr__(self):
return self.get()
def get(self):
name = self.prefix
if self.prefix == consts.CLUSTER_PREFIX and self.suffix:
name = self.prefix + "-" + self.suffix
return name
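# Usage sketch (values illustrative): with consts.CLUSTER_PREFIX equal to
# "test-infra" and no CLUSTER_NAME override, str(ClusterName()) yields
# something like "test-infra-1a2b3c". Note that the dataclass default for
# `suffix` is evaluated once at import time, so instances created without an
# explicit suffix share the same random suffix.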
| 22.44
| 69
| 0.659537
|
4a133ad2911344461846f9a78ad80da95de71394
| 5,165
|
py
|
Python
|
scitbx/examples/chebyshev_lsq_example.py
|
hbrunie/cctbx_project
|
2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5
|
[
"BSD-3-Clause-LBNL"
] | 2
|
2021-03-18T12:31:57.000Z
|
2022-03-14T06:27:06.000Z
|
scitbx/examples/chebyshev_lsq_example.py
|
hbrunie/cctbx_project
|
2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
scitbx/examples/chebyshev_lsq_example.py
|
hbrunie/cctbx_project
|
2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2021-03-26T12:52:30.000Z
|
2021-03-26T12:52:30.000Z
|
from __future__ import absolute_import, division, print_function
from scitbx.array_family import flex
from scitbx.math import chebyshev_polynome
from scitbx.math import chebyshev_lsq_fit
from six.moves import cStringIO as StringIO
from six.moves import range
from six.moves import zip
def example():
x_obs = (flex.double(range(100))+1.0)/101.0
y_ideal = flex.sin(x_obs*6.0*3.1415) + flex.exp(x_obs)
y_obs = y_ideal + (flex.random_double(size=x_obs.size())-0.5)*0.5
w_obs = flex.double(x_obs.size(),1)
print("Trying to determine the best number of terms ")
print(" via cross validation techniques")
print()
n_terms = chebyshev_lsq_fit.cross_validate_to_determine_number_of_terms(
x_obs,y_obs,w_obs,
min_terms=5 ,max_terms=20,
n_goes=20,n_free=20)
print("Fitting with", n_terms, "terms")
print()
fit = chebyshev_lsq_fit.chebyshev_lsq_fit(n_terms,x_obs,y_obs)
print("Least Squares residual: %7.6f" %(fit.f))
print(" R2-value : %7.6f" %(fit.f/flex.sum(y_obs*y_obs)))
print()
fit_funct = chebyshev_polynome(
n_terms, fit.low_limit, fit.high_limit, fit.coefs)
y_fitted = fit_funct.f(x_obs)
abs_deviation = flex.max(
flex.abs( (y_ideal- y_fitted) ) )
print("Maximum deviation between fitted and error free data:")
print(" %4.3f" %(abs_deviation))
abs_deviation = flex.mean(
flex.abs( (y_ideal- y_fitted) ) )
print("Mean deviation between fitted and error free data:")
print(" %4.3f" %(abs_deviation))
print()
abs_deviation = flex.max(
flex.abs( (y_obs- y_fitted) ) )
print("Maximum deviation between fitted and observed data:")
print(" %4.3f" %(abs_deviation))
abs_deviation = flex.mean(
flex.abs( (y_obs- y_fitted) ) )
print("Mean deviation between fitted and observed data:")
print(" %4.3f" %(abs_deviation))
print()
print("Showing 10 points")
print(" x y_obs y_ideal y_fit")
for ii in range(10):
print("%6.3f %6.3f %6.3f %6.3f" \
%(x_obs[ii*9], y_obs[ii*9], y_ideal[ii*9], y_fitted[ii*9]))
try:
from iotbx import data_plots
except ImportError:
pass
else:
print("Preparing output for loggraph in a file called")
print(" chebyshev.loggraph")
chebyshev_plot = data_plots.plot_data(plot_title='Chebyshev fitting',
x_label = 'x values',
y_label = 'y values',
x_data = x_obs,
y_data = y_obs,
y_legend = 'Observed y values',
comments = 'Chebyshev fit')
chebyshev_plot.add_data(y_data=y_ideal,
y_legend='Error free y values')
chebyshev_plot.add_data(y_data=y_fitted,
y_legend='Fitted chebyshev approximation')
output_logfile=open('chebyshev.loggraph','w')
f = StringIO()
data_plots.plot_data_loggraph(chebyshev_plot,f)
output_logfile.write(f.getvalue())
def another_example(np=41,nt=5):
x = flex.double( range(np) )/(np-1)
y = 0.99*flex.exp(-x*x*0.5)
y = -flex.log(1.0/y-1)
w = y*y/1.0
d = (flex.random_double(np)-0.5)*w
y_obs = y+d
y = 1.0/( 1.0 + flex.exp(-y) )
fit_w = chebyshev_lsq_fit.chebyshev_lsq_fit(nt,
x,
y_obs,
w )
fit_w_f = chebyshev_polynome(
nt, fit_w.low_limit, fit_w.high_limit, fit_w.coefs)
fit_nw = chebyshev_lsq_fit.chebyshev_lsq_fit(nt,
x,
y_obs)
fit_nw_f = chebyshev_polynome(
nt, fit_nw.low_limit, fit_nw.high_limit, fit_nw.coefs)
print()
print("Coefficients from weighted lsq")
print(list( fit_w.coefs ))
print("Coefficients from non-weighted lsq")
print(list( fit_nw.coefs ))
assert flex.max( flex.abs(fit_nw.coefs-fit_w.coefs) ) > 0
def runge_phenomenon(n=41, nt=35, print_it=False):
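    # The Runge function 1/(1 + 25 x^2) is the classic case where high-degree
    # fits on equidistant nodes (fit_e) oscillate near the interval ends,
    # while Chebyshev nodes (fit_c) suppress the oscillation.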
x_e = 2.0*(flex.double( range(n) )/float(n-1)-0.5)
y_e = 1/(1+x_e*x_e*25)
fit_e = chebyshev_lsq_fit.chebyshev_lsq_fit(nt,
x_e,
y_e,
)
fit_e = chebyshev_polynome(
nt, fit_e.low_limit, fit_e.high_limit, fit_e.coefs)
x_c = chebyshev_lsq_fit.chebyshev_nodes(n, -1, 1, True)
y_c = 1/(1+x_c*x_c*25)
fit_c = chebyshev_lsq_fit.chebyshev_lsq_fit(nt,
x_c,
y_c,
)
fit_c = chebyshev_polynome(
nt, fit_c.low_limit, fit_c.high_limit, fit_c.coefs)
x_plot = 2.0*(flex.double( range(3*n) )/float(3*n-1)-0.5)
y_plot_e = fit_e.f( x_plot )
y_plot_c = fit_c.f( x_plot )
y_id = 1/(1+x_plot*x_plot*25)
if print_it:
for x,y,yy,yyy in zip(x_plot,y_id,y_plot_e,y_plot_c):
print(x,y,yy,yyy)
if (__name__ == "__main__"):
example()
another_example()
runge_phenomenon(10)
| 34.433333
| 74
| 0.582381
|
4a133bff451fc87804bd6faf0276f85e7944e13e
| 1,681
|
py
|
Python
|
src/sentinel/azext_sentinel/vendored_sdks/security_insights/models/data_connectors_check_requirements_py3.py
|
hpsan/azure-cli-extensions
|
be1589bb6dd23837796e088d28e65e873050171e
|
[
"MIT"
] | null | null | null |
src/sentinel/azext_sentinel/vendored_sdks/security_insights/models/data_connectors_check_requirements_py3.py
|
hpsan/azure-cli-extensions
|
be1589bb6dd23837796e088d28e65e873050171e
|
[
"MIT"
] | null | null | null |
src/sentinel/azext_sentinel/vendored_sdks/security_insights/models/data_connectors_check_requirements_py3.py
|
hpsan/azure-cli-extensions
|
be1589bb6dd23837796e088d28e65e873050171e
|
[
"MIT"
] | 1
|
2020-07-16T23:49:49.000Z
|
2020-07-16T23:49:49.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DataConnectorsCheckRequirements(Model):
"""Data connector requirements properties.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AADCheckRequirements, AATPCheckRequirements,
ASCCheckRequirements, AwsCloudTrailCheckRequirements,
MCASCheckRequirements, MDATPCheckRequirements, TICheckRequirements,
TiTaxiiCheckRequirements
All required parameters must be populated in order to send to Azure.
:param kind: Required. Constant filled by server.
:type kind: str
"""
_validation = {
'kind': {'required': True},
}
_attribute_map = {
'kind': {'key': 'kind', 'type': 'str'},
}
_subtype_map = {
'kind': {'AzureActiveDirectory': 'AADCheckRequirements', 'AzureAdvancedThreatProtection': 'AATPCheckRequirements', 'AzureSecurityCenter': 'ASCCheckRequirements', 'AmazonWebServicesCloudTrail': 'AwsCloudTrailCheckRequirements', 'MicrosoftCloudAppSecurity': 'MCASCheckRequirements', 'MicrosoftDefenderAdvancedThreatProtection': 'MDATPCheckRequirements', 'ThreatIntelligence': 'TICheckRequirements', 'ThreatIntelligenceTaxii': 'TiTaxiiCheckRequirements'}
}
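    # The 'kind' discriminator above drives msrest's polymorphic
    # deserialization: an incoming payload is instantiated as the mapped
    # sub-class (e.g. kind == 'AzureActiveDirectory' -> AADCheckRequirements).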
def __init__(self, **kwargs) -> None:
super(DataConnectorsCheckRequirements, self).__init__(**kwargs)
self.kind = None
| 41
| 459
| 0.675193
|
4a133c0a05904ea1ddee4df6405f53017d07c94a
| 8,362
|
py
|
Python
|
var/spack/repos/builtin/packages/dihydrogen/package.py
|
wscullin/spack
|
ace3753076941ed8b642864b36305aecbe2bd35b
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/dihydrogen/package.py
|
wscullin/spack
|
ace3753076941ed8b642864b36305aecbe2bd35b
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 5
|
2021-07-26T03:14:25.000Z
|
2022-03-31T03:19:31.000Z
|
var/spack/repos/builtin/packages/dihydrogen/package.py
|
wscullin/spack
|
ace3753076941ed8b642864b36305aecbe2bd35b
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Dihydrogen(CMakePackage, CudaPackage, ROCmPackage):
"""DiHydrogen is the second version of the Hydrogen fork of the
well-known distributed linear algebra library,
Elemental. DiHydrogen aims to be a basic distributed
multilinear algebra interface with a particular emphasis on the
needs of the distributed machine learning effort, LBANN."""
homepage = "https://github.com/LLNL/DiHydrogen.git"
url = "https://github.com/LLNL/DiHydrogen/archive/v0.1.tar.gz"
git = "https://github.com/LLNL/DiHydrogen.git"
maintainers = ['bvanessen']
version('develop', branch='develop')
version('master', branch='master')
version('0.2.1', sha256='11e2c0f8a94ffa22e816deff0357dde6f82cc8eac21b587c800a346afb5c49ac')
version('0.2.0', sha256='e1f597e80f93cf49a0cb2dbc079a1f348641178c49558b28438963bd4a0bdaa4')
version('0.1', sha256='171d4b8adda1e501c38177ec966e6f11f8980bf71345e5f6d87d0a988fef4c4e')
variant('al', default=True,
description='Builds with Aluminum communication library')
variant('developer', default=False,
description='Enable extra warnings and force tests to be enabled.')
variant('half', default=False,
description='Enable FP16 support on the CPU.')
variant('distconv', default=False,
description='Support distributed convolutions: spatial, channel, '
'filter.')
variant('nvshmem', default=False,
description='Builds with support for NVSHMEM')
variant('openmp', default=False,
description='Enable CPU acceleration with OpenMP threads.')
variant('rocm', default=False,
description='Enable ROCm/HIP language features.')
variant('shared', default=True,
description='Enables the build of shared libraries')
variant('docs', default=False,
description='Builds with support for building documentation')
# Variants related to BLAS
variant('openmp_blas', default=False,
description='Use OpenMP for threading in the BLAS library')
variant('int64_blas', default=False,
description='Use 64bit integers for BLAS.')
variant('blas', default='openblas', values=('openblas', 'mkl', 'accelerate', 'essl'),
description='Enable the use of OpenBlas/MKL/Accelerate/ESSL')
conflicts('~cuda', when='+nvshmem')
depends_on('mpi')
depends_on('catch2', type='test')
# Specify the correct version of Aluminum
depends_on('aluminum@0.4:0.4.99', when='@0.1:0.1.99 +al')
depends_on('aluminum@0.5.0:0.5.99', when='@0.2.0 +al')
depends_on('aluminum@0.7.0:0.7.99', when='@0.2.1 +al')
depends_on('aluminum@0.7.0:', when='@:0.0,0.2.1: +al')
# Add Aluminum variants
depends_on('aluminum +cuda +nccl +ht +cuda_rma', when='+al +cuda')
depends_on('aluminum +rocm +rccl +ht', when='+al +rocm')
for arch in CudaPackage.cuda_arch_values:
depends_on('aluminum cuda_arch=%s' % arch, when='+al +cuda cuda_arch=%s' % arch)
# variants +rocm and amdgpu_targets are not automatically passed to
# dependencies, so do it manually.
for val in ROCmPackage.amdgpu_targets:
depends_on('aluminum amdgpu_target=%s' % val, when='amdgpu_target=%s' % val)
for when in ['+cuda', '+distconv']:
depends_on('cuda', when=when)
depends_on('cudnn', when=when)
depends_on('cub', when='^cuda@:10.99')
# Note that #1712 forces us to enumerate the different blas variants
depends_on('openblas', when='blas=openblas')
depends_on('openblas +ilp64', when='blas=openblas +int64_blas')
depends_on('openblas threads=openmp', when='blas=openblas +openmp_blas')
depends_on('intel-mkl', when="blas=mkl")
depends_on('intel-mkl +ilp64', when="blas=mkl +int64_blas")
depends_on('intel-mkl threads=openmp', when='blas=mkl +openmp_blas')
depends_on('veclibfort', when='blas=accelerate')
conflicts('blas=accelerate +openmp_blas')
depends_on('essl', when='blas=essl')
depends_on('essl +ilp64', when='blas=essl +int64_blas')
depends_on('essl threads=openmp', when='blas=essl +openmp_blas')
depends_on('netlib-lapack +external-blas', when='blas=essl')
# Distconv builds require cuda
conflicts('~cuda', when='+distconv')
conflicts('+distconv', when='+half')
conflicts('+rocm', when='+half')
depends_on('half', when='+half')
generator = 'Ninja'
depends_on('ninja', type='build')
depends_on('cmake@3.17.0:', type='build')
depends_on('py-breathe', type='build', when='+docs')
depends_on('doxygen', type='build', when='+docs')
depends_on('llvm-openmp', when='%apple-clang +openmp')
depends_on('nvshmem', when='+nvshmem')
# Identify versions of cuda_arch that are too old
# from lib/spack/spack/build_systems/cuda.py
illegal_cuda_arch_values = [
'10', '11', '12', '13',
'20', '21',
]
for value in illegal_cuda_arch_values:
conflicts('cuda_arch=' + value)
@property
def libs(self):
shared = '+shared' in self.spec
return find_libraries(
'libH2Core', root=self.prefix, shared=shared, recursive=True
)
def cmake_args(self):
spec = self.spec
args = [
'-DCMAKE_CXX_STANDARD=17',
'-DCMAKE_INSTALL_MESSAGE:STRING=LAZY',
'-DBUILD_SHARED_LIBS:BOOL=%s' % ('+shared' in spec),
'-DH2_ENABLE_ALUMINUM=%s' % ('+al' in spec),
'-DH2_ENABLE_CUDA=%s' % ('+cuda' in spec),
'-DH2_ENABLE_DISTCONV_LEGACY=%s' % ('+distconv' in spec),
'-DH2_ENABLE_OPENMP=%s' % ('+openmp' in spec),
'-DH2_ENABLE_FP16=%s' % ('+half' in spec),
'-DH2_ENABLE_HIP_ROCM=%s' % ('+rocm' in spec),
'-DH2_DEVELOPER_BUILD=%s' % ('+developer' in spec),
]
if '+cuda' in spec:
if spec.satisfies('^cuda@11.0:'):
args.append('-DCMAKE_CUDA_STANDARD=17')
else:
args.append('-DCMAKE_CUDA_STANDARD=14')
archs = spec.variants['cuda_arch'].value
if archs != 'none':
arch_str = ";".join(archs)
args.append('-DCMAKE_CUDA_ARCHITECTURES=%s' % arch_str)
if '+cuda' in spec or '+distconv' in spec:
args.append('-DcuDNN_DIR={0}'.format(
spec['cudnn'].prefix))
if spec.satisfies('^cuda@:10.99'):
if '+cuda' in spec or '+distconv' in spec:
args.append('-DCUB_DIR={0}'.format(
spec['cub'].prefix))
# Add support for OpenMP with external (Brew) clang
if spec.satisfies('%clang +openmp platform=darwin'):
clang = self.compiler.cc
clang_bin = os.path.dirname(clang)
clang_root = os.path.dirname(clang_bin)
args.extend([
'-DOpenMP_CXX_FLAGS=-fopenmp=libomp',
'-DOpenMP_CXX_LIB_NAMES=libomp',
'-DOpenMP_libomp_LIBRARY={0}/lib/libomp.dylib'.format(
clang_root)])
if '+rocm' in spec:
args.extend([
'-DHIP_ROOT_DIR={0}'.format(spec['hip'].prefix),
'-DHIP_CXX_COMPILER={0}'.format(self.spec['hip'].hipcc)])
archs = self.spec.variants['amdgpu_target'].value
if archs != 'none':
arch_str = ",".join(archs)
args.append(
'-DHIP_HIPCC_FLAGS=--amdgpu-target={0}'
' -g -fsized-deallocation -fPIC'.format(arch_str)
)
return args
def setup_build_environment(self, env):
if self.spec.satisfies('%apple-clang +openmp'):
env.append_flags(
'CPPFLAGS', self.compiler.openmp_flag)
env.append_flags(
'CFLAGS', self.spec['llvm-openmp'].headers.include_flags)
env.append_flags(
'CXXFLAGS', self.spec['llvm-openmp'].headers.include_flags)
env.append_flags(
'LDFLAGS', self.spec['llvm-openmp'].libs.ld_flags)
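# Example invocation (hypothetical spec): `spack install dihydrogen +cuda
# cuda_arch=70` would exercise the CUDA branches in cmake_args above.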
| 40.009569
| 95
| 0.618871
|
4a133c3b0ed546db192d7e02472a47db9cbdfe5a
| 9,047
|
py
|
Python
|
tests/helpers/__init__.py
|
MosheFriedland/cloudbridge
|
af7644322044863d401645311c0d1f2556bccb63
|
[
"MIT"
] | 61
|
2018-07-10T18:32:43.000Z
|
2022-03-06T04:50:20.000Z
|
tests/helpers/__init__.py
|
MosheFriedland/cloudbridge
|
af7644322044863d401645311c0d1f2556bccb63
|
[
"MIT"
] | 134
|
2018-07-02T16:46:29.000Z
|
2022-02-03T17:05:43.000Z
|
tests/helpers/__init__.py
|
MosheFriedland/cloudbridge
|
af7644322044863d401645311c0d1f2556bccb63
|
[
"MIT"
] | 23
|
2018-08-07T17:33:16.000Z
|
2021-12-25T01:44:20.000Z
|
import functools
import operator
import os
import sys
import unittest
import uuid
from cloudbridge.base import helpers as cb_helpers
from cloudbridge.factory import CloudProviderFactory
from cloudbridge.interfaces import CloudProvider
from cloudbridge.interfaces import InstanceState
from cloudbridge.interfaces import TestMockHelperMixin
from cloudbridge.interfaces.resources import FloatingIpState
from cloudbridge.interfaces.resources import NetworkState
from cloudbridge.interfaces.resources import SubnetState
def parse_bool(val):
if val:
return str(val).upper() in ['TRUE', 'YES']
else:
return False
def skipIfNoService(services):
"""
A decorator for skipping tests if the provider
does not implement a given service.
"""
def wrap(func):
"""
The actual wrapper
"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
provider = getattr(self, 'provider')
if provider:
for service in services:
if not provider.has_service(service):
self.skipTest("Skipping test because '%s' service is"
" not implemented" % (service,))
func(self, *args, **kwargs)
return wrapper
return wrap
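# Usage sketch (service name illustrative):
#
#   class NetworkServiceTests(ProviderTestBase):
#       @skipIfNoService(['networking.networks'])
#       def test_crud_network(self):
#           ...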
def skipIfPython(op, major, minor):
"""
A decorator for skipping tests if the python
version doesn't match
"""
def stringToOperator(op):
op_map = {
"=": operator.eq,
"==": operator.eq,
"<": operator.lt,
"<=": operator.le,
">": operator.gt,
">=": operator.ge,
}
return op_map.get(op)
def wrap(func):
"""
The actual wrapper
"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
op_func = stringToOperator(op)
if op_func(sys.version_info, (major, minor)):
self.skipTest(
"Skipping test because python version {0} is {1} expected"
" version {2}".format(sys.version_info[:2],
op, (major, minor)))
func(self, *args, **kwargs)
return wrapper
return wrap
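# Usage sketch (version illustrative): decorating a test with
# @skipIfPython("<", 3, 6) skips it on interpreters older than Python 3.6.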
TEST_DATA_CONFIG = {
"AWSCloudProvider": {
# Match the ami value with entry in custom_amis.json for use with moto
"image": cb_helpers.get_env('CB_IMAGE_AWS', 'ami-aa2ea6d0'),
"vm_type": cb_helpers.get_env('CB_VM_TYPE_AWS', 't2.nano'),
"placement": cb_helpers.get_env('CB_PLACEMENT_AWS', 'us-east-1a'),
"placement_cfg_key": "aws_zone_name"
},
'OpenStackCloudProvider': {
'image': cb_helpers.get_env('CB_IMAGE_OS',
'c66bdfa1-62b1-43be-8964-e9ce208ac6a5'),
"vm_type": cb_helpers.get_env('CB_VM_TYPE_OS', 'm1.tiny'),
"placement": cb_helpers.get_env('CB_PLACEMENT_OS', 'nova'),
"placement_cfg_key": "os_zone_name"
},
'GCPCloudProvider': {
'image': cb_helpers.get_env(
'CB_IMAGE_GCP',
'https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/'
'global/images/ubuntu-1804-bionic-v20200908'),
'vm_type': cb_helpers.get_env('CB_VM_TYPE_GCP', 'f1-micro'),
'placement': cb_helpers.get_env('GCP_ZONE_NAME', 'us-central1-a'),
"placement_cfg_key": "gcp_zone_name"
},
"AzureCloudProvider": {
"image":
cb_helpers.get_env('CB_IMAGE_AZURE',
'Canonical:UbuntuServer:16.04.0-LTS:latest'),
"vm_type": cb_helpers.get_env('CB_VM_TYPE_AZURE', 'Basic_A2'),
"placement": cb_helpers.get_env('CB_PLACEMENT_AZURE', 'eastus'),
"placement_cfg_key": "azure_zone_name"
}
}
def get_provider_test_data(provider, key):
provider_id = (provider.PROVIDER_ID if isinstance(provider, CloudProvider)
else provider)
if "aws" == provider_id:
return TEST_DATA_CONFIG.get("AWSCloudProvider").get(key)
if "mock" == provider_id:
return TEST_DATA_CONFIG.get("AWSCloudProvider").get(key)
elif "openstack" == provider_id:
return TEST_DATA_CONFIG.get("OpenStackCloudProvider").get(key)
elif "gcp" == provider_id:
return TEST_DATA_CONFIG.get("GCPCloudProvider").get(key)
elif "azure" == provider_id:
return TEST_DATA_CONFIG.get("AzureCloudProvider").get(key)
return None
def get_or_create_default_subnet(provider):
"""
Return the default subnet to be used for tests
"""
return provider.networking.subnets.get_or_create_default()
def cleanup_subnet(subnet):
if subnet:
subnet.delete()
subnet.wait_for([SubnetState.UNKNOWN],
terminal_states=[SubnetState.ERROR])
def cleanup_network(network):
"""
Delete the supplied network, first deleting any contained subnets.
"""
if network:
try:
for sn in network.subnets:
with cb_helpers.cleanup_action(lambda: cleanup_subnet(sn)):
pass
finally:
network.delete()
network.wait_for([NetworkState.UNKNOWN],
terminal_states=[NetworkState.ERROR])
def cleanup_fip(fip):
if fip:
fip.delete()
fip.wait_for([FloatingIpState.UNKNOWN],
terminal_states=[FloatingIpState.ERROR])
def get_test_gateway(provider):
"""
Get an internet gateway for testing.
This includes creating a network for the gateway, which is also returned.
"""
sn = get_or_create_default_subnet(provider)
net = sn.network
return net.gateways.get_or_create()
def cleanup_gateway(gateway):
"""
Delete the supplied network and gateway.
"""
with cb_helpers.cleanup_action(lambda: gateway.delete()):
pass
def create_test_instance(
provider, instance_label, subnet, launch_config=None,
key_pair=None, vm_firewalls=None, user_data=None):
instance = provider.compute.instances.create(
instance_label, get_provider_test_data(provider, 'image'),
get_provider_test_data(provider, 'vm_type'),
subnet=subnet,
key_pair=key_pair,
vm_firewalls=vm_firewalls,
launch_config=launch_config,
user_data=user_data)
return instance
def get_test_instance(provider, label, key_pair=None, vm_firewalls=None,
subnet=None, user_data=None):
launch_config = None
instance = create_test_instance(
provider,
label,
subnet=subnet,
key_pair=key_pair,
vm_firewalls=vm_firewalls,
launch_config=launch_config,
user_data=user_data)
instance.wait_till_ready()
return instance
def get_test_fixtures_folder():
return os.path.join(os.path.dirname(__file__), '../fixtures/')
def delete_instance(instance):
if instance:
instance.delete()
instance.wait_for([InstanceState.DELETED, InstanceState.UNKNOWN],
terminal_states=[InstanceState.ERROR])
def cleanup_test_resources(instance=None, vm_firewall=None,
key_pair=None, network=None):
"""Clean up any combination of supplied resources."""
with cb_helpers.cleanup_action(
lambda: cleanup_network(network) if network else None):
with cb_helpers.cleanup_action(
lambda: key_pair.delete() if key_pair else None):
with cb_helpers.cleanup_action(
lambda: vm_firewall.delete() if vm_firewall else None):
delete_instance(instance)
def get_uuid():
return str(uuid.uuid4())[:6]
class ProviderTestBase(unittest.TestCase):
_provider = None
def setUp(self):
if isinstance(self.provider, TestMockHelperMixin):
self.provider.setUpMock()
def tearDown(self):
if isinstance(self.provider, TestMockHelperMixin):
self.provider.tearDownMock()
self._provider = None
def get_provider_wait_interval(self, provider_class):
if issubclass(provider_class, TestMockHelperMixin):
return 0
else:
return 1
def create_provider_instance(self):
provider_name = cb_helpers.get_env("CB_TEST_PROVIDER", "aws")
zone_cfg_key = get_provider_test_data(provider_name,
'placement_cfg_key')
factory = CloudProviderFactory()
provider_class = factory.get_provider_class(provider_name)
config = {
'default_wait_interval': self.get_provider_wait_interval(
provider_class),
'default_result_limit': 5,
zone_cfg_key: get_provider_test_data(provider_name, 'placement')
}
return provider_class(config)
@property
def provider(self):
if not self._provider:
self._provider = self.create_provider_instance()
return self._provider
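# Hedged usage sketch (not part of the original helpers): a minimal test
# built on ProviderTestBase. The label is hypothetical, and ``label`` is
# assumed to be exposed on instances per cloudbridge's resource interface;
# CB_TEST_PROVIDER selects the backend as in create_provider_instance().
class SampleInstanceTest(ProviderTestBase):
    def test_instance_lifecycle(self):
        label = "cb-sample-" + get_uuid()
        subnet = get_or_create_default_subnet(self.provider)
        instance = get_test_instance(self.provider, label, subnet=subnet)
        try:
            self.assertEqual(instance.label, label)
        finally:
            delete_instance(instance)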
| 32.08156
| 78
| 0.626174
|
4a133c66948833fd89dd39c01476822b76ff8a46
| 8,920
|
py
|
Python
|
python/http_client/v1/polyaxon_sdk/models/v1_hook.py
|
mouradmourafiq/polyaxon-client
|
5fc32b9decc7305161561d404b0127f3e900c64a
|
[
"Apache-2.0"
] | null | null | null |
python/http_client/v1/polyaxon_sdk/models/v1_hook.py
|
mouradmourafiq/polyaxon-client
|
5fc32b9decc7305161561d404b0127f3e900c64a
|
[
"Apache-2.0"
] | null | null | null |
python/http_client/v1/polyaxon_sdk/models/v1_hook.py
|
mouradmourafiq/polyaxon-client
|
5fc32b9decc7305161561d404b0127f3e900c64a
|
[
"Apache-2.0"
] | 1
|
2021-12-03T07:12:03.000Z
|
2021-12-03T07:12:03.000Z
|
#!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.18.2
Contact: contact@polyaxon.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1Hook(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'hub_ref': 'str',
'connection': 'str',
'trigger': 'V1Statuses',
'conditions': 'str',
'params': 'dict(str, V1Param)',
'queue': 'str',
'presets': 'list[str]',
'disable_defaults': 'bool'
}
attribute_map = {
'hub_ref': 'hubRef',
'connection': 'connection',
'trigger': 'trigger',
'conditions': 'conditions',
'params': 'params',
'queue': 'queue',
'presets': 'presets',
'disable_defaults': 'disableDefaults'
}
def __init__(self, hub_ref=None, connection=None, trigger=None, conditions=None, params=None, queue=None, presets=None, disable_defaults=None, local_vars_configuration=None): # noqa: E501
"""V1Hook - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._hub_ref = None
self._connection = None
self._trigger = None
self._conditions = None
self._params = None
self._queue = None
self._presets = None
self._disable_defaults = None
self.discriminator = None
if hub_ref is not None:
self.hub_ref = hub_ref
if connection is not None:
self.connection = connection
if trigger is not None:
self.trigger = trigger
if conditions is not None:
self.conditions = conditions
if params is not None:
self.params = params
if queue is not None:
self.queue = queue
if presets is not None:
self.presets = presets
if disable_defaults is not None:
self.disable_defaults = disable_defaults
@property
def hub_ref(self):
"""Gets the hub_ref of this V1Hook. # noqa: E501
:return: The hub_ref of this V1Hook. # noqa: E501
:rtype: str
"""
return self._hub_ref
@hub_ref.setter
def hub_ref(self, hub_ref):
"""Sets the hub_ref of this V1Hook.
:param hub_ref: The hub_ref of this V1Hook. # noqa: E501
:type hub_ref: str
"""
self._hub_ref = hub_ref
@property
def connection(self):
"""Gets the connection of this V1Hook. # noqa: E501
:return: The connection of this V1Hook. # noqa: E501
:rtype: str
"""
return self._connection
@connection.setter
def connection(self, connection):
"""Sets the connection of this V1Hook.
:param connection: The connection of this V1Hook. # noqa: E501
:type connection: str
"""
self._connection = connection
@property
def trigger(self):
"""Gets the trigger of this V1Hook. # noqa: E501
:return: The trigger of this V1Hook. # noqa: E501
:rtype: V1Statuses
"""
return self._trigger
@trigger.setter
def trigger(self, trigger):
"""Sets the trigger of this V1Hook.
:param trigger: The trigger of this V1Hook. # noqa: E501
:type trigger: V1Statuses
"""
self._trigger = trigger
@property
def conditions(self):
"""Gets the conditions of this V1Hook. # noqa: E501
:return: The conditions of this V1Hook. # noqa: E501
:rtype: str
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""Sets the conditions of this V1Hook.
:param conditions: The conditions of this V1Hook. # noqa: E501
:type conditions: str
"""
self._conditions = conditions
@property
def params(self):
"""Gets the params of this V1Hook. # noqa: E501
:return: The params of this V1Hook. # noqa: E501
:rtype: dict(str, V1Param)
"""
return self._params
@params.setter
def params(self, params):
"""Sets the params of this V1Hook.
:param params: The params of this V1Hook. # noqa: E501
:type params: dict(str, V1Param)
"""
self._params = params
@property
def queue(self):
"""Gets the queue of this V1Hook. # noqa: E501
:return: The queue of this V1Hook. # noqa: E501
:rtype: str
"""
return self._queue
@queue.setter
def queue(self, queue):
"""Sets the queue of this V1Hook.
:param queue: The queue of this V1Hook. # noqa: E501
:type queue: str
"""
self._queue = queue
@property
def presets(self):
"""Gets the presets of this V1Hook. # noqa: E501
:return: The presets of this V1Hook. # noqa: E501
:rtype: list[str]
"""
return self._presets
@presets.setter
def presets(self, presets):
"""Sets the presets of this V1Hook.
:param presets: The presets of this V1Hook. # noqa: E501
:type presets: list[str]
"""
self._presets = presets
@property
def disable_defaults(self):
"""Gets the disable_defaults of this V1Hook. # noqa: E501
:return: The disable_defaults of this V1Hook. # noqa: E501
:rtype: bool
"""
return self._disable_defaults
@disable_defaults.setter
def disable_defaults(self, disable_defaults):
"""Sets the disable_defaults of this V1Hook.
:param disable_defaults: The disable_defaults of this V1Hook. # noqa: E501
:type disable_defaults: bool
"""
self._disable_defaults = disable_defaults
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Hook):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Hook):
return True
return self.to_dict() != other.to_dict()
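# Hedged usage sketch (not generated code): constructing a V1Hook and
# round-tripping it through to_dict(). The hub reference is illustrative.
if __name__ == "__main__":
    hook = V1Hook(hub_ref="slack-notify", presets=["defaults"])
    assert hook.to_dict()["hub_ref"] == "slack-notify"
    # serialize=True maps attributes through attribute_map (camelCase keys)
    assert hook.to_dict(serialize=True)["hubRef"] == "slack-notify"
    print(hook)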
| 26.94864
| 192
| 0.585426
|
4a133d3593f1d6ff30b6924e01dd5fa5b9b89251
| 3,472
|
py
|
Python
|
tests/util/test_config.py
|
Palisand/ambramelin
|
264da5c3592dc9287bdda3c1383a04420439d07b
|
[
"MIT"
] | null | null | null |
tests/util/test_config.py
|
Palisand/ambramelin
|
264da5c3592dc9287bdda3c1383a04420439d07b
|
[
"MIT"
] | null | null | null |
tests/util/test_config.py
|
Palisand/ambramelin
|
264da5c3592dc9287bdda3c1383a04420439d07b
|
[
"MIT"
] | null | null | null |
import json
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
import cattr
import pytest
from pytest_mock import MockerFixture
from ambramelin.util import config as util_config
from ambramelin.util.config import Config, Environment, User
class TestLoadConfig:
def test_with_file(self, mocker: MockerFixture) -> None:
config = Config(
current="envname",
envs={"envname": Environment(url="envurl", user="username")},
users={"username": User(credentials_manager="dummy")},
)
with NamedTemporaryFile() as conf_file:
conf_file.write(json.dumps(cattr.unstructure(config), indent=2).encode())
conf_file.seek(0)
mocker.patch.object(
util_config, "_get_config_path", return_value=Path(conf_file.name)
)
assert util_config.load_config() == config
def test_with_no_file(self, mocker: MockerFixture) -> None:
mocker.patch.object(
util_config, "_get_config_path", return_value=Path("nonexistent")
)
assert util_config.load_config() == Config()
def test_save_config(mocker: MockerFixture) -> None:
config = Config(
current="envname",
envs={"envname": Environment(url="envurl", user="username")},
users={"username": User(credentials_manager="dummy")},
)
with TemporaryDirectory() as tmp:
path = Path(tmp) / "config.json"
mocker.patch.object(util_config, "_get_config_path", return_value=path)
util_config.save_config(config)
with path.open("r") as f:
assert cattr.structure(json.loads(f.read()), Config) == config
def test_update_config(mocker: MockerFixture) -> None:
mocker.patch.object(util_config, "load_config", return_value=Config())
mock_save_config = mocker.patch.object(util_config, "save_config")
with util_config.update_config() as config:
config.current = "current"
mock_save_config.assert_called_once_with(config)
@pytest.mark.parametrize(
"config,result",
(
(Config(envs={"env": Environment(url="")}), True),
(Config(), False),
),
)
def test_envs_added(config: Config, result: bool) -> None:
assert util_config.envs_added(config) is result
@pytest.mark.parametrize(
"config,result",
(
(Config(current="env"), True),
(Config(), False),
),
)
def test_env_selected(config: Config, result: bool) -> None:
assert util_config.env_selected(config) is result
@pytest.mark.parametrize(
"env_name,result",
(
("env1", True),
("env2", False),
),
)
def test_env_exists(env_name: str, result: bool) -> None:
assert (
util_config.env_exists(Config(envs={"env1": Environment(url="")}), env_name)
is result
)
@pytest.mark.parametrize(
"config,result",
(
(Config(users={"user": User(credentials_manager="keychain")}), True),
(Config(), False),
),
)
def test_users_added(config: Config, result: bool) -> None:
assert util_config.users_added(config) is result
@pytest.mark.parametrize(
"user_name,result",
(
("user1", True),
("user2", False),
),
)
def test_user_exists(user_name: str, result: bool) -> None:
assert (
util_config.user_exists(
Config(users={"user1": User(credentials_manager="keychain")}), user_name
)
is result
)
| 28
| 85
| 0.642857
|
4a133ddbe3f1745f7993fe6ffef695c0e1730bc8
| 1,994
|
py
|
Python
|
src_py/elf/zmq_util.py
|
r-woo/elfai
|
2c37625e608e7720b8bd7847419d7b53e87e260a
|
[
"BSD-3-Clause"
] | 3,305
|
2018-05-02T17:41:36.000Z
|
2022-03-28T05:57:56.000Z
|
src_py/elf/zmq_util.py
|
r-woo/elfai
|
2c37625e608e7720b8bd7847419d7b53e87e260a
|
[
"BSD-3-Clause"
] | 135
|
2018-05-02T19:25:13.000Z
|
2020-08-20T02:39:14.000Z
|
src_py/elf/zmq_util.py
|
r-woo/elfai
|
2c37625e608e7720b8bd7847419d7b53e87e260a
|
[
"BSD-3-Clause"
] | 604
|
2018-05-02T19:38:45.000Z
|
2022-03-18T10:01:57.000Z
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import zmq
class ZMQCtx:
def __init__(self):
pass
    def __enter__(self):
        pass
    def __exit__(self, ty, value, tb):
        # Suppress any exception raised inside the block (e.g. a zmq
        # send/recv timeout) so callers fall through to the failure
        # return values that follow their ``with`` statements.
        if value is not None:
            # print(value)
            pass
        return True
class ZMQSender:
def __init__(self, addr, identity, send_timeout=0, recv_timeout=0):
self.ctx = zmq.Context()
self.ctx.setsockopt(zmq.IPV6, 1)
self.sender = self.ctx.socket(zmq.DEALER)
self.sender.identity = identity.encode('ascii')
# self.sender.set_hwm(10000)
if send_timeout > 0:
self.sender.SNDTIMEO = send_timeout
if recv_timeout > 0:
self.sender.RCVTIMEO = recv_timeout
self.sender.connect(addr)
def Send(self, msg, copy=False):
with ZMQCtx():
self.sender.send(msg, copy=copy)
return True
return False
def Receive(self):
with ZMQCtx():
return self.sender.recv()
return None
class ZMQReceiver:
def __init__(self, addr, timeout=0):
self.ctx = zmq.Context()
self.ctx.setsockopt(zmq.IPV6, 1)
self.receiver = self.ctx.socket(zmq.ROUTER)
# self.receiver.set_hwm(10000)
if timeout > 0:
self.receiver.RCVTIMEO = timeout
self.receiver.bind(addr)
def Send(self, identity, msg):
with ZMQCtx():
self.receiver.send_multipart([identity, msg])
return True
return False
def Receive(self):
# return identity, msg
with ZMQCtx():
identity, msg = self.receiver.recv_multipart()
# print(identity)
# print(msg)
return identity, msg
return None, None
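# Hedged usage sketch: a local DEALER/ROUTER round trip. The address and
# identity are illustrative; because ZMQCtx suppresses errors, Send() and
# Receive() fall through to their failure returns on timeout.
if __name__ == "__main__":
    receiver = ZMQReceiver("tcp://127.0.0.1:5555", timeout=1000)
    sender = ZMQSender("tcp://127.0.0.1:5555", "worker-0",
                       send_timeout=1000, recv_timeout=1000)
    sender.Send(b"hello")
    identity, msg = receiver.Receive()
    print(identity, msg)  # b'worker-0' b'hello'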
| 25.896104
| 71
| 0.585757
|
4a133e8efdcb369e2805a7147ce7096ab5faf268
| 1,843
|
py
|
Python
|
httpx/__init__.py
|
bandoche/httpx
|
b23420392efdcc10f3d802f335739d9cb3d72d5c
|
[
"BSD-3-Clause"
] | null | null | null |
httpx/__init__.py
|
bandoche/httpx
|
b23420392efdcc10f3d802f335739d9cb3d72d5c
|
[
"BSD-3-Clause"
] | null | null | null |
httpx/__init__.py
|
bandoche/httpx
|
b23420392efdcc10f3d802f335739d9cb3d72d5c
|
[
"BSD-3-Clause"
] | null | null | null |
from .__version__ import __description__, __title__, __version__
from .api import delete, get, head, options, patch, post, put, request, stream
from .auth import Auth, BasicAuth, DigestAuth
from .client import AsyncClient, Client
from .config import PoolLimits, Proxy, Timeout
from .dispatch.asgi import ASGIDispatch
from .dispatch.wsgi import WSGIDispatch
from .exceptions import (
ConnectionClosed,
ConnectTimeout,
CookieConflict,
DecodingError,
HTTPError,
InvalidURL,
NotRedirectResponse,
PoolTimeout,
ProtocolError,
ProxyError,
ReadTimeout,
RedirectLoop,
RequestBodyUnavailable,
RequestNotRead,
ResponseClosed,
ResponseNotRead,
StreamConsumed,
TimeoutException,
TooManyRedirects,
WriteTimeout,
)
from .models import URL, Cookies, Headers, QueryParams, Request, Response
from .status_codes import StatusCode, codes
__all__ = [
"__description__",
"__title__",
"__version__",
"delete",
"get",
"head",
"options",
"patch",
"post",
"patch",
"put",
"request",
"stream",
"codes",
"ASGIDispatch",
"AsyncClient",
"Auth",
"BasicAuth",
"Client",
"DigestAuth",
"PoolLimits",
"Proxy",
"Timeout",
"ConnectTimeout",
"CookieConflict",
"ConnectionClosed",
"DecodingError",
"HTTPError",
"InvalidURL",
"NotRedirectResponse",
"PoolTimeout",
"ProtocolError",
"ReadTimeout",
"RedirectLoop",
"RequestBodyUnavailable",
"ResponseClosed",
"ResponseNotRead",
"RequestNotRead",
"StreamConsumed",
"ProxyError",
"TooManyRedirects",
"WriteTimeout",
"URL",
"StatusCode",
"Cookies",
"Headers",
"QueryParams",
"Request",
"TimeoutException",
"Response",
"DigestAuth",
"WSGIDispatch",
]
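# Hedged usage sketch of the surface re-exported above (kept as comments
# so importing this package stays side-effect free; any reachable URL
# works in place of the example host):
#   import httpx
#   response = httpx.get("https://example.org")
#   print(response.status_code, response.headers["content-type"])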
| 21.183908
| 78
| 0.652198
|
4a133eb0501bfe4baeed13e705ac5c831c7645fb
| 2,174
|
py
|
Python
|
nessai/utils/logging.py
|
Rodrigo-Tenorio/nessai
|
2b4175da61b3a7250d1154a126ad93481836df0d
|
[
"MIT"
] | 16
|
2021-02-18T00:04:54.000Z
|
2021-09-01T03:25:45.000Z
|
nessai/utils/logging.py
|
Rodrigo-Tenorio/nessai
|
2b4175da61b3a7250d1154a126ad93481836df0d
|
[
"MIT"
] | 59
|
2021-03-09T11:05:37.000Z
|
2022-03-30T14:21:14.000Z
|
nessai/utils/logging.py
|
Rodrigo-Tenorio/nessai
|
2b4175da61b3a7250d1154a126ad93481836df0d
|
[
"MIT"
] | 1
|
2022-03-25T12:28:16.000Z
|
2022-03-25T12:28:16.000Z
|
# -*- coding: utf-8 -*-
"""
Utilities related to logging.
"""
import logging
import os
def setup_logger(output=None, label='nessai', log_level='WARNING'):
"""
    Set up the logger.
Based on the implementation in Bilby:
https://git.ligo.org/lscsoft/bilby/-/blob/master/bilby/core/utils/log.py
Parameters
----------
output : str, optional
        Path to the output directory.
label : str, optional
Label for this instance of the logger.
log_level : {'ERROR', 'WARNING', 'INFO', 'DEBUG'}, optional
Level of logging passed to logger.
Returns
-------
:obj:`logging.Logger`
Instance of the Logger class.
"""
from .. import __version__ as version
if type(log_level) is str:
try:
level = getattr(logging, log_level.upper())
except AttributeError:
raise ValueError('log_level {} not understood'.format(log_level))
else:
level = int(log_level)
logger = logging.getLogger('nessai')
logger.setLevel(level)
    if not any(type(h) is logging.StreamHandler for h in logger.handlers):
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter(
'%(asctime)s %(name)s %(levelname)-8s: %(message)s',
datefmt='%m-%d %H:%M'))
stream_handler.setLevel(level)
logger.addHandler(stream_handler)
    if not any(type(h) is logging.FileHandler for h in logger.handlers):
if label:
if output:
if not os.path.exists(output):
os.makedirs(output, exist_ok=True)
else:
output = '.'
log_file = os.path.join(output, f'{label}.log')
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)-8s: %(message)s', datefmt='%H:%M'))
file_handler.setLevel(level)
logger.addHandler(file_handler)
for handler in logger.handlers:
handler.setLevel(level)
logger.info(f'Running Nessai version {version}')
return logger
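# Hedged usage sketch (kept as comments: the relative import of
# ``__version__`` above means this module only runs inside the package).
# The output directory name is illustrative:
#   from nessai.utils.logging import setup_logger
#   logger = setup_logger(output="outdir", label="nessai", log_level="DEBUG")
#   logger.debug("sampler configured")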
| 30.194444
| 79
| 0.600276
|
4a133fd5e27fab96c7d58c75d7102735286243e3
| 1,658
|
py
|
Python
|
config/wsgi.py
|
mamecheikh-debug/gaynde
|
aabad48fd411df52285f3da83617643bd60a6a96
|
[
"MIT"
] | null | null | null |
config/wsgi.py
|
mamecheikh-debug/gaynde
|
aabad48fd411df52285f3da83617643bd60a6a96
|
[
"MIT"
] | null | null | null |
config/wsgi.py
|
mamecheikh-debug/gaynde
|
aabad48fd411df52285f3da83617643bd60a6a96
|
[
"MIT"
] | null | null | null |
"""
WSGI config for Gaynde project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from pathlib import Path
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# gaynde directory.
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent
sys.path.append(str(ROOT_DIR / "gaynde"))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 42.512821
| 79
| 0.801568
|
4a1343045511fa73dcec00afc8683a2b799e6b68
| 1,487
|
py
|
Python
|
tests/python/test_struct.py
|
xwang186/taichi
|
1a8ec6ebfae8b3859fd91d4889e2de3c12f1dde2
|
[
"MIT"
] | null | null | null |
tests/python/test_struct.py
|
xwang186/taichi
|
1a8ec6ebfae8b3859fd91d4889e2de3c12f1dde2
|
[
"MIT"
] | null | null | null |
tests/python/test_struct.py
|
xwang186/taichi
|
1a8ec6ebfae8b3859fd91d4889e2de3c12f1dde2
|
[
"MIT"
] | null | null | null |
import taichi as ti
from tests import test_utils
@test_utils.test()
def test_linear():
x = ti.field(ti.i32)
y = ti.field(ti.i32)
n = 128
ti.root.dense(ti.i, n).place(x)
ti.root.dense(ti.i, n).place(y)
for i in range(n):
x[i] = i
y[i] = i + 123
for i in range(n):
assert x[i] == i
assert y[i] == i + 123
def test_linear_repeated():
for i in range(10):
test_linear()
@test_utils.test()
def test_linear_nested():
x = ti.field(ti.i32)
y = ti.field(ti.i32)
n = 128
ti.root.dense(ti.i, n // 16).dense(ti.i, 16).place(x)
ti.root.dense(ti.i, n // 16).dense(ti.i, 16).place(y)
for i in range(n):
x[i] = i
y[i] = i + 123
for i in range(n):
assert x[i] == i
assert y[i] == i + 123
@test_utils.test()
def test_linear_nested_aos():
x = ti.field(ti.i32)
y = ti.field(ti.i32)
n = 128
ti.root.dense(ti.i, n // 16).dense(ti.i, 16).place(x, y)
for i in range(n):
x[i] = i
y[i] = i + 123
for i in range(n):
assert x[i] == i
assert y[i] == i + 123
@test_utils.test(exclude=[ti.vulkan])
def test_2d_nested():
x = ti.field(ti.i32)
n = 128
ti.root.dense(ti.ij, n // 16).dense(ti.ij, (32, 16)).place(x)
for i in range(n * 2):
for j in range(n):
x[i, j] = i + j * 10
for i in range(n * 2):
for j in range(n):
assert x[i, j] == i + j * 10
| 18.358025
| 65
| 0.507061
|
4a1343771bb00d447ef95f72b517c9ed03fc3c33
| 7,263
|
py
|
Python
|
skbot/transform/simplfy.py
|
FirefoxMetzger/scikit-bot
|
ee6f1d3451a3c61a6fa122cc42efc4dd67afc9c9
|
[
"Apache-2.0"
] | 3
|
2021-09-09T08:33:06.000Z
|
2021-12-22T13:51:49.000Z
|
skbot/transform/simplfy.py
|
FirefoxMetzger/scikit-bot
|
ee6f1d3451a3c61a6fa122cc42efc4dd67afc9c9
|
[
"Apache-2.0"
] | 31
|
2021-08-12T08:12:58.000Z
|
2022-03-21T23:16:36.000Z
|
skbot/transform/simplfy.py
|
FirefoxMetzger/scikit-bot
|
ee6f1d3451a3c61a6fa122cc42efc4dd67afc9c9
|
[
"Apache-2.0"
] | null | null | null |
from typing import List
from .base import CompundLink, Link, InvertLink
from .affine import AffineCompound, Translation, Rotation
from .joints import Joint
import numpy as np
def simplify_links(
links: List[Link],
*,
keep_links: List[Link] = None,
keep_joints: bool = False,
eps: float = 1e-16
) -> List[Link]:
"""Simplify a transformation sequence.
.. currentmodule:: skbot.transform
This function attempts to optimize the given transformation sequence by
reducing the number of transformations involved. For this it may replace or
modify any link in the sequence with the exception of those listed in
``keep_links``. Concretely it does the following modifications:
- It (recursively) flattens :class:`CompoundLinks <CompundLink>`.
- It replaces double inversions with the original link.
- It drops 0 degree :class:`Rotations <Rotation>` (identities).
- It drops 0 amount :class:`Translations <Translation>` (identities).
- It combines series of translations into a single translation.
- It sorts translations before rotations.
.. versionadded:: 0.10.0
Parameters
----------
links : List[Link]
The list of links to simplify.
keep_links : List[Link]
        A list of links that, if present, should not be simplified.
    keep_joints : bool
        If True, treat tf.Joint instances as if they were in ``keep_links``.
eps : float
The number below which angles and translations are interpreted as 0.
Defaults to ``1e-16``.
Returns
-------
improved_links : List[Link]
A new list of links that is a simplified version of the initial list.
"""
if keep_links is None:
keep_links = list()
def simplify(links: List[Link]) -> List[Link]:
improved_links: List[Link] = list()
for idx in range(len(links)):
link = links[idx]
# skip if link should not be modified
if link in keep_links or (isinstance(link, Joint) and keep_joints):
improved_links.append(link)
continue
# resolve inversions
if isinstance(link, InvertLink):
inverted_link = link._forward_link
# still don't touch keep links
if inverted_link in keep_links or (
isinstance(inverted_link, Joint) and keep_joints
):
improved_links.append(link)
continue
# double inverse
if isinstance(inverted_link, InvertLink):
improved_links.append(inverted_link._forward_link)
continue
# inverted compound link
if isinstance(inverted_link, (CompundLink, AffineCompound)):
for sub_link in reversed(inverted_link._links):
improved_links.append(InvertLink(sub_link))
continue
# inverted translation
if isinstance(inverted_link, Translation):
resolved = Translation(
inverted_link.direction,
amount=-inverted_link.amount,
axis=inverted_link._axis,
)
improved_links.append(resolved)
continue
# inverted rotation
if isinstance(inverted_link, Rotation):
angle = inverted_link.angle
resolved = Rotation(
inverted_link._u,
inverted_link._u_ortho,
axis=inverted_link._axis,
)
resolved.angle = -angle
improved_links.append(resolved)
continue
# unpack compound links
if isinstance(link, (CompundLink, AffineCompound)):
for sub_link in link._links:
improved_links.append(sub_link)
continue
# drop identity translations
if isinstance(link, Translation) and abs(link.amount) < eps:
continue
# drop identity rotations
if isinstance(link, Rotation) and abs(link.angle) < eps:
continue
# no improvements for this link
improved_links.append(link)
if len(improved_links) != len(links):
improved_links = simplify(improved_links)
elif any([a != b for a, b in zip(links, improved_links)]):
improved_links = simplify(improved_links)
return improved_links
def combine_translations(links: List[Link]) -> List[Link]:
improved_links: List[Link] = list()
idx = 0
while idx < len(links):
link = links[idx]
if not isinstance(link, Translation):
improved_links.append(link)
idx += 1
continue
translations: List[Translation] = list()
for sub_link in links[idx:]:
if not isinstance(sub_link, Translation):
break
translations.append(sub_link)
new_direction = np.zeros(link.parent_dim)
for sub_link in translations:
new_direction += sub_link.amount * sub_link.direction
improved_links.append(Translation(new_direction))
idx += len(translations)
return improved_links
def sort_links(links: List[Link]) -> List[Link]:
improved_links: List[Link] = [x for x in links]
repeat = True
while repeat:
repeat = False
for idx in range(len(improved_links) - 1):
link = improved_links[idx]
next_link = improved_links[idx + 1]
if isinstance(link, Rotation) and isinstance(next_link, Translation):
vector = next_link.amount * next_link.direction
vector = link.__inverse_transform__(vector)
improved_links[idx + 1] = improved_links[idx]
improved_links[idx] = Translation(vector)
repeat = True
continue
return improved_links
improved_links = simplify(links)
subchains: List[List[Link]] = list()
keepsies: List[Link] = list()
current_subchain: List[Link] = list()
for link in improved_links:
if link in keep_links or (isinstance(link, Joint) and keep_joints):
keepsies.append(link)
subchains.append(current_subchain)
current_subchain = list()
else:
current_subchain.append(link)
subchains.append(current_subchain)
improved_chains: List[List[Link]] = list()
for subchain in subchains:
improved_links = sort_links(subchain)
improved_links = combine_translations(improved_links)
improved_chains.append(improved_links)
improved_chain: List[Link] = list()
for chain, keepsie in zip(improved_chains, keepsies):
improved_chain += chain
improved_chain += [keepsie]
improved_chain += improved_chains[-1]
return improved_chain
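# Hedged usage sketch (kept as comments because of the relative imports
# above; assumes Translation's default ``amount`` is 1, which the
# ``Translation(new_direction)`` call in combine_translations relies on):
#   import numpy as np
#   from skbot.transform import Translation
#   from skbot.transform.simplfy import simplify_links
#   chain = [Translation(np.array([1.0, 0.0, 0.0])),
#            Translation(np.array([0.0, 2.0, 0.0]))]
#   assert len(simplify_links(chain)) == 1  # merged into one translation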
| 34.585714
| 85
| 0.583092
|
4a1343b072633916cf054e5536ebd7776bb0a52a
| 2,614
|
py
|
Python
|
NLP/Full_classifier/Readmodels.py
|
AlexKH22/Machine_Learning
|
7d2ee3ad99b29cc3b19ea02487e644f3e2b993c9
|
[
"Apache-2.0"
] | null | null | null |
NLP/Full_classifier/Readmodels.py
|
AlexKH22/Machine_Learning
|
7d2ee3ad99b29cc3b19ea02487e644f3e2b993c9
|
[
"Apache-2.0"
] | null | null | null |
NLP/Full_classifier/Readmodels.py
|
AlexKH22/Machine_Learning
|
7d2ee3ad99b29cc3b19ea02487e644f3e2b993c9
|
[
"Apache-2.0"
] | null | null | null |
import random
import pickle
from nltk.classify import ClassifierI
from statistics import mode
from nltk.tokenize import word_tokenize
class VoteClassifier(ClassifierI):
def __init__(self, *classifiers):
self._classifiers = classifiers
def classify(self, features):
votes = []
for c in self._classifiers:
v = c.classify(features)
votes.append(v)
return mode(votes)
def confidence(self, features):
votes = []
for c in self._classifiers:
v = c.classify(features)
votes.append(v)
choice_votes = votes.count(mode(votes))
conf = choice_votes / len(votes) * 100
return conf
def load_pickle(path):
    """Load and return a pickled object, closing the file handle."""
    with open(path, "rb") as f:
        return pickle.load(f)
documents = load_pickle("pickled_algos/documents.pickle")
word_features = load_pickle("pickled_algos/word_features5k.pickle")
def find_features(document):
words = word_tokenize(document)
features = {}
for w in word_features:
features[w] = (w in words)
return features
featuresets = load_pickle("pickled_algos/featuresets.pickle")
random.shuffle(featuresets)
# print(len(featuresets))
testing_set = featuresets[10000:]
training_set = featuresets[:10000]
open_file = open("pickled_algos/originalnaivebayes5k.pickle", "rb")
classifier = pickle.load(open_file)
open_file.close()
open_file = open("pickled_algos/MNB_classifier5k.pickle", "rb")
MNB_classifier = pickle.load(open_file)
open_file.close()
open_file = open("pickled_algos/BernoulliNB_classifier5k.pickle", "rb")
BernoulliNB_classifier = pickle.load(open_file)
open_file.close()
open_file = open("pickled_algos/LogisticRegression_classifier5k.pickle", "rb")
LogisticRegression_classifier = pickle.load(open_file)
open_file.close()
open_file = open("pickled_algos/LinearSVC_classifier5k.pickle", "rb")
LinearSVC_classifier = pickle.load(open_file)
open_file.close()
open_file = open("pickled_algos/SGDC_classifier5k.pickle", "rb")
SGDC_classifier = pickle.load(open_file)
open_file.close()
voted_classifier = VoteClassifier(classifier,
LinearSVC_classifier,
MNB_classifier,
BernoulliNB_classifier,
LogisticRegression_classifier)
def sentiment(text):
feats = find_features(text)
return voted_classifier.classify(feats), voted_classifier.confidence(feats)
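# Hedged usage sketch: classifying a snippet with the loaded ensemble
# (requires the pickled models above plus NLTK's tokenizer data). The
# result is a (label, confidence-percentage) pair from VoteClassifier.
if __name__ == "__main__":
    label, confidence = sentiment("An engaging film from start to finish.")
    print(label, confidence)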
| 24.203704
| 79
| 0.700842
|
4a1347096fae94208063106c6e3457715d894ea0
| 247
|
py
|
Python
|
frosch/style/token/__init__.py
|
HallerPatrick/frog
|
2a5eae6678a22c1f0a51be0b99fe2e45cbf7ff64
|
[
"MIT"
] | 204
|
2020-11-01T20:01:35.000Z
|
2022-02-17T17:57:43.000Z
|
frosch/style/token/__init__.py
|
HallerPatrick/frog
|
2a5eae6678a22c1f0a51be0b99fe2e45cbf7ff64
|
[
"MIT"
] | 58
|
2020-11-01T00:10:38.000Z
|
2022-03-24T19:20:30.000Z
|
frosch/style/token/__init__.py
|
HallerPatrick/frog
|
2a5eae6678a22c1f0a51be0b99fe2e45cbf7ff64
|
[
"MIT"
] | 6
|
2020-11-09T06:23:44.000Z
|
2021-03-26T21:22:43.000Z
|
"""
frosch - Better runtime errors
Patrick Haller
patrickhaller40@googlemail.com
License MIT
"""
from pygments.token import (
Keyword,
Name,
Comment,
String,
Error,
Number,
Operator,
Generic,
)
| 11.227273
| 34
| 0.603239
|