| column | dtype | lengths / values |
|---|---|---|
| hexsha | string | lengths 40-40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3-239 |
| max_stars_repo_name | string | lengths 5-130 |
| max_stars_repo_head_hexsha | string | lengths 40-78 |
| max_stars_repo_licenses | list | lengths 1-10 |
| max_stars_count | int64 | 1 to 191k (nullable) |
| max_stars_repo_stars_event_min_datetime | string | lengths 24-24 (nullable) |
| max_stars_repo_stars_event_max_datetime | string | lengths 24-24 (nullable) |
| max_issues_repo_path | string | lengths 3-239 |
| max_issues_repo_name | string | lengths 5-130 |
| max_issues_repo_head_hexsha | string | lengths 40-78 |
| max_issues_repo_licenses | list | lengths 1-10 |
| max_issues_count | int64 | 1 to 67k (nullable) |
| max_issues_repo_issues_event_min_datetime | string | lengths 24-24 (nullable) |
| max_issues_repo_issues_event_max_datetime | string | lengths 24-24 (nullable) |
| max_forks_repo_path | string | lengths 3-239 |
| max_forks_repo_name | string | lengths 5-130 |
| max_forks_repo_head_hexsha | string | lengths 40-78 |
| max_forks_repo_licenses | list | lengths 1-10 |
| max_forks_count | int64 | 1 to 105k (nullable) |
| max_forks_repo_forks_event_min_datetime | string | lengths 24-24 (nullable) |
| max_forks_repo_forks_event_max_datetime | string | lengths 24-24 (nullable) |
| content | string | lengths 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
hexsha: 4a1507696614c81e3043b1c781c1e2123ccbe7eb | size: 9,535 | ext: py | lang: Python
repo: nsg-ethz/SDNRacer | path: sts/happensbefore/hb_utils.py | head_hexsha: 33353177998947580e879941f05862f0173a0c48 | licenses: ["Apache-2.0"]
stars: 5 (2016-03-18T15:12:04.000Z to 2019-01-28T20:18:24.000Z) | issues: null | forks: 1 (2019-11-02T22:04:48.000Z to 2019-11-02T22:04:48.000Z)
"""
Various utility functions; some are copied from STS.
"""
import base64
from functools import partial
from pox.lib.addresses import EthAddr
from pox.lib.packet.ethernet import ethernet
from pox.lib.packet.icmp import icmp
from pox.lib.packet.ipv4 import ipv4
from pox.lib.packet.icmp import _type_to_name as icmp_names
from pox.lib.packet.packet_utils import ipproto_to_str
from pox.openflow.flow_table import SwitchFlowTable
from pox.openflow.flow_table import TableEntry
from pox.openflow.software_switch import OFConnection
from pox.openflow.libopenflow_01 import ofp_flow_mod
from pox.openflow.libopenflow_01 import ofp_type_rev_map
from pox.openflow.libopenflow_01 import ofp_flow_mod_command_rev_map
from pox.openflow.libopenflow_01 import ofp_flow_removed_reason_rev_map
def enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
reverse = dict((value, key) for key, value in enums.iteritems())
@classmethod
def _names(cls): # returns dict: ordinal -> string
return reverse
enums['_names'] = _names
@classmethod
def _ordinals(cls): # returns dict: string -> ordinal
# filter _names, _ordinals
return {k: v for k, v in enums.items() if not k.startswith('_')}
enums['_ordinals'] = _ordinals
return type('Enum', (), enums)
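# --- Illustrative sketch (not part of the original module) ---
# A minimal usage example of the enum() helper above; the names and values
# below are hypothetical.
def _enum_usage_example():
  Color = enum('RED', 'GREEN', BLUE=10)
  assert Color.RED == 0 and Color.GREEN == 1 and Color.BLUE == 10
  assert Color._names()[10] == 'BLUE'       # ordinal -> string
  assert Color._ordinals()['RED'] == 0      # string -> ordinal
  return Color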
def check_list(obj):
if isinstance(obj, list):
return obj
return [obj] if obj else []
def get_port_no(obj):
"""
Return a port number: try obj itself, then obj.port_no, then obj.port_no().
"""
if isinstance(obj, (basestring, int, long)):
return obj
if hasattr(obj, "port_no"):
port_no = getattr(obj, "port_no")
if isinstance(port_no, (basestring, int, long)):
return port_no
try:
port_no = port_no()
if isinstance(port_no, (basestring, int, long)):
return port_no
return str(port_no)
except:
return str(port_no)
return str(obj)
def base64_encode_raw(packet):
"""Calling pack() on a Openflow message might modify/add an XID."""
# base 64 occasionally adds extraneous newlines: bit.ly/aRTmNu
if packet is None:
return None
return base64.b64encode(packet).replace("\n", "")
def base64_encode(packet):
"""Encode packet to base64 string"""
if hasattr(packet, "pack"):
packet = packet.pack()
# base 64 occasionally adds extraneous newlines: bit.ly/aRTmNu
return base64_encode_raw(packet)
def base64_decode(data):
"""Decode base64 string"""
return base64.b64decode(data)
def base64_decode_openflow(data):
"""Decode openflow message from base64 string to msg object"""
(msg, packet_length) = OFConnection.parse_of_packet(base64_decode(data))
return msg
def decode_flow_mod(data):
"""Decode flow mod from base64 string to ofp_flow_mod object."""
if data is None:
return None
bits = base64_decode(data)
fm = ofp_flow_mod()
fm.unpack(bits) # NOTE: unpack IS in-situ for ofp_flow_mod() type
return fm
def decode_packet(data):
"""Decode a packet in base64 string to pox.lib.packet.ethernet object."""
bits = base64_decode(data)
p = ethernet()
p = p.unpack(bits) # NOTE: unpack IS NOT in-situ for ethernet() type
return p
def decode_flow_table(data):
"""Decode a list of flow from base64 to SwitchFlowTable object."""
table = SwitchFlowTable()
for row in data:
flow_mod = decode_flow_mod(row)
entry = TableEntry.from_flow_mod(flow_mod)
table.add_entry(entry)
return table
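# --- Illustrative sketch (not part of the original module) ---
# Round-tripping a single flow mod through the helpers above; the priority
# value is hypothetical.
def _flow_mod_roundtrip_example():
  fm = ofp_flow_mod()
  fm.priority = 100
  encoded = base64_encode(fm)         # pack() + base64, newlines stripped
  decoded = decode_flow_mod(encoded)  # back to an ofp_flow_mod instance
  return decoded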
def base64_encode_flow(flow, set_zero_XID=False):
  """
  Optionally set the xid to 0 right before encoding, to enable comparisons on the base64 string.
  """
  if flow is None:
    return None
  tmp = flow.to_flow_mod() if hasattr(flow, 'to_flow_mod') else flow
  if set_zero_XID:
    tmp.xid = 0
  return base64_encode(tmp)
def base64_encode_flow_list(flows, set_zero_XID=False):
return None if flows is None else [base64_encode_flow(entry, set_zero_XID) for entry in flows]
def base64_encode_flow_table(flow_table, set_zero_XID=False):
return None if flow_table is None else base64_encode_flow_list(flow_table.table, set_zero_XID)
def compare_flow_table(table, other):
fm1 = []
for i in table.table:
fm1.append(i.to_flow_mod())
fm2 = []
for i in other.table:
fm2.append(i.to_flow_mod())
# TODO(jm): This could be improved by using a newer version of POX,
# where flow table entries are always in priority order.
# Then only one pass would be necessary.
for i in fm1:
if i not in fm2:
return False
for i in fm2:
if i not in fm1:
return False
return True
def read_flow_table(table, packet, in_port):
return table.entry_for_packet(packet, in_port)
def write_flow_table(table, flow_mod):
return table.process_flow_mod(flow_mod)
def find_entries_in_flow_table(table, other_flow_mod):
other = ofp_flow_mod()
other.unpack(other_flow_mod.pack())
other.xid = 0
found = []
for i in table.table:
this_entry = ofp_flow_mod()
this_entry.unpack(i.to_flow_mod().pack())
this_entry.xid = 0
if this_entry == other:
found.append(i)
return found
def nCr(n,r):
"""
Implements multiplicative formula:
https://en.wikipedia.org/wiki/Binomial_coefficient#Multiplicative_formula
"""
if r < 0 or r > n:
return 0
if r == 0 or r == n:
return 1
c = 1
for i in xrange(min(r, n - r)):
c = c * (n - i) // (i + 1)
return c
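# Added note: the loop applies the multiplicative formula incrementally, so the
# intermediate values stay integral; e.g. nCr(5, 2) == 10, nCr(5, 0) == 1 and
# nCr(5, 6) == 0.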
def ofp_type_to_str(t):
return ofp_type_rev_map.keys()[ofp_type_rev_map.values().index(t)]
def ofp_flow_removed_reason_to_str(r):
return ofp_flow_removed_reason_rev_map.keys()[ofp_flow_removed_reason_rev_map.values().index(r)]
def str_to_ofp_flow_removed_reason(r):
return ofp_flow_removed_reason_rev_map[r]
def ofp_flow_mod_command_to_str(t):
return ofp_flow_mod_command_rev_map.keys()[ofp_flow_mod_command_rev_map.values().index(t)]
def eth_repr(pkt):
s = ''.join(('ETH: ', '[', str(EthAddr(pkt.src)), '>', str(EthAddr(pkt.dst)), ':',
ethernet.getNameForType(pkt.type), ']'))
if pkt.next is None:
pass
elif pkt.type == ethernet.LLDP_TYPE:
s += "| LLDP"
elif pkt.type == 35138:
print "BUGGY PKT type {0} str type {1}".format(pkt.type, ethernet.getNameForType(pkt.type))
s += "| Unkown PKT"
else:
s += "|" + str(pkt.next)
return '\\n'.join(s.split('|'))
def icmp_repr(pkt):
t = icmp_names.get(pkt.type, str(pkt.type))
s = 'ICMP: {t:%s c:%i}' % (t, pkt.code)
if pkt.next is None:
return s
return '|' + ''.join((s, str(pkt.next)))
def ipv4_repr(pkt):
s = 'IPv4' + ''.join(('(','['#+'v:'+str(self.v),'hl:'+str(self.hl),\
# 'l:', str(self.iplen)
'ttl:', str(pkt.ttl), ']',
ipproto_to_str(pkt.protocol), \
# ' cs:', '%x' %self.csum,
'[',str(pkt.srcip), '>', str(pkt.dstip),'])'))
  if pkt.next is None:
return s
return '|' + ''.join((s, str(pkt.next)))
def pkt_info(packet):
"""
Returns a string representation of base64 encoded packet
Note: this function moneky patches __str__ in ethernet, icmp, ipv4, etc..
"""
ethernet.__str__ = eth_repr
icmp.__str__ = icmp_repr
ipv4.__str__ = ipv4_repr
return str(packet)
def op_to_str(op):
"""Helper function to pretty print Operations"""
if op.type == 'TraceSwitchFlowTableWrite':
opstr = "Write: "
elif op.type == 'TraceSwitchFlowTableRead':
opstr = "Read: "
else:
opstr = op.type + ": "
if op.flow_mod:
opstr += ofp_flow_mod_command_to_str(op.flow_mod.command)
opstr += " => " + TableEntry.from_flow_mod(op.flow_mod).show()
elif hasattr(op, 'packet'):
opstr += str(op.packet)
else:
opstr += "None"
return opstr
def dfs_edge_filter(G, source, edges_iter_func=lambda g, start: iter(g[start]), filter_msg_type=None):
"""
Do DFS over graph G starting from optional source.
edges_iter_func is a function that takes two arguments (graph and a node) then
it returns iterator over nodes connected to the start. This gives us the
ability to interpose and filter certain edges.
"""
if source is None:
# produce edges for all components
nodes = G
else:
# produce edges for components with source
nodes = [source]
visited=set()
for start in nodes:
if start in visited:
continue
visited.add(start)
stack = [(start, edges_iter_func(G, start))]
while stack:
parent,children = stack[-1]
try:
child = next(children)
if filter_msg_type and \
isinstance(G.node[child].get('event', None), filter_msg_type):
continue
if child not in visited:
yield parent,child
visited.add(child)
stack.append((child, edges_iter_func(G, child)))
except StopIteration:
stack.pop()
def rel_filter(G, source, rel):
for eid, attrs in G[source].iteritems():
if attrs['rel'] == rel:
yield eid
just_mid_iter = partial(rel_filter, rel='mid')
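# --- Illustrative sketch (not part of the original module) ---
# dfs_edge_filter() expects a networkx-style graph; just_mid_iter restricts the
# traversal to edges whose 'rel' attribute is 'mid'. The tiny graph below is
# hypothetical, and networkx is assumed to be available.
def _dfs_edge_filter_example():
  import networkx as nx
  g = nx.DiGraph()
  g.add_edge('a', 'b', rel='mid')
  g.add_edge('b', 'c', rel='other')
  # Only 'mid' edges are followed: yields ('a', 'b') but never reaches 'c'.
  return list(dfs_edge_filter(g, 'a', edges_iter_func=just_mid_iter))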
def pretty_match(match):
if not match:
return ''
outstr = ''
def append (f, formatter=str):
v = match.__getattr__(f)
if v is None: return ''
return f + ": " + formatter(v) + " "
outstr = ''
outstr += append('in_port')
outstr += append('dl_src')
outstr += append('dl_dst')
outstr += append('dl_vlan')
outstr += append('dl_vlan_pcp')
outstr += append('dl_type')
outstr += append('nw_tos')
outstr += append('nw_proto')
outstr += append('nw_src')
outstr += append('nw_dst')
outstr += append('tp_src')
outstr += append('tp_dst')
return outstr
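# --- Illustrative sketch (not part of the original module) ---
# pretty_match() only prints the fields that are actually set on an ofp_match;
# the field values below are hypothetical.
def _pretty_match_example():
  from pox.openflow.libopenflow_01 import ofp_match
  m = ofp_match(in_port=1, dl_type=0x0800)
  return pretty_match(m)  # e.g. "in_port: 1 dl_type: 2048 "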
avg_line_length: 27.961877 | max_line_length: 102 | alphanum_fraction: 0.674253

hexsha: 4a150774caa60919cac3e2f898b479ff886d7b52 | size: 1,038 | ext: py | lang: Python
repo: woctezuma/steam-reviews-to-sales | path: utils/tidfit_utils.py | head_hexsha: 51ca279d07a2a1cf26d099a5cfe51566760298cd | licenses: ["MIT"]
stars: 3 (2021-08-08T21:06:19.000Z to 2021-12-27T05:29:50.000Z) | issues: null | forks: null
import tidfit
from matplotlib import pyplot as plt
from utils.plot_utils import plot_arrays
def run_tidfit(X, y, model_str, xlabel="#reviews", ylabel="#owners"):
# Caveat: this is based on the L2 error, and thus is sensitive to the pre-processing of outliers.
# Reference: https://github.com/aminnj/tidfit
x_train = X.squeeze()
y_train = y.squeeze()
plot_arrays(x_train, y_train, xlabel=xlabel, ylabel=ylabel)
out = tidfit.fit(model_str, x_train, y_train)
plt.show()
return out
def run_linear_tidfit(X, y, xlabel="#reviews", ylabel="#owners"):
# Linear regression, without and with an intercept
out_a = run_tidfit(X, y, "a*x", xlabel=xlabel, ylabel=ylabel)
out_ab = run_tidfit(X, y, "a*x+b", xlabel=xlabel, ylabel=ylabel)
return out_a, out_ab
def run_chance_tidfit(X, y, xlabel="#reviews", ylabel="#owners"):
# This is equivalent to fitting the chance to write a review: (x/y) ~ a*x+b
out = run_tidfit(X, y, "x / (a*x+b)", xlabel=xlabel, ylabel=ylabel)
return out
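# --- Illustrative sketch (not part of the original module) ---
# Fitting hypothetical review/owner counts with the helpers above; numpy is
# assumed to be available alongside this module.
def _example_linear_fit():
    import numpy as np
    X = np.array([100, 200, 400, 800])
    y = np.array([5000, 9500, 21000, 39000])
    # Fit "#owners ~ a * #reviews" and "#owners ~ a * #reviews + b".
    return run_linear_tidfit(X, y)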
avg_line_length: 28.833333 | max_line_length: 101 | alphanum_fraction: 0.686898

hexsha: 4a1507eba615d538abd7e10210a7e0211e15bad9 | size: 7,474 | ext: py | lang: Python
repo: EnricoMagnago/F3 | path: benchmarks/f3_wrong_hints/scaling_ltl_infinite_state/7-extending_bound_10.py | head_hexsha: c863215c318d7d5f258eb9be38c6962cf6863b52 | licenses: ["MIT"]
stars: 3 (2021-04-23T23:29:26.000Z to 2022-03-23T10:00:30.000Z) | issues: null | forks: 1 (2021-11-17T22:02:56.000Z to 2021-11-17T22:02:56.000Z)
from typing import Tuple, FrozenSet
from collections.abc import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
def check_ltl(menv: msat_env, enc: LTLEncoder) -> Tuple[Iterable, msat_term,
msat_term, msat_term]:
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
bool_type = msat_get_bool_type(menv)
real_type = msat_get_rational_type(menv)
i = msat_declare_function(menv, "i", real_type)
i = msat_make_constant(menv, i)
r = msat_declare_function(menv, "r", real_type)
r = msat_make_constant(menv, r)
l = msat_declare_function(menv, "l", real_type)
l = msat_make_constant(menv, l)
inc_i = msat_declare_function(menv, "inc_i", bool_type)
inc_i = msat_make_constant(menv, inc_i)
x_i = msat_declare_function(menv, name_next("i"), real_type)
x_i = msat_make_constant(menv, x_i)
x_r = msat_declare_function(menv, name_next("r"), real_type)
x_r = msat_make_constant(menv, x_r)
x_l = msat_declare_function(menv, name_next("l"), real_type)
x_l = msat_make_constant(menv, x_l)
x_inc_i = msat_declare_function(menv, name_next("inc_i"), bool_type)
x_inc_i = msat_make_constant(menv, x_inc_i)
curr2next = {i: x_i, r: x_r, l: x_l, inc_i: x_inc_i}
zero = msat_make_number(menv, "0")
one = msat_make_number(menv, "1")
r_gt_0 = msat_make_gt(menv, r, zero)
r_lt_l = msat_make_lt(menv, r, l)
i_geq_0 = msat_make_geq(menv, i, zero)
init = msat_make_and(menv, r_gt_0, r_lt_l)
init = msat_make_and(menv, init,
msat_make_and(menv, i_geq_0,
msat_make_not(menv, inc_i)))
init = msat_make_and(menv, init, msat_make_gt(menv, l, zero))
# r' = r
trans = msat_make_equal(menv, x_r, r)
# i < l -> ((inc_i' & i' = i + 1) | (!inc_i' & i' = i)) & l' = l
i_lt_l = msat_make_lt(menv, i, l)
x_i_eq_i_p_1 = msat_make_and(menv, x_inc_i,
msat_make_equal(menv, x_i,
msat_make_plus(menv, i, one)))
x_i_eq_i = msat_make_and(menv, msat_make_not(menv, x_inc_i),
msat_make_equal(menv, x_i, i))
x_i_eq_i_p_1_or_i = msat_make_or(menv, x_i_eq_i_p_1, x_i_eq_i)
x_l_eq_l = msat_make_equal(menv, x_l, l)
x_i_eq_i_p_1_or_i_and_x_l_eq_l = msat_make_and(menv, x_i_eq_i_p_1_or_i,
x_l_eq_l)
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_lt_l,
x_i_eq_i_p_1_or_i_and_x_l_eq_l))
# i >= l -> i' = 0 & l' = l + 1 & !inc_i'
i_geq_l = msat_make_geq(menv, i, l)
x_i_eq_0 = msat_make_equal(menv, x_i, zero)
x_l_eq_l_p_1 = msat_make_equal(menv, x_l, msat_make_plus(menv, l, one))
x_i_eq_0_and_x_l_eq_l_p_1 = msat_make_and(menv,
msat_make_and(menv, x_i_eq_0,
x_l_eq_l_p_1),
msat_make_not(menv, x_inc_i))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_geq_l,
x_i_eq_0_and_x_l_eq_l_p_1))
    # (G F inc_i) -> ! G F r > i
    G_F_inc_i = enc.make_G(enc.make_F(inc_i))
    r_gt_i = msat_make_gt(menv, r, i)
    n_G_F_r_gt_i = msat_make_not(menv, enc.make_G(enc.make_F(r_gt_i)))
    ltl = msat_make_impl(menv, G_F_inc_i, n_G_F_r_gt_i)
return TermMap(curr2next), init, trans, ltl
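# Added summary: check_ltl() builds a transition system over the reals i, r, l
# and the boolean inc_i. Initially 0 < r < l, i >= 0, l > 0 and inc_i is false.
# Each step keeps r constant; while i < l, i is either incremented (setting
# inc_i') or left unchanged, with l preserved; once i >= l, i is reset to 0,
# l is incremented and inc_i' is cleared. The returned LTL property is
# (G F inc_i) -> !(G F (r > i)).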
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
i = mgr.Symbol("i", types.REAL)
r = mgr.Symbol("r", types.REAL)
l = mgr.Symbol("l", types.REAL)
inc_i = mgr.Symbol("inc_i", types.BOOL)
symbs = frozenset([i, r, l, inc_i])
x_i = symb_to_next(mgr, i)
x_r = symb_to_next(mgr, r)
x_l = symb_to_next(mgr, l)
x_inc_i = symb_to_next(mgr, inc_i)
res = []
n0 = mgr.Real(0)
n1 = mgr.Real(1)
stutter = mgr.Equals(x_i, i)
loc = Location(env, mgr.GE(i, n0), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_i, mgr.Plus(i, n1)))
h_i = Hint("h_i0", env, frozenset([i]), symbs)
h_i.set_locs([loc])
res.append(h_i)
loc = Location(env, mgr.LE(l, n0))
loc.set_progress(0, mgr.Equals(x_l, mgr.Minus(l, n1)))
h_l = Hint("h_l1", env, frozenset([l]), symbs)
h_l.set_locs([loc])
res.append(h_l)
loc0 = Location(env, mgr.GE(i, n0))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i2", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1])
res.append(h_i)
loc0 = Location(env, mgr.GE(l, n0), mgr.GE(r, n0),
stutterT=mgr.Equals(x_l, mgr.Plus(l, r)))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l3", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1])
res.append(h_l)
loc0 = Location(env, mgr.GE(r, n0))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(2, mgr.Equals(x_r, mgr.Plus(r, n1)))
loc2 = Location(env, mgr.GE(r, n0))
loc2.set_progress(0, mgr.Equals(x_r, r))
h_r = Hint("h_r4", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1, loc2])
res.append(h_r)
loc0 = Location(env, mgr.GE(l, n0))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(2, mgr.Equals(x_l, l))
loc2 = Location(env, mgr.GE(l, n0))
loc2.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l4", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1, loc2])
res.append(h_l)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i, stutterT=x_inc_i)
loc1.set_progress(2, mgr.Not(x_inc_i))
loc2 = Location(env, mgr.Not(inc_i))
loc2.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc4", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1, loc2])
res.append(h_inc)
return frozenset(res)
avg_line_length: 37.18408 | max_line_length: 89 | alphanum_fraction: 0.629783

hexsha: 4a15082b2b439f6c44d9c7995bbbffb3134c7de6 | size: 1,384 | ext: py | lang: Python
repo: sebastientourbier/nipype | path: nipype/interfaces/setup.py | head_hexsha: 99c5904176481520c5bf42a501aae1a12184e672 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function, division, unicode_literals, absolute_import
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('interfaces', parent_package, top_path)
config.add_subpackage('afni')
config.add_subpackage('ants')
config.add_subpackage('camino')
config.add_subpackage('camino2trackvis')
config.add_subpackage('cmtk')
config.add_subpackage('diffusion_toolkit')
config.add_subpackage('dipy')
config.add_subpackage('elastix')
config.add_subpackage('freesurfer')
config.add_subpackage('fsl')
config.add_subpackage('minc')
config.add_subpackage('mipav')
config.add_subpackage('mne')
config.add_subpackage('mrtrix')
config.add_subpackage('mrtrix3')
config.add_subpackage('niftyfit')
config.add_subpackage('niftyreg')
config.add_subpackage('niftyseg')
config.add_subpackage('nipy')
config.add_subpackage('spm')
config.add_subpackage('slicer')
config.add_data_dir('script_templates')
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
avg_line_length: 32.952381 | max_line_length: 82 | alphanum_fraction: 0.723988

hexsha: 4a1508e11f82aff71380a1345946149dd1a19635 | size: 3,234 | ext: py | lang: Python
repo: alexgallego1997/GamestonkTerminal | path: gamestonk_terminal/cryptocurrency/crypto_controller.py | head_hexsha: 1c6ce5c99111aa7195c51f6930fcdbb9dadd2f00 | licenses: ["MIT"]
stars: 3 (2021-04-13T06:26:46.000Z to 2022-01-26T05:11:22.000Z) | issues: 2 (2021-04-17T10:24:06.000Z to 2021-04-17T10:44:59.000Z) | forks: null
__docformat__ = "numpy"
import argparse
import pandas as pd
import matplotlib.pyplot as plt
from prompt_toolkit.completion import NestedCompleter
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.helper_funcs import get_flair
from gamestonk_terminal.menu import session
from gamestonk_terminal.cryptocurrency import coin_api
class CryptoController:
CHOICES = ["help", "q", "quit", "load", "view"]
def __init__(self):
""" CONSTRUCTOR """
self.crypto_parser = argparse.ArgumentParser(add_help=False, prog="crypto")
self.crypto_parser.add_argument("cmd", choices=self.CHOICES)
self.current_coin = None
self.current_df = pd.DataFrame()
@staticmethod
def print_help(current_coin):
""" Print help """
print("\nCryptocurrency:")
print(" help show this menu again")
print(" q quit this menu, and shows back to main menu")
print(" quit quit to abandon program")
print(f"\nCurrent Coin: {current_coin}")
print("")
print(" load load cryptocurrency data")
print(" view load and view cryptocurrency data")
print("")
def switch(self, an_input: str):
"""Process and dispatch input
Returns
-------
True, False or None
False - quit the menu
True - quit the program
None - continue in the menu
"""
(known_args, other_args) = self.crypto_parser.parse_known_args(an_input.split())
return getattr(
self, "call_" + known_args.cmd, lambda: "Command not recognized!"
)(other_args)
def call_help(self, _):
"""Process Help command"""
self.print_help(self.current_coin)
def call_q(self, _):
"""Process Q command - quit the menu"""
return False
def call_quit(self, _):
"""Process Quit command - quit the program"""
return True
def call_load(self, other_args):
self.current_coin, self.current_df = coin_api.load(other_args)
def call_view(self, other_args):
if self.current_coin:
coin_api.view(self.current_coin, self.current_df, other_args)
else:
print("No coin selected. Use 'load' to load the coin you want to look at.")
print("")
def menu():
crypto_controller = CryptoController()
crypto_controller.print_help(crypto_controller.current_coin)
plt.close("all")
while True:
# Get input command from user
if session and gtff.USE_PROMPT_TOOLKIT:
completer = NestedCompleter.from_nested_dict(
{c: None for c in crypto_controller.CHOICES}
)
an_input = session.prompt(
f"{get_flair()} (crypto)> ",
completer=completer,
)
else:
an_input = input(f"{get_flair()} (crypto)> ")
try:
process_input = crypto_controller.switch(an_input)
if process_input is not None:
return process_input
except SystemExit:
print("The command selected doesn't exist\n")
continue
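# --- Illustrative sketch (not part of the original module) ---
# How switch() dispatches: the first token must be one of CHOICES and is mapped
# to the matching call_* method; anything else makes argparse raise SystemExit,
# which menu() catches above.
def _dispatch_example():
    controller = CryptoController()
    return controller.switch("help")  # prints the help menu and returns None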
avg_line_length: 31.096154 | max_line_length: 88 | alphanum_fraction: 0.607607

hexsha: 4a150a17cbe03d3e5b5db7dd5aa7bdb03884c6e8 | size: 1,268 | ext: py | lang: Python
repo: JKDingwall/couchapp | path: couchapp/autopush/command.py | head_hexsha: 447aa0ce2d41b47f454fdfbefc1973a392603046 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
# -*- coding: utf-8 -*-
#
# This file is part of couchapp released under the Apache 2 license.
# See the NOTICE for more information.
import logging
import os
import sys
from couchapp.autopush import DEFAULT_UPDATE_DELAY
from couchapp.errors import AppError
from couchapp.localdoc import document
if sys.platform == "win32" or os.name == "nt":
from couchapp.autopush.winwatcher import WinCouchappWatcher as CouchappWatcher
else:
from couchapp.autopush.watcher import CouchappWatcher
log = logging.getLogger(__name__)
def autopush(conf, path, *args, **opts):
doc_path = None
dest = None
if len(args) < 2:
doc_path = path
if args:
dest = args[0]
else:
doc_path = os.path.normpath(os.path.join(os.getcwd(),
args[0]))
dest = args[1]
if doc_path is None:
raise AppError("You aren't in a couchapp.")
conf.update(doc_path)
doc = document(doc_path, create=False,
docid=opts.get('docid'))
dbs = conf.get_dbs(dest)
update_delay = int(opts.get('update_delay', DEFAULT_UPDATE_DELAY))
noatomic = opts.get('no_atomic', False)
watcher = CouchappWatcher(doc, dbs, update_delay=update_delay,
noatomic=noatomic)
watcher.run()
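# Added note: with fewer than two extra positional arguments the couchapp in
# `path` is pushed (optionally to the database named by the single argument);
# with two arguments the first is the couchapp directory, resolved against the
# current working directory, and the second is the destination database.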
avg_line_length: 26.978723 | max_line_length: 86 | alphanum_fraction: 0.66877

hexsha: 4a150a1c890646883c4fbb19dcfe1ba1e2a0a7e8 | size: 5,510 | ext: py | lang: Python
repo: BearerPipelineTest/datasets | path: tensorflow_datasets/core/file_adapters.py | head_hexsha: 4f8ae1bcf8727aca31ad99d3083e707a1b98036c | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adapters for file formats."""
import abc
import enum
import os
from typing import Any, ClassVar, Dict, Iterable, List, Optional, Type
from etils import epath
import tensorflow as tf
from tensorflow_datasets.core.utils import type_utils
ExamplePositions = List[Any]
class FileFormat(enum.Enum):
"""Format of the record files.
The values of the enumeration are used as filename endings/suffix.
"""
TFRECORD = 'tfrecord'
RIEGELI = 'riegeli'
@property
def file_suffix(self) -> str:
return ADAPTER_FOR_FORMAT[self].FILE_SUFFIX
DEFAULT_FILE_FORMAT = FileFormat.TFRECORD
class FileAdapter(abc.ABC):
"""Interface for Adapter objects which read and write examples in a format."""
FILE_SUFFIX: ClassVar[str]
@classmethod
@abc.abstractmethod
def make_tf_data(
cls,
filename: epath.PathLike,
buffer_size: Optional[int] = None,
) -> tf.data.Dataset:
"""Returns TensorFlow Dataset comprising given record file."""
raise NotImplementedError()
@classmethod
@abc.abstractmethod
def write_examples(
cls,
path: epath.PathLike,
iterator: Iterable[type_utils.KeySerializedExample],
) -> Optional[ExamplePositions]:
"""Write examples from given iterator in given path.
Args:
path: Path where to write the examples.
iterator: Iterable of examples.
Returns:
List of record positions for each record in the given iterator. In case of
TFRecords, does not return anything.
"""
raise NotImplementedError()
class TfRecordFileAdapter(FileAdapter):
"""File adapter for TFRecord file format."""
FILE_SUFFIX = 'tfrecord'
@classmethod
def make_tf_data(
cls,
filename: epath.PathLike,
buffer_size: Optional[int] = None,
) -> tf.data.Dataset:
"""Returns TensorFlow Dataset comprising given record file."""
return tf.data.TFRecordDataset(filename, buffer_size=buffer_size)
@classmethod
def write_examples(
cls,
path: epath.PathLike,
iterator: Iterable[type_utils.KeySerializedExample],
) -> Optional[ExamplePositions]:
"""Write examples from given iterator in given path.
Args:
path: Path where to write the examples.
iterator: Iterable of examples.
Returns:
None
"""
with tf.io.TFRecordWriter(os.fspath(path)) as writer:
for _, serialized_example in iterator:
writer.write(serialized_example)
writer.flush()
class RiegeliFileAdapter(FileAdapter):
"""File adapter for Riegeli file format."""
FILE_SUFFIX = 'riegeli'
@classmethod
def make_tf_data(
cls,
filename: epath.PathLike,
buffer_size: Optional[int] = None,
) -> tf.data.Dataset:
from riegeli.tensorflow.ops import riegeli_dataset_ops as riegeli_tf # pylint: disable=g-import-not-at-top
return riegeli_tf.RiegeliDataset(filename, buffer_size=buffer_size)
@classmethod
def write_examples(
cls,
path: epath.PathLike,
iterator: Iterable[type_utils.KeySerializedExample],
) -> Optional[ExamplePositions]:
"""Write examples from given iterator in given path.
Args:
path: Path where to write the examples.
iterator: Iterable of examples.
Returns:
List of record positions for each record in the given iterator.
"""
positions = []
import riegeli # pylint: disable=g-import-not-at-top
with tf.io.gfile.GFile(os.fspath(path), 'wb') as f:
with riegeli.RecordWriter(f, options='transpose') as writer:
for _, record in iterator:
writer.write_record(record)
positions.append(writer.last_pos)
return positions
def _to_bytes(key: type_utils.Key) -> bytes:
"""Convert the key to bytes."""
if isinstance(key, int):
return key.to_bytes(128, byteorder='big') # Use 128 as this match md5
elif isinstance(key, bytes):
return key
elif isinstance(key, str):
return key.encode('utf-8')
else:
raise TypeError(f'Invalid key type: {type(key)}')
# Create a mapping from FileFormat -> FileAdapter.
ADAPTER_FOR_FORMAT: Dict[FileFormat, Type[FileAdapter]] = {
FileFormat.RIEGELI: RiegeliFileAdapter,
FileFormat.TFRECORD: TfRecordFileAdapter,
}
_FILE_SUFFIX_TO_FORMAT = {
adapter.FILE_SUFFIX: file_format
for file_format, adapter in ADAPTER_FOR_FORMAT.items()
}
def file_format_from_suffix(file_suffix: str) -> FileFormat:
"""Returns the file format associated with the file extension (`tfrecord`)."""
if file_suffix not in _FILE_SUFFIX_TO_FORMAT:
raise ValueError('Unrecognized file extension: Should be one of '
f'{list(_FILE_SUFFIX_TO_FORMAT.values())}')
return _FILE_SUFFIX_TO_FORMAT[file_suffix]
def is_example_file(filename: str) -> bool:
"""Whether the given filename is a record file."""
return any(f'.{adapter.FILE_SUFFIX}' in filename
for adapter in ADAPTER_FOR_FORMAT.values())
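# --- Illustrative sketch (not part of the original module) ---
# Writing and reading back serialized examples with the TFRecord adapter; the
# path and payloads below are hypothetical.
def _tfrecord_adapter_example(path='/tmp/demo.tfrecord'):
  examples = [(b'key0', b'payload-0'), (b'key1', b'payload-1')]
  TfRecordFileAdapter.write_examples(path, iter(examples))
  dataset = TfRecordFileAdapter.make_tf_data(path)
  return list(dataset.as_numpy_iterator())  # [b'payload-0', b'payload-1']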
avg_line_length: 28.848168 | max_line_length: 111 | alphanum_fraction: 0.709437

hexsha: 4a150a44cd72621d3220eb388bba4a664e89c133 | size: 50,275 | ext: py | lang: Python
repo: pdeardorff-r7/vm-console-client-python | path: rapid7vmconsole/models/report_filters.py | head_hexsha: 4bee83aa4db2b328ba6894cebac55743f922ce5a | licenses: ["MIT"]
stars: null | issues: null | forks: null
# coding: utf-8
"""
InsightVM API
# Overview This guide documents the InsightVM Application Programming Interface (API) Version 3. This API supports the Representation State Transfer (REST) design pattern. Unless noted otherwise this API accepts and produces the `application/json` media type. This API uses Hypermedia as the Engine of Application State (HATEOAS) and is hypermedia friendly. All API connections must be made to the security console using HTTPS. ## Versioning Versioning is specified in the URL and the base path of this API is: `https://<host>:<port>/api/3/`. ## Specification An <a target=\"_blank\" href=\"https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md\">OpenAPI v2</a> specification (also known as Swagger 2) of this API is available. Tools such as <a target=\"_blank\" href=\"https://github.com/swagger-api/swagger-codegen\">swagger-codegen</a> can be used to generate an API client in the language of your choosing using this specification document. <p class=\"openapi\">Download the specification: <a class=\"openapi-button\" target=\"_blank\" download=\"\" href=\"/api/3/json\"> Download </a></p> ## Authentication Authorization to the API uses HTTP Basic Authorization (see <a target=\"_blank\" href=\"https://www.ietf.org/rfc/rfc2617.txt\">RFC 2617</a> for more information). Requests must supply authorization credentials in the `Authorization` header using a Base64 encoded hash of `\"username:password\"`. <!-- ReDoc-Inject: <security-definitions> --> ### 2FA This API supports two-factor authentication (2FA) by supplying an authentication token in addition to the Basic Authorization. The token is specified using the `Token` request header. To leverage two-factor authentication, this must be enabled on the console and be configured for the account accessing the API. ## Resources ### Naming Resource names represent nouns and identify the entity being manipulated or accessed. All collection resources are pluralized to indicate to the client they are interacting with a collection of multiple resources of the same type. Singular resource names are used when there exists only one resource available to interact with. The following naming conventions are used by this API: | Type | Case | | --------------------------------------------- | ------------------------ | | Resource names | `lower_snake_case` | | Header, body, and query parameters parameters | `camelCase` | | JSON fields and property names | `camelCase` | #### Collections A collection resource is a parent resource for instance resources, but can itself be retrieved and operated on independently. Collection resources use a pluralized resource name. The resource path for collection resources follow the convention: ``` /api/3/{resource_name} ``` #### Instances An instance resource is a \"leaf\" level resource that may be retrieved, optionally nested within a collection resource. Instance resources are usually retrievable with opaque identifiers. The resource path for instance resources follows the convention: ``` /api/3/{resource_name}/{instance_id}... ``` ## Verbs The following HTTP operations are supported throughout this API. The general usage of the operation and both its failure and success status codes are outlined below. | Verb | Usage | Success | Failure | | --------- | ------------------------------------------------------------------------------------- | ----------- | -------------------------------------------------------------- | | `GET` | Used to retrieve a resource by identifier, or a collection of resources by type. 
| `200` | `400`, `401`, `402`, `404`, `405`, `408`, `410`, `415`, `500` | | `POST` | Creates a resource with an application-specified identifier. | `201` | `400`, `401`, `404`, `405`, `408`, `413`, `415`, `500` | | `POST` | Performs a request to queue an asynchronous job. | `202` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Creates a resource with a client-specified identifier. | `200` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Performs a full update of a resource with a specified identifier. | `201` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `DELETE` | Deletes a resource by identifier or an entire collection of resources. | `204` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `OPTIONS` | Requests what operations are available on a resource. | `200` | `401`, `404`, `405`, `408`, `500` | ### Common Operations #### OPTIONS All resources respond to the `OPTIONS` request, which allows discoverability of available operations that are supported. The `OPTIONS` response returns the acceptable HTTP operations on that resource within the `Allow` header. The response is always a `200 OK` status. ### Collection Resources Collection resources can support the `GET`, `POST`, `PUT`, and `DELETE` operations. #### GET The `GET` operation invoked on a collection resource indicates a request to retrieve all, or some, of the entities contained within the collection. This also includes the optional capability to filter or search resources during the request. The response from a collection listing is a paginated document. See [hypermedia links](#section/Overview/Paging) for more information. #### POST The `POST` is a non-idempotent operation that allows for the creation of a new resource when the resource identifier is not provided by the system during the creation operation (i.e. the Security Console generates the identifier). The content of the `POST` request is sent in the request body. The response to a successful `POST` request should be a `201 CREATED` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. The `POST` to a collection resource can also be used to interact with asynchronous resources. In this situation, instead of a `201 CREATED` response, the `202 ACCEPTED` response indicates that processing of the request is not fully complete but has been accepted for future processing. This request will respond similarly with a `Location` header with link to the job-oriented asynchronous resource that was created and/or queued. #### PUT The `PUT` is an idempotent operation that either performs a create with user-supplied identity, or a full replace or update of a resource by a known identifier. The response to a `PUT` operation to create an entity is a `201 Created` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. `PUT` on a collection resource replaces all values in the collection. The typical response to a `PUT` operation that updates an entity is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. #### DELETE The `DELETE` is an idempotent operation that physically deletes a resource, or removes an association between resources. The typical response to a `DELETE` operation is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. 
### Instance Resources Instance resources can support the `GET`, `PUT`, `POST`, `PATCH` and `DELETE` operations. #### GET Retrieves the details of a specific resource by its identifier. The details retrieved can be controlled through property selection and property views. The content of the resource is returned within the body of the response in the acceptable media type. #### PUT Allows for and idempotent \"full update\" (complete replacement) on a specific resource. If the resource does not exist, it will be created; if it does exist, it is completely overwritten. Any omitted properties in the request are assumed to be undefined/null. For \"partial updates\" use `POST` or `PATCH` instead. The content of the `PUT` request is sent in the request body. The identifier of the resource is specified within the URL (not the request body). The response to a successful `PUT` request is a `201 CREATED` to represent the created status, with a valid `Location` header field set to the URI that can be used to access to the newly created (or fully replaced) resource. #### POST Performs a non-idempotent creation of a new resource. The `POST` of an instance resource most commonly occurs with the use of nested resources (e.g. searching on a parent collection resource). The response to a `POST` of an instance resource is typically a `200 OK` if the resource is non-persistent, and a `201 CREATED` if there is a resource created/persisted as a result of the operation. This varies by endpoint. #### PATCH The `PATCH` operation is used to perform a partial update of a resource. `PATCH` is a non-idempotent operation that enforces an atomic mutation of a resource. Only the properties specified in the request are to be overwritten on the resource it is applied to. If a property is missing, it is assumed to not have changed. #### DELETE Permanently removes the individual resource from the system. If the resource is an association between resources, only the association is removed, not the resources themselves. A successful deletion of the resource should return `204 NO CONTENT` with no response body. This operation is not fully idempotent, as follow-up requests to delete a non-existent resource should return a `404 NOT FOUND`. ## Requests Unless otherwise indicated, the default request body media type is `application/json`. ### Headers Commonly used request headers include: | Header | Example | Purpose | | ------------------ | --------------------------------------------- | ---------------------------------------------------------------------------------------------- | | `Accept` | `application/json` | Defines what acceptable content types are allowed by the client. For all types, use `*/*`. | | `Accept-Encoding` | `deflate, gzip` | Allows for the encoding to be specified (such as gzip). | | `Accept-Language` | `en-US` | Indicates to the server the client's locale (defaults `en-US`). | | `Authorization ` | `Basic Base64(\"username:password\")` | Basic authentication | | `Token ` | `123456` | Two-factor authentication token (if enabled) | ### Dates & Times Dates and/or times are specified as strings in the ISO 8601 format(s). 
The following formats are supported as input: | Value | Format | Notes | | --------------------------- | ------------------------------------------------------ | ----------------------------------------------------- | | Date | YYYY-MM-DD | Defaults to 12 am UTC (if used for a date & time | | Date & time only | YYYY-MM-DD'T'hh:mm:ss[.nnn] | Defaults to UTC | | Date & time in UTC | YYYY-MM-DD'T'hh:mm:ss[.nnn]Z | | | Date & time w/ offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm | | | Date & time w/ zone-offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm[<zone-id>] | | ### Timezones Timezones are specified in the regional zone format, such as `\"America/Los_Angeles\"`, `\"Asia/Tokyo\"`, or `\"GMT\"`. ### Paging Pagination is supported on certain collection resources using a combination of two query parameters, `page` and `size`. As these are control parameters, they are prefixed with the underscore character. The page parameter dictates the zero-based index of the page to retrieve, and the `size` indicates the size of the page. For example, `/resources?page=2&size=10` will return page 3, with 10 records per page, giving results 21-30. The maximum page size for a request is 500. ### Sorting Sorting is supported on paginated resources with the `sort` query parameter(s). The sort query parameter(s) supports identifying a single or multi-property sort with a single or multi-direction output. The format of the parameter is: ``` sort=property[,ASC|DESC]... ``` Therefore, the request `/resources?sort=name,title,DESC` would return the results sorted by the name and title descending, in that order. The sort directions are either ascending `ASC` or descending `DESC`. With single-order sorting, all properties are sorted in the same direction. To sort the results with varying orders by property, multiple sort parameters are passed. For example, the request `/resources?sort=name,ASC&sort=title,DESC` would sort by name ascending and title descending, in that order. ## Responses The following response statuses may be returned by this API. | Status | Meaning | Usage | | ------ | ------------------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `200` | OK | The operation performed without error according to the specification of the request, and no more specific 2xx code is suitable. | | `201` | Created | A create request has been fulfilled and a resource has been created. The resource is available as the URI specified in the response, including the `Location` header. | | `202` | Accepted | An asynchronous task has been accepted, but not guaranteed, to be processed in the future. | | `400` | Bad Request | The request was invalid or cannot be otherwise served. The request is not likely to succeed in the future without modifications. | | `401` | Unauthorized | The user is unauthorized to perform the operation requested, or does not maintain permissions to perform the operation on the resource specified. | | `403` | Forbidden | The resource exists to which the user has access, but the operating requested is not permitted. | | `404` | Not Found | The resource specified could not be located, does not exist, or an unauthenticated client does not have permissions to a resource. | | `405` | Method Not Allowed | The operations may not be performed on the specific resource. Allowed operations are returned and may be performed on the resource. 
| | `408` | Request Timeout | The client has failed to complete a request in a timely manner and the request has been discarded. | | `413` | Request Entity Too Large | The request being provided is too large for the server to accept processing. | | `415` | Unsupported Media Type | The media type is not supported for the requested resource. | | `500` | Internal Server Error | An internal and unexpected error has occurred on the server at no fault of the client. | ### Security The response statuses 401, 403 and 404 need special consideration for security purposes. As necessary, error statuses and messages may be obscured to strengthen security and prevent information exposure. The following is a guideline for privileged resource response statuses: | Use Case | Access | Resource | Permission | Status | | ------------------------------------------------------------------ | ------------------ |------------------- | ------------ | ------------ | | Unauthenticated access to an unauthenticated resource. | Unauthenticated | Unauthenticated | Yes | `20x` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Authenticated | No | `401` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Non-existent | No | `401` | | Authenticated access to a unauthenticated resource. | Authenticated | Unauthenticated | Yes | `20x` | | Authenticated access to an authenticated, unprivileged resource. | Authenticated | Authenticated | No | `404` | | Authenticated access to an authenticated, privileged resource. | Authenticated | Authenticated | Yes | `20x` | | Authenticated access to an authenticated, non-existent resource | Authenticated | Non-existent | Yes | `404` | ### Headers Commonly used response headers include: | Header | Example | Purpose | | -------------------------- | --------------------------------- | --------------------------------------------------------------- | | `Allow` | `OPTIONS, GET` | Defines the allowable HTTP operations on a resource. | | `Cache-Control` | `no-store, must-revalidate` | Disables caching of resources (as they are all dynamic). | | `Content-Encoding` | `gzip` | The encoding of the response body (if any). | | `Location` | | Refers to the URI of the resource created by a request. | | `Transfer-Encoding` | `chunked` | Specified the encoding used to transform response. | | `Retry-After` | 5000 | Indicates the time to wait before retrying a request. | | `X-Content-Type-Options` | `nosniff` | Disables MIME type sniffing. | | `X-XSS-Protection` | `1; mode=block` | Enables XSS filter protection. | | `X-Frame-Options` | `SAMEORIGIN` | Prevents rendering in a frame from a different origin. | | `X-UA-Compatible` | `IE=edge,chrome=1` | Specifies the browser mode to render in. | ### Format When `application/json` is returned in the response body it is always pretty-printed (indented, human readable output). Additionally, gzip compression/encoding is supported on all responses. #### Dates & Times Dates or times are returned as strings in the ISO 8601 'extended' format. When a date and time is returned (instant) the value is converted to UTC. For example: | Value | Format | Example | | --------------- | ------------------------------ | --------------------- | | Date | `YYYY-MM-DD` | 2017-12-03 | | Date & Time | `YYYY-MM-DD'T'hh:mm:ss[.nnn]Z` | 2017-12-03T10:15:30Z | #### Content In some resources a Content data type is used. This allows for multiple formats of representation to be returned within resource, specifically `\"html\"` and `\"text\"`. 
The `\"text\"` property returns a flattened representation suitable for output in textual displays. The `\"html\"` property returns an HTML fragment suitable for display within an HTML element. Note, the HTML returned is not a valid stand-alone HTML document. #### Paging The response to a paginated request follows the format: ```json { resources\": [ ... ], \"page\": { \"number\" : ..., \"size\" : ..., \"totalResources\" : ..., \"totalPages\" : ... }, \"links\": [ \"first\" : { \"href\" : \"...\" }, \"prev\" : { \"href\" : \"...\" }, \"self\" : { \"href\" : \"...\" }, \"next\" : { \"href\" : \"...\" }, \"last\" : { \"href\" : \"...\" } ] } ``` The `resources` property is an array of the resources being retrieved from the endpoint, each which should contain at minimum a \"self\" relation hypermedia link. The `page` property outlines the details of the current page and total possible pages. The object for the page includes the following properties: - number - The page number (zero-based) of the page returned. - size - The size of the pages, which is less than or equal to the maximum page size. - totalResources - The total amount of resources available across all pages. - totalPages - The total amount of pages. The last property of the paged response is the `links` array, which contains all available hypermedia links. For paginated responses, the \"self\", \"next\", \"previous\", \"first\", and \"last\" links are returned. The \"self\" link must always be returned and should contain a link to allow the client to replicate the original request against the collection resource in an identical manner to that in which it was invoked. The \"next\" and \"previous\" links are present if either or both there exists a previous or next page, respectively. The \"next\" and \"previous\" links have hrefs that allow \"natural movement\" to the next page, that is all parameters required to move the next page are provided in the link. The \"first\" and \"last\" links provide references to the first and last pages respectively. Requests outside the boundaries of the pageable will result in a `404 NOT FOUND`. Paginated requests do not provide a \"stateful cursor\" to the client, nor does it need to provide a read consistent view. Records in adjacent pages may change while pagination is being traversed, and the total number of pages and resources may change between requests within the same filtered/queries resource collection. #### Property Views The \"depth\" of the response of a resource can be configured using a \"view\". All endpoints supports two views that can tune the extent of the information returned in the resource. The supported views are `summary` and `details` (the default). View are specified using a query parameter, in this format: ```bash /<resource>?view={viewName} ``` #### Error Any error responses can provide a response body with a message to the client indicating more information (if applicable) to aid debugging of the error. All 40x and 50x responses will return an error response in the body. The format of the response is as follows: ```json { \"status\": <statusCode>, \"message\": <message>, \"links\" : [ { \"rel\" : \"...\", \"href\" : \"...\" } ] } ``` The `status` property is the same as the HTTP status returned in the response, to ease client parsing. The message property is a localized message in the request client's locale (if applicable) that articulates the nature of the error. The last property is the `links` property. 
This may contain additional [hypermedia links](#section/Overview/Authentication) to troubleshoot. #### Search Criteria <a section=\"section/Responses/SearchCriteria\"></a> Multiple resources make use of search criteria to match assets. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The operator is a type and property-specific operating performed on the filtered property. The valid values for fields and operators are outlined in the table below. Every filter also defines one or more values that are supplied to the operator. The valid values vary by operator and are outlined below. ##### Fields The following table outlines the search criteria fields and the available operators: | Field | Operators | | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | | `alternate-address-type` | `in` | | `container-image` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is like` ` not like` | | `container-status` | `is` ` is not` | | `containers` | `are` | | `criticality-tag` | `is` ` is not` ` is greater than` ` is less than` ` is applied` ` is not applied` | | `custom-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `cve` | `is` ` is not` ` contains` ` does not contain` | | `cvss-access-complexity` | `is` ` is not` | | `cvss-authentication-required` | `is` ` is not` | | `cvss-access-vector` | `is` ` is not` | | `cvss-availability-impact` | `is` ` is not` | | `cvss-confidentiality-impact` | `is` ` is not` | | `cvss-integrity-impact` | `is` ` is not` | | `cvss-v3-confidentiality-impact` | `is` ` is not` | | `cvss-v3-integrity-impact` | `is` ` is not` | | `cvss-v3-availability-impact` | `is` ` is not` | | `cvss-v3-attack-vector` | `is` ` is not` | | `cvss-v3-attack-complexity` | `is` ` is not` | | `cvss-v3-user-interaction` | `is` ` is not` | | `cvss-v3-privileges-required` | `is` ` is not` | | `host-name` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is empty` ` is not empty` ` is like` ` not like` | | `host-type` | `in` ` not in` | | `ip-address` | `is` ` is not` ` in range` ` not in range` ` is like` ` not like` | | `ip-address-type` | `in` ` not in` | | `last-scan-date` | `is-on-or-before` ` is on or after` ` is between` ` is earlier than` ` is within the last` | | `location-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `mobile-device-last-sync-time` | `is-within-the-last` ` is earlier than` | | `open-ports` | `is` ` is not` ` in range` | | `operating-system` | `contains` ` does not contain` ` is empty` ` is not empty` | | `owner-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `pci-compliance` | `is` | | `risk-score` | `is` ` is not` ` in range` ` greater than` ` less than` | | `service-name` | `contains` ` does not contain` | | `site-id` | `in` ` not in` | | `software` | `contains` ` does not contain` | | `vAsset-cluster` | `is` ` is not` ` contains` ` does not contain` ` starts with` | | 
`vAsset-datacenter` | `is` ` is not` | | `vAsset-host-name` | `is` ` is not` ` contains` ` does not contain` ` starts with` | | `vAsset-power-state` | `in` ` not in` | | `vAsset-resource-pool-path` | `contains` ` does not contain` | | `vulnerability-assessed` | `is-on-or-before` ` is on or after` ` is between` ` is earlier than` ` is within the last` | | `vulnerability-category` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` | | `vulnerability-cvss-v3-score` | `is` ` is not` | | `vulnerability-cvss-score` | `is` ` is not` ` in range` ` is greater than` ` is less than` | | `vulnerability-exposures` | `includes` ` does not include` | | `vulnerability-title` | `contains` ` does not contain` ` is` ` is not` ` starts with` ` ends with` | | `vulnerability-validated-status` | `are` | ##### Enumerated Properties The following fields have enumerated values: | Field | Acceptable Values | | ----------------------------------------- | ------------------------------------------------------------------------------------------------------------- | | `alternate-address-type` | 0=IPv4, 1=IPv6 | | `containers` | 0=present, 1=not present | | `container-status` | `created` `running` `paused` `restarting` `exited` `dead` `unknown` | | `cvss-access-complexity` | <ul><li><code>L</code> = Low</li><li><code>M</code> = Medium</li><li><code>H</code> = High</li></ul> | | `cvss-integrity-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-confidentiality-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-availability-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-access-vector` | <ul><li><code>L</code> = Local</li><li><code>A</code> = Adjacent</li><li><code>N</code> = Network</li></ul> | | `cvss-authentication-required` | <ul><li><code>N</code> = None</li><li><code>S</code> = Single</li><li><code>M</code> = Multiple</li></ul> | | `cvss-v3-confidentiality-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-integrity-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-availability-impact` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-attack-vector` | <ul><li><code>N</code> = Network</li><li><code>A</code> = Adjacent</li><li><code>L</code> = Local</li><li><code>P</code> = Physical</li></ul> | | `cvss-v3-attack-complexity` | <ul><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-user-interaction` | <ul><li><code>N</code> = None</li><li><code>R</code> = Required</li></ul> | | `cvss-v3-privileges-required` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `host-type` | 0=Unknown, 1=Guest, 2=Hypervisor, 3=Physical, 4=Mobile | | `ip-address-type` | 0=IPv4, 1=IPv6 | | `pci-compliance` | 0=fail, 1=pass | | `vulnerability-validated-status` | 0=present, 1=not present | ##### Operator Properties <a section=\"section/Responses/SearchCriteria/OperatorProperties\"></a> The following table outlines which properties are required for each operator and the appropriate data type(s): | Operator | `value` | `lower` | `upper` | | 
----------------------|-----------------------|-----------------------|-----------------------| | `are` | `string` | | | | `contains` | `string` | | | | `does-not-contain` | `string` | | | | `ends with` | `string` | | | | `in` | `Array[ string ]` | | | | `in-range` | | `numeric` | `numeric` | | `includes` | `Array[ string ]` | | | | `is` | `string` | | | | `is-applied` | | | | | `is-between` | | `numeric` | `numeric` | | `is-earlier-than` | `numeric` | | | | `is-empty` | | | | | `is-greater-than` | `numeric` | | | | `is-on-or-after` | `string` (yyyy-MM-dd) | | | | `is-on-or-before` | `string` (yyyy-MM-dd) | | | | `is-not` | `string` | | | | `is-not-applied` | | | | | `is-not-empty` | | | | | `is-within-the-last` | `numeric` | | | | `less-than` | `string` | | | | `like` | `string` | | | | `not-contains` | `string` | | | | `not-in` | `Array[ string ]` | | | | `not-in-range` | | `numeric` | `numeric` | | `not-like` | `string` | | | | `starts-with` | `string` | | | #### Discovery Connection Search Criteria <a section=\"section/Responses/DiscoverySearchCriteria\"></a> Dynamic sites make use of search criteria to match assets from a discovery connection. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The list of supported fields vary depending on the type of discovery connection configured for the dynamic site (e.g vSphere, ActiveSync, etc.). The operator is a type and property-specific operating performed on the filtered property. The valid values for fields outlined in the tables below and are grouped by the type of connection. Every filter also defines one or more values that are supplied to the operator. See <a href=\"#section/Responses/SearchCriteria/OperatorProperties\">Search Criteria Operator Properties</a> for more information on the valid values for each operator. ##### Fields (ActiveSync) This section documents search criteria information for ActiveSync discovery connections. The discovery connections must be one of the following types: `\"activesync-ldap\"`, `\"activesync-office365\"`, or `\"activesync-powershell\"`. The following table outlines the search criteria fields and the available operators for ActiveSync connections: | Field | Operators | | --------------------------------- | ------------------------------------------------------------- | | `last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `operating-system` | `contains` ` does-not-contain` | | `user` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (AWS) This section documents search criteria information for AWS discovery connections. The discovery connections must be the type `\"aws\"`. 
The following table outlines the search criteria fields and the available operators for AWS connections: | Field | Operators | | ----------------------- | ------------------------------------------------------------- | | `availability-zone` | `contains` ` does-not-contain` | | `guest-os-family` | `contains` ` does-not-contain` | | `instance-id` | `contains` ` does-not-contain` | | `instance-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `instance-state` | `in` ` not-in` | | `instance-type` | `in` ` not-in` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `region` | `in` ` not-in` | | `vpc-id` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (DHCP) This section documents search criteria information for DHCP discovery connections. The discovery connections must be the type `\"dhcp\"`. The following table outlines the search criteria fields and the available operators for DHCP connections: | Field | Operators | | --------------- | ------------------------------------------------------------- | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `mac-address` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (Sonar) This section documents search criteria information for Sonar discovery connections. The discovery connections must be the type `\"sonar\"`. The following table outlines the search criteria fields and the available operators for Sonar connections: | Field | Operators | | ------------------- | -------------------- | | `search-domain` | `contains` ` is` | | `ip-address` | `in-range` ` is` | | `sonar-scan-date` | `is-within-the-last` | ##### Fields (vSphere) This section documents search criteria information for vSphere discovery connections. The discovery connections must be the type `\"vsphere\"`. The following table outlines the search criteria fields and the available operators for vSphere connections: | Field | Operators | | -------------------- | ------------------------------------------------------------------------------------------ | | `cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `data-center` | `is` ` is-not` | | `discovered-time` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `guest-os-family` | `contains` ` does-not-contain` | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `power-state` | `in` ` not-in` | | `resource-pool-path` | `contains` ` does-not-contain` | | `last-time-seen` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vm` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Enumerated Properties (vSphere) The following fields have enumerated values: | Field | Acceptable Values | | ------------- | ------------------------------------ | | `power-state` | `poweredOn` `poweredOff` `suspended` | ## HATEOAS This API follows Hypermedia as the Engine of Application State (HATEOAS) principals and is therefore hypermedia friendly. Hyperlinks are returned in the `links` property of any given resource and contain a fully-qualified hyperlink to the corresponding resource. 
The format of the hypermedia link adheres to both the <a target=\"_blank\" href=\"http://jsonapi.org\">{json:api} v1</a> <a target=\"_blank\" href=\"http://jsonapi.org/format/#document-links\">\"Link Object\"</a> and <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html\">JSON Hyper-Schema</a> <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html#rfc.section.5.2\">\"Link Description Object\"</a> formats. For example: ```json \"links\": [{ \"rel\": \"<relation>\", \"href\": \"<href>\" ... }] ``` Where appropriate link objects may also contain additional properties than the `rel` and `href` properties, such as `id`, `type`, etc. See the [Root](#tag/Root) resources for the entry points into API discovery. # noqa: E501
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ReportFilters(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""ReportFilters - a model defined in Swagger""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ReportFilters):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 591.470588
| 48,045
| 0.489209
|
4a150ce2ef19931315652372d24da6b0b9950b5c
| 669
|
py
|
Python
|
examples/animates/repeating.sine.curve.py
|
strakam/PyEasyGraphics
|
57a586aa92385d26725d4ec3d61b2bbbe970195d
|
[
"BSD-3-Clause"
] | 5
|
2019-09-23T05:15:47.000Z
|
2021-01-17T08:06:47.000Z
|
examples/animates/repeating.sine.curve.py
|
strakam/PyEasyGraphics
|
57a586aa92385d26725d4ec3d61b2bbbe970195d
|
[
"BSD-3-Clause"
] | 3
|
2019-05-03T05:25:17.000Z
|
2021-04-15T04:53:16.000Z
|
examples/animates/repeating.sine.curve.py
|
strakam/PyEasyGraphics
|
57a586aa92385d26725d4ec3d61b2bbbe970195d
|
[
"BSD-3-Clause"
] | 4
|
2019-05-04T13:42:40.000Z
|
2021-04-15T10:38:48.000Z
|
"""
Repeating sine curve
Adapted from "Processing Creative Coding and Computational Art", Page 133.
"""
import math
from easygraphics import *
def main():
init_graph(400, 400)
set_render_mode(RenderMode.RENDER_MANUAL)
set_background_color("white")
angle = 0
y = 0
amplitude = 72
wave_gap = 14
frequency = 6
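    # Each pass draws one full-width sine trace at vertical offset y and then
    # shifts the baseline down by wave_gap; once y passes the bottom of the
    # canvas nothing new is drawn and delay_fps just keeps the window responsive.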
while is_run():
if y < get_height():
py = 0
for i in range(get_width()):
py = y + math.sin(math.radians(angle)) * amplitude
draw_point(i, py)
angle += frequency
y += wave_gap
delay_fps(30)
close_graph()
easy_run(main)
| 21.580645
| 74
| 0.57997
|
4a150d5b95a409ff5d33b2c7eb140e42ad3d304c
| 5,737
|
py
|
Python
|
socfaker/sysmon.py
|
priamai/soc-faker
|
51b587f0cec52212136905280406e915006d2afc
|
[
"MIT"
] | 122
|
2020-02-21T16:06:54.000Z
|
2022-03-21T13:53:03.000Z
|
socfaker/sysmon.py
|
priamai/soc-faker
|
51b587f0cec52212136905280406e915006d2afc
|
[
"MIT"
] | 13
|
2020-01-29T16:37:05.000Z
|
2022-01-27T21:30:10.000Z
|
socfaker/sysmon.py
|
priamai/soc-faker
|
51b587f0cec52212136905280406e915006d2afc
|
[
"MIT"
] | 20
|
2020-04-10T11:59:29.000Z
|
2022-02-10T09:20:26.000Z
|
import os
import fnmatch
from string import Formatter
from .computer import Computer
from .file import File
from .organization import Organization
from .employee import Employee
from .network import Network
from .registry import Registry
from .timestamp import Timestamp
from .baseclass import BaseClass
class SysMon(BaseClass):
"""The SysMon class will generate fake sysmon logs for Microsoft Windows operating systems
Returns:
SysMon: Returns an object containing a get method to retrieve generated sysmon logs
"""
__DATA_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data', 'sysmon'))
def __init__(self):
super(SysMon, self).__init__()
self.templates = self.__check_file_directory()
def get(self, count=1):
"""Returns a list of generated sysmon logs
Args:
            count (int, optional): The number of sysmon logs to return. Defaults to 1.
Returns:
list: A list of generated sysmon logs
"""
return_list = []
current_count = 1
shuffled = self.random.sample(self.templates, len(self.templates))
for template in shuffled:
while current_count <= count:
current_count += 1
with open(template, 'r') as file:
data = file.read()
names = [fn for _, fn, _, _ in Formatter().parse(data) if fn is not None]
computer = Computer()
f = File()
org = Organization()
emp = Employee()
net = Network()
properties = {}
for item in names:
if item == 'guid':
properties['guid'] = str(self.uuid.uuid4())
elif item == 'timestamp':
properties['timestamp'] = Timestamp().in_the_future(days=self.random.randint(1,15), hours=self.random.randint(1,24), minutes=self.random.randint(1,60), seconds=self.random.randint(1,60))
elif item == 'creation_time':
properties['creation_time'] = Timestamp().in_the_past(days=self.random.randint(1,15), hours=self.random.randint(1,24), minutes=self.random.randint(1,60), seconds=self.random.randint(1,60))
elif item == 'previous_creation_time':
properties['previous_creation_time'] = properties['timestamp']
elif item == 'process_id':
properties['process_id'] = self.random.randint(2000,4000)
elif item == 'thread_id':
properties['thread_id'] = self.random.randint(2000,4000)
elif item == 'image_path':
f = File()
properties['image_path'] = '{}\\{}'.format(f.full_path, f.name)
elif item == 'current_directory':
properties['current_directory'] = '{}'.format(f.full_path)
elif item == 'parent_path':
properties['parent_path'] = '{}'.format(f.full_path)
elif item == 'process_state':
properties['process_state'] = 'Loaded'
elif item == 'computer_name':
properties['computer_name'] = computer.name
elif item == 'domain':
properties['domain'] = org.domain
elif item == 'user':
properties['user'] = emp.username
elif item == 'protocol':
properties['protocol'] = net.protocol
elif item == 'source_ip':
properties['source_ip'] = Network().private_ipv4
elif item == 'source_port':
properties['source_port'] = self.random.randint(100,5000)
elif item == 'destination_ip':
properties['destination_ip'] = net.ipv4
elif item == 'destination_port':
properties['destination_port'] = self.random.randint(100,5000)
elif item == 'target_filename':
properties['target_filename'] = f.name
elif item == 'registry_object':
properties['registry_object'] = '{}'.format(Registry().key)
elif item == 'registry_value':
properties['registry_value'] = properties['registry_object'].split('\\',1)[1]
elif item == 'exe':
properties['exe'] = f.name
elif item == 'sha1':
properties['sha1'] = f.sha1
elif item == 'sha256':
properties['sha256'] = f.sha256
elif item == 'signed':
properties['signed'] = f.signed
elif item == 'signature':
properties['signature'] = f.signature
elif item == 'signature_status':
properties['signature_status'] = f.signature_status
elif item == 'integrity_level':
properties['integrity_level'] = self.random.choice(['Low', 'Medium', 'High'])
return_list.append(data.format(**properties))
return return_list
def __check_file_directory(self):
matches = []
for root, dirnames, filenames in os.walk(self.__DATA_PATH):
for filename in fnmatch.filter(filenames, '*.txt'):
matches.append(os.path.abspath(os.path.join(root, filename)))
return matches
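# Minimal usage sketch (assumes the package is importable as `socfaker`):
#
#   from socfaker.sysmon import SysMon
#   for log in SysMon().get(count=3):
#       print(log)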
| 48.210084
| 212
| 0.524142
|
4a150dcd3ae8c30a3959b54206a3fca7ad67ebd0
| 3,320
|
py
|
Python
|
forte/processors/data_augment/algorithms/embedding_similarity_replacement_op.py
|
KGerring/forte
|
7dc6e6c7d62d9a4126bdfc5ca02d15be3ffd61ca
|
[
"Apache-2.0"
] | null | null | null |
forte/processors/data_augment/algorithms/embedding_similarity_replacement_op.py
|
KGerring/forte
|
7dc6e6c7d62d9a4126bdfc5ca02d15be3ffd61ca
|
[
"Apache-2.0"
] | 1
|
2022-02-23T23:21:03.000Z
|
2022-02-23T23:21:03.000Z
|
forte/processors/data_augment/algorithms/embedding_similarity_replacement_op.py
|
KGerring/forte
|
7dc6e6c7d62d9a4126bdfc5ca02d15be3ffd61ca
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from typing import Tuple
import numpy as np
from texar.torch.data import Vocab, Embedding
from ft.onto.base_ontology import Annotation
from forte.common.configuration import Config
from forte.processors.data_augment.algorithms.text_replacement_op import (
TextReplacementOp,
)
__all__ = [
"EmbeddingSimilarityReplacementOp",
]
class EmbeddingSimilarityReplacementOp(TextReplacementOp):
r"""
This class is a replacement op leveraging pre-trained word
embeddings, such as `word2vec` and `glove`, to replace the input
word with another word with similar word embedding.
By default, the replacement word is randomly chosen from the
top k words with the most similar embeddings.
Args:
configs:
The config should contain the following key-value pairs:
- vocab_path (str): The absolute path to the vocabulary file for
the pretrained embeddings
- embed_hparams (dict): The hparams to initialize the
texar.torch.data.Embedding object.
- top_k (int): the number of k most similar words to choose from
"""
def __init__(self, configs: Config):
super().__init__(configs)
self.vocab = Vocab(self.configs["vocab_path"])
embed_hparams = self.configs["embed_hparams"]
embedding = Embedding(self.vocab.token_to_id_map_py, embed_hparams)
self.normalized_vectors = (
embedding.word_vecs
/ np.sqrt((embedding.word_vecs ** 2).sum(axis=1))[:, np.newaxis]
)
def replace(self, input_anno: Annotation) -> Tuple[bool, str]:
r"""
        This function replaces a word with another word whose pretrained
        word embedding is similar to the original word's embedding.
Args:
input_anno (Annotation): The input annotation.
Returns:
A tuple of two values, where the first element is a boolean value
indicating whether the replacement happens, and the second
element is the replaced word.
"""
word = input_anno.text
if word not in self.vocab.token_to_id_map_py:
return False, word
source_id = self.vocab.token_to_id_map_py[word]
source_vector = self.normalized_vectors[source_id]
scores = np.dot(self.normalized_vectors, source_vector)
target_ids = np.argpartition(-scores, self.configs["top_k"] + 1)[
: self.configs["top_k"] + 1
]
target_words = [
self.vocab.id_to_token_map_py[idx]
for idx in target_ids
if idx != source_id
and self.vocab.id_to_token_map_py[idx].lower() != word.lower()
]
return True, random.choice(target_words)
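# Minimal usage sketch; the paths, hparams and Config construction below are
# hypothetical and depend on the surrounding forte pipeline:
#
#   configs = Config({"vocab_path": "/path/to/vocab.txt",
#                     "embed_hparams": {"file": "/path/to/glove.txt", "dim": 50},
#                     "top_k": 5}, default_hparams=None)
#   op = EmbeddingSimilarityReplacementOp(configs)
#   replaced, new_word = op.replace(token_annotation)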
| 36.086957
| 77
| 0.675904
|
4a150e10aa97856f11e76434d4af2de0703547a9
| 753
|
py
|
Python
|
pr1696m/max_result.py
|
l33tdaima/l33tdaima
|
0a7a9573dc6b79e22dcb54357493ebaaf5e0aa90
|
[
"MIT"
] | 1
|
2020-02-20T12:04:46.000Z
|
2020-02-20T12:04:46.000Z
|
pr1696m/max_result.py
|
l33tdaima/l33tdaima
|
0a7a9573dc6b79e22dcb54357493ebaaf5e0aa90
|
[
"MIT"
] | null | null | null |
pr1696m/max_result.py
|
l33tdaima/l33tdaima
|
0a7a9573dc6b79e22dcb54357493ebaaf5e0aa90
|
[
"MIT"
] | null | null | null |
from typing import List
from collections import deque
class Solution:
def maxResult(self, nums: List[int], k: int) -> int:
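        # dp[i] is the best score of a path ending at index i; the deque holds
        # indices of the last k dp values in decreasing order, so dp[d[0]] is
        # always the maximum of the sliding window.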
dp = [nums[0]] + [0] * (len(nums) - 1)
d = deque([0])
for i in range(1, len(nums)):
dp[i] = nums[i] + dp[d[0]]
while d and dp[d[-1]] < dp[i]:
d.pop()
d.append(i)
if d[0] == i - k:
d.popleft()
return dp[-1]
# TESTS
for nums, k, expected in [
([1, -1, -2, 4, -7, 3], 2, 7),
([10, -5, -2, 4, 0, 3], 3, 17),
([1, -5, -20, 4, -1, 3, -6, -3], 2, 0),
]:
sol = Solution()
actual = sol.maxResult(nums, k)
print("The max result in", nums, ", k =", k, "->", actual)
assert actual == expected
| 25.1
| 62
| 0.444887
|
4a150eb89d3fffeb018b7608ed9add91225b5f8c
| 831
|
py
|
Python
|
scripts/cyp2c8/hg38/bin/cypgen_caller.py
|
twesigomwedavid/CypGen
|
0febf576e26da1884b4d8e9f79542bf1b01e969e
|
[
"MIT"
] | 3
|
2020-08-03T16:52:30.000Z
|
2021-06-30T16:59:17.000Z
|
scripts/cyp2c8/hg38/bin/cypgen_caller.py
|
twesigomwedavid/CypGen
|
0febf576e26da1884b4d8e9f79542bf1b01e969e
|
[
"MIT"
] | null | null | null |
scripts/cyp2c8/hg38/bin/cypgen_caller.py
|
twesigomwedavid/CypGen
|
0febf576e26da1884b4d8e9f79542bf1b01e969e
|
[
"MIT"
] | 2
|
2020-10-14T10:18:22.000Z
|
2020-12-16T18:03:15.000Z
|
#!/usr/bin/env python3
import os
import sys
import subprocess
from snv_def_modules import *
print("--------------------------------------------\n")
print("CYP2C8 Star Allele Calling with CypGen\n")
print("--------------------------------------------\n")
database = sys.argv[1]
infile = sys.argv[2]
infile_full = sys.argv[3]
infile_full_gt = sys.argv[4]
infile_spec = sys.argv[5]
cn = 2
supp_core_vars = get_core_variants(infile, cn)
print("\nSample core variants:")
print(supp_core_vars)
snv_def_calls = cand_snv_allele_calling(database, infile, infile_full, infile_full_gt, infile_spec, cn)
snv_cand_alleles = snv_def_calls[0]
print("\nCandidate alleles:")
print(snv_cand_alleles)
snv_def_alleles = snv_def_calls[-1]
dip_variants = get_all_vars_gt(infile_full_gt)
print("\nResult:")
print(snv_def_alleles)
| 16.62
| 103
| 0.677497
|
4a150ecd5feb2d558102dffb0477f2a713e1d6cd
| 6,954
|
py
|
Python
|
graph_objs/scattercarpet/unselected/_marker.py
|
wwwidonja/changed_plotly
|
1bda35a438539a97c84a3ab3952e95e8848467bd
|
[
"MIT"
] | null | null | null |
graph_objs/scattercarpet/unselected/_marker.py
|
wwwidonja/changed_plotly
|
1bda35a438539a97c84a3ab3952e95e8848467bd
|
[
"MIT"
] | null | null | null |
graph_objs/scattercarpet/unselected/_marker.py
|
wwwidonja/changed_plotly
|
1bda35a438539a97c84a3ab3952e95e8848467bd
|
[
"MIT"
] | null | null | null |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scattercarpet.unselected"
_path_str = "scattercarpet.unselected.marker"
_valid_props = {"color", "opacity", "size"}
# color
# -----
@property
def color(self):
"""
Sets the marker color of unselected points, applied only when a
selection exists.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the marker opacity of unselected points, applied only when
a selection exists.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# size
# ----
@property
def size(self):
"""
Sets the marker size of unselected points, applied only when a
selection exists.
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the marker color of unselected points, applied
only when a selection exists.
opacity
Sets the marker opacity of unselected points, applied
only when a selection exists.
size
Sets the marker size of unselected points, applied only
when a selection exists.
"""
def __init__(self, arg=None, color=None, opacity=None, size=None, **kwargs):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`new_plotly.graph_objs.scattercarpet.
unselected.Marker`
color
Sets the marker color of unselected points, applied
only when a selection exists.
opacity
Sets the marker opacity of unselected points, applied
only when a selection exists.
size
Sets the marker size of unselected points, applied only
when a selection exists.
Returns
-------
Marker
"""
super(Marker, self).__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the new_plotly.graph_objs.scattercarpet.unselected.Marker
constructor must be a dict or
an instance of :class:`new_plotly.graph_objs.scattercarpet.unselected.Marker`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
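# Minimal usage sketch: the three validated properties can be passed to the
# constructor or assigned afterwards, e.g.
#
#   marker = Marker(color="lightgray", opacity=0.4, size=6)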
| 34.088235
| 82
| 0.555939
|
4a150f99f946be4061babe6e7344f01a55f51bb3
| 9,609
|
py
|
Python
|
dfvfs/helpers/windows_path_resolver.py
|
Acidburn0zzz/dfvfs
|
3db8c4e520e3e7527faffeea8f52187c861fa3b6
|
[
"Apache-2.0"
] | 1
|
2019-08-28T23:47:16.000Z
|
2019-08-28T23:47:16.000Z
|
dfvfs/helpers/windows_path_resolver.py
|
Acidburn0zzz/dfvfs
|
3db8c4e520e3e7527faffeea8f52187c861fa3b6
|
[
"Apache-2.0"
] | null | null | null |
dfvfs/helpers/windows_path_resolver.py
|
Acidburn0zzz/dfvfs
|
3db8c4e520e3e7527faffeea8f52187c861fa3b6
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""A resolver for Windows paths to file system specific formats."""
from __future__ import unicode_literals
import re
from dfvfs.lib import errors
from dfvfs.lib import py2to3
from dfvfs.path import factory as path_spec_factory
class WindowsPathResolver(object):
"""Resolver object for Windows paths."""
_PATH_SEPARATOR = '\\'
_PATH_EXPANSION_VARIABLE = re.compile(r'^[%][^%]+[%]$')
def __init__(self, file_system, mount_point, drive_letter='C'):
"""Initializes a Windows path helper.
The mount point indicates a path specification where the Windows
file system is mounted. This can either be a path specification
into a storage media image or a directory accessible by the operating
system.
Args:
file_system (FileSystem): a file system.
mount_point (PathSpec): mount point path specification.
drive_letter (Optional[str]): drive letter used by the file system.
Raises:
PathSpecError: if the mount point path specification is incorrect.
ValueError: when file system or mount point is not set.
"""
if not file_system or not mount_point:
raise ValueError('Missing file system or mount point value.')
if path_spec_factory.Factory.IsSystemLevelTypeIndicator(
file_system.type_indicator):
if not hasattr(mount_point, 'location'):
raise errors.PathSpecError(
'Mount point path specification missing location.')
super(WindowsPathResolver, self).__init__()
self._drive_letter = drive_letter
self._environment_variables = {}
self._file_system = file_system
self._mount_point = mount_point
# Windows paths:
# Device path: \\.\PhysicalDrive0
# Volume device path: \\.\C:
# Volume file system path: \\.\C:\
# Extended-length path: \\?\C:\directory\file.txt
# Extended-length UNC path: \\?\UNC\server\share\directory\file.txt
# Local 'absolute' path: \directory\file.txt
# \directory\\file.txt
# Local 'relative' path: ..\directory\file.txt
# Local 'relative' path: .\directory\file.txt
# Volume 'absolute' path: C:\directory\file.txt
# Volume 'relative' path: C:directory\file.txt
# UNC path: \\server\share\directory\file.txt
# Path with environment variable: %SystemRoot%\file.txt
#
# Note Windows also allows paths like:
# C:\..\directory\file.txt
def _PathStripPrefix(self, path):
"""Strips the prefix from a path.
Args:
path (str): Windows path to strip the prefix from.
Returns:
str: path without the prefix or None if the path is not supported.
"""
if path.startswith('\\\\.\\') or path.startswith('\\\\?\\'):
if len(path) < 7 or path[5] != ':' or path[6] != self._PATH_SEPARATOR:
# Cannot handle a non-volume path.
return None
path = path[7:]
elif path.startswith('\\\\'):
# Cannot handle an UNC path.
return None
elif len(path) >= 3 and path[1] == ':':
# Check if the path is a Volume 'absolute' path.
if path[2] != self._PATH_SEPARATOR:
# Cannot handle a Volume 'relative' path.
return None
path = path[3:]
elif path.startswith('\\'):
path = path[1:]
else:
# Cannot handle a relative path.
return None
return path
def _ResolvePath(self, path, expand_variables=True):
"""Resolves a Windows path in file system specific format.
This function will check if the individual path segments exists within
the file system. For this it will prefer the first case sensitive match
above a case insensitive match. If no match was found None is returned.
Args:
path (str): Windows path to resolve.
expand_variables (Optional[bool]): True if path variables should be
expanded or not.
Returns:
tuple[str, PathSpec]: location and matching path specification or
(None, None) if not available.
"""
# Allow for paths that start with an environment variable e.g.
# %SystemRoot%\file.txt
if path.startswith('%'):
path_segment, _, _ = path.partition(self._PATH_SEPARATOR)
if not self._PATH_EXPANSION_VARIABLE.match(path_segment):
path = None
else:
path = self._PathStripPrefix(path)
if path is None:
return None, None
if path_spec_factory.Factory.IsSystemLevelTypeIndicator(
self._file_system.type_indicator):
file_entry = self._file_system.GetFileEntryByPathSpec(self._mount_point)
expanded_path_segments = self._file_system.SplitPath(
self._mount_point.location)
else:
file_entry = self._file_system.GetRootFileEntry()
expanded_path_segments = []
number_of_expanded_path_segments = 0
search_path_segments = path.split(self._PATH_SEPARATOR)
while search_path_segments:
path_segment = search_path_segments.pop(0)
if file_entry is None:
return None, None
# Ignore empty path segments or path segments containing a single dot.
if not path_segment or path_segment == '.':
continue
if path_segment == '..':
# Only allow to traverse back up to the mount point.
if number_of_expanded_path_segments > 0:
_ = expanded_path_segments.pop(0)
number_of_expanded_path_segments -= 1
file_entry = file_entry.GetParentFileEntry()
continue
if (expand_variables and
self._PATH_EXPANSION_VARIABLE.match(path_segment)):
path_segment = self._environment_variables.get(
path_segment[1:-1].upper(), path_segment)
if self._PATH_SEPARATOR in path_segment:
# The expanded path segment itself can consist of multiple
# path segments, hence we need to split it and prepend it to
# the search path segments list.
path_segments = path_segment.split(self._PATH_SEPARATOR)
path_segments.extend(search_path_segments)
search_path_segments = path_segments
path_segment = search_path_segments.pop(0)
sub_file_entry = file_entry.GetSubFileEntryByName(
path_segment, case_sensitive=False)
if sub_file_entry is None:
return None, None
expanded_path_segments.append(sub_file_entry.name)
number_of_expanded_path_segments += 1
file_entry = sub_file_entry
location = self._file_system.JoinPath(expanded_path_segments)
return location, file_entry.path_spec
def GetWindowsPath(self, path_spec):
"""Returns the Windows path based on a resolved path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
str: corresponding Windows path or None if the Windows path could not
be determined.
Raises:
PathSpecError: if the path specification is incorrect.
"""
location = getattr(path_spec, 'location', None)
if location is None:
raise errors.PathSpecError('Path specification missing location.')
if path_spec_factory.Factory.IsSystemLevelTypeIndicator(
self._file_system.type_indicator):
if not location.startswith(self._mount_point.location):
raise errors.PathSpecError(
'Path specification does not contain mount point.')
else:
if not hasattr(path_spec, 'parent'):
raise errors.PathSpecError('Path specification missing parent.')
if path_spec.parent != self._mount_point:
raise errors.PathSpecError(
'Path specification does not contain mount point.')
path_segments = self._file_system.SplitPath(location)
if path_spec_factory.Factory.IsSystemLevelTypeIndicator(
self._file_system.type_indicator):
mount_point_path_segments = self._file_system.SplitPath(
self._mount_point.location)
path_segments = path_segments[len(mount_point_path_segments):]
return '{0:s}:\\{1:s}'.format(
self._drive_letter, self._PATH_SEPARATOR.join(path_segments))
def ResolvePath(self, path, expand_variables=True):
"""Resolves a Windows path in file system specific format.
Args:
path (str): Windows path to resolve.
expand_variables (Optional[bool]): True if path variables should be
expanded or not.
Returns:
PathSpec: path specification in file system specific format.
"""
location, path_spec = self._ResolvePath(
path, expand_variables=expand_variables)
if not location or not path_spec:
return None
# Note that we don't want to set the keyword arguments when not used because
# the path specification base class will check for unused keyword arguments
# and raise.
kwargs = path_spec_factory.Factory.GetProperties(path_spec)
kwargs['location'] = location
if not path_spec_factory.Factory.IsSystemLevelTypeIndicator(
self._file_system.type_indicator):
kwargs['parent'] = self._mount_point
return path_spec_factory.Factory.NewPathSpec(
self._file_system.type_indicator, **kwargs)
def SetEnvironmentVariable(self, name, value):
"""Sets an environment variable in the Windows path helper.
Args:
name (str): name of the environment variable without enclosing
%-characters, e.g. SystemRoot as in %SystemRoot%.
value (str): value of the environment variable.
"""
if isinstance(value, py2to3.STRING_TYPES):
value = self._PathStripPrefix(value)
if value is not None:
self._environment_variables[name.upper()] = value
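# Minimal usage sketch (building `file_system` and `mount_point` from a storage
# media image is omitted; the names below are placeholders):
#
#   resolver = WindowsPathResolver(file_system, mount_point, drive_letter='C')
#   resolver.SetEnvironmentVariable('SystemRoot', 'C:\\Windows')
#   path_spec = resolver.ResolvePath('%SystemRoot%\\System32\\config\\SAM')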
| 35.069343
| 80
| 0.679259
|
4a15100b4003e735536c88a65ece7b942ac76ef9
| 24,612
|
py
|
Python
|
nipyapi/nifi/api_client.py
|
oneextrafact/nipyapi
|
4c184d69002a8ee3ac528fda63b2ffcc6cedbae5
|
[
"Apache-2.0"
] | null | null | null |
nipyapi/nifi/api_client.py
|
oneextrafact/nipyapi
|
4c184d69002a8ee3ac528fda63b2ffcc6cedbae5
|
[
"Apache-2.0"
] | null | null | null |
nipyapi/nifi/api_client.py
|
oneextrafact/nipyapi
|
4c184d69002a8ee3ac528fda63b2ffcc6cedbae5
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.10.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import re
import json
import mimetypes
import tempfile
import threading
from datetime import date, datetime
# python 2 and python 3 compatibility library
from six import PY3, integer_types, iteritems, text_type
from six.moves.urllib.parse import quote
from . import models
from .configuration import Configuration
from .rest import ApiException, RESTClientObject
class ApiClient(object):
"""
Generic API client for Swagger client library builds.
Swagger generic API client. This client handles the client-
server communication, and is invariant across implementations. Specifics of
the methods and models for each application are generated from the Swagger
templates.
NOTE: This class is auto generated by the swagger code generator program.
Ref: https://github.com/swagger-api/swagger-codegen
Do not edit the class manually.
:param host: The base path for the server to call.
:param header_name: a header to pass when making calls to the API.
:param header_value: a header value to pass when making calls to the API.
"""
PRIMITIVE_TYPES = (float, bool, bytes, text_type) + integer_types
NATIVE_TYPES_MAPPING = {
'int': int,
'long': int if PY3 else long,
'float': float,
'str': str,
'bool': bool,
'date': date,
'datetime': datetime,
'object': object,
}
def __init__(self, host=None, header_name=None, header_value=None, cookie=None):
"""
Constructor of the class.
"""
self.rest_client = RESTClientObject()
self.default_headers = {}
if header_name is not None:
self.default_headers[header_name] = header_value
if host is None:
self.host = Configuration().host
else:
self.host = host
self.cookie = cookie
# Set default User-Agent.
self.user_agent = 'Swagger-Codegen/1.0.0/python'
@property
def user_agent(self):
"""
Gets user agent.
"""
return self.default_headers['User-Agent']
@user_agent.setter
def user_agent(self, value):
"""
Sets user agent.
"""
self.default_headers['User-Agent'] = value
def set_default_header(self, header_name, header_value):
self.default_headers[header_name] = header_value
def __call_api(self, resource_path, method,
path_params=None, query_params=None, header_params=None,
body=None, post_params=None, files=None,
response_type=None, auth_settings=None, callback=None,
_return_http_data_only=None, collection_formats=None, _preload_content=True,
_request_timeout=None):
config = Configuration()
# header parameters
header_params = header_params or {}
header_params.update(self.default_headers)
if self.cookie:
header_params['Cookie'] = self.cookie
if header_params:
header_params = self.sanitize_for_serialization(header_params)
header_params = dict(self.parameters_to_tuples(header_params,
collection_formats))
# path parameters
if path_params:
path_params = self.sanitize_for_serialization(path_params)
path_params = self.parameters_to_tuples(path_params,
collection_formats)
for k, v in path_params:
# specified safe chars, encode everything
resource_path = resource_path.replace(
'{%s}' % k, quote(str(v), safe=config.safe_chars_for_path_param))
# query parameters
if query_params:
query_params = self.sanitize_for_serialization(query_params)
query_params = self.parameters_to_tuples(query_params,
collection_formats)
# post parameters
if post_params or files:
post_params = self.prepare_post_parameters(post_params, files)
post_params = self.sanitize_for_serialization(post_params)
post_params = self.parameters_to_tuples(post_params,
collection_formats)
# auth setting
self.update_params_for_auth(header_params, query_params, auth_settings)
# body
if body:
body = self.sanitize_for_serialization(body)
# request url
url = self.host + resource_path
# perform request and return response
response_data = self.request(method, url,
query_params=query_params,
headers=header_params,
post_params=post_params, body=body,
_preload_content=_preload_content,
_request_timeout=_request_timeout)
self.last_response = response_data
return_data = response_data
if _preload_content:
# deserialize response data
if response_type:
return_data = self.deserialize(response_data, response_type)
else:
return_data = None
if callback:
if _return_http_data_only:
callback(return_data)
else:
callback((return_data, response_data.status, response_data.getheaders()))
elif _return_http_data_only:
return (return_data)
else:
return (return_data, response_data.status, response_data.getheaders())
def sanitize_for_serialization(self, obj):
"""
Builds a JSON POST object.
If obj is None, return None.
If obj is str, int, long, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
If obj is dict, return the dict.
If obj is swagger model, return the properties dict.
:param obj: The data to serialize.
:return: The serialized form of data.
"""
if obj is None:
return None
elif isinstance(obj, self.PRIMITIVE_TYPES):
return obj
elif isinstance(obj, list):
return [self.sanitize_for_serialization(sub_obj)
for sub_obj in obj]
elif isinstance(obj, tuple):
return tuple(self.sanitize_for_serialization(sub_obj)
for sub_obj in obj)
elif isinstance(obj, (datetime, date)):
return obj.isoformat()
if isinstance(obj, dict):
obj_dict = obj
else:
            # Convert model obj to dict, skipping the `swagger_types` and
            # `attribute_map` metadata and any attribute whose value is None.
# Convert attribute name to json key in
# model definition for request.
obj_dict = {obj.attribute_map[attr]: getattr(obj, attr)
for attr, _ in iteritems(obj.swagger_types)
if getattr(obj, attr) is not None}
return {key: self.sanitize_for_serialization(val)
for key, val in iteritems(obj_dict)}
def deserialize(self, response, response_type):
"""
Deserializes response into an object.
:param response: RESTResponse object to be deserialized.
:param response_type: class literal for
deserialized object, or string of class name.
:return: deserialized object.
"""
# handle file downloading
# save response body into a tmp file and return the instance
if response_type == "file":
return self.__deserialize_file(response)
# fetch data from response object
try:
data = json.loads(response.data)
except ValueError:
data = response.data
return self.__deserialize(data, response_type)
def __deserialize(self, data, klass):
"""
Deserializes dict, list, str into an object.
:param data: dict, list or str.
:param klass: class literal, or string of class name.
:return: object.
"""
if data is None:
return None
if type(klass) == str:
if klass.startswith('list['):
                sub_kls = re.match(r'list\[(.*)\]', klass).group(1)
return [self.__deserialize(sub_data, sub_kls)
for sub_data in data]
if klass.startswith('dict('):
                sub_kls = re.match(r'dict\(([^,]*), (.*)\)', klass).group(2)
return {k: self.__deserialize(v, sub_kls)
for k, v in iteritems(data)}
# convert str to class
if klass in self.NATIVE_TYPES_MAPPING:
klass = self.NATIVE_TYPES_MAPPING[klass]
else:
klass = getattr(models, klass)
if klass in self.PRIMITIVE_TYPES:
return self.__deserialize_primitive(data, klass)
elif klass == object:
return self.__deserialize_object(data)
elif klass == date:
return self.__deserialize_date(data)
elif klass == datetime:
return self.__deserialize_datatime(data)
else:
return self.__deserialize_model(data, klass)
def call_api(self, resource_path, method,
path_params=None, query_params=None, header_params=None,
body=None, post_params=None, files=None,
response_type=None, auth_settings=None, callback=None,
_return_http_data_only=None, collection_formats=None, _preload_content=True,
_request_timeout=None):
"""
Makes the HTTP request (synchronous) and return the deserialized data.
To make an async request, define a function for callback.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response: Response data type.
:param files dict: key -> filename, value -> filepath,
for `multipart/form-data`.
:param callback function: Callback function for asynchronous request.
            If this parameter is provided,
            the request will be made asynchronously.
:param _return_http_data_only: response data without head status code and headers
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param _preload_content: if False, the urllib3.HTTPResponse object will be returned without
reading/decoding response data. Default is True.
:param _request_timeout: timeout setting for this request. If one number provided, it will be total request
timeout. It can also be a pair (tuple) of (connection, read) timeouts.
:return:
            If the callback parameter is provided,
            the request will be made asynchronously and
            the method will return the request thread.
            If the callback parameter is None,
            the method will return the response directly.
"""
if callback is None:
return self.__call_api(resource_path, method,
path_params, query_params, header_params,
body, post_params, files,
response_type, auth_settings, callback,
_return_http_data_only, collection_formats, _preload_content, _request_timeout)
else:
thread = threading.Thread(target=self.__call_api,
args=(resource_path, method,
path_params, query_params,
header_params, body,
post_params, files,
response_type, auth_settings,
callback, _return_http_data_only,
collection_formats, _preload_content, _request_timeout))
thread.start()
return thread
def request(self, method, url, query_params=None, headers=None,
post_params=None, body=None, _preload_content=True, _request_timeout=None):
"""
Makes the HTTP request using RESTClient.
"""
if method == "GET":
return self.rest_client.GET(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "HEAD":
return self.rest_client.HEAD(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "OPTIONS":
return self.rest_client.OPTIONS(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "POST":
return self.rest_client.POST(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PUT":
return self.rest_client.PUT(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PATCH":
return self.rest_client.PATCH(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "DELETE":
return self.rest_client.DELETE(url,
query_params=query_params,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
else:
raise ValueError(
"http method must be `GET`, `HEAD`, `OPTIONS`,"
" `POST`, `PATCH`, `PUT` or `DELETE`."
)
def parameters_to_tuples(self, params, collection_formats):
"""
Get parameters as list of tuples, formatting collections.
:param params: Parameters as dict or list of two-tuples
:param dict collection_formats: Parameter collection formats
:return: Parameters as list of tuples, collections formatted
"""
new_params = []
if collection_formats is None:
collection_formats = {}
for k, v in iteritems(params) if isinstance(params, dict) else params:
if k in collection_formats:
collection_format = collection_formats[k]
if collection_format == 'multi':
new_params.extend((k, value) for value in v)
else:
if collection_format == 'ssv':
delimiter = ' '
elif collection_format == 'tsv':
delimiter = '\t'
elif collection_format == 'pipes':
delimiter = '|'
else: # csv is the default
delimiter = ','
new_params.append(
(k, delimiter.join(str(value) for value in v)))
else:
new_params.append((k, v))
return new_params
def prepare_post_parameters(self, post_params=None, files=None):
"""
Builds form parameters.
:param post_params: Normal form parameters.
:param files: File parameters.
:return: Form parameters with files.
"""
params = []
if post_params:
params = post_params
if files:
for k, v in iteritems(files):
if not v:
continue
file_names = v if type(v) is list else [v]
for n in file_names:
with open(n, 'rb') as f:
filename = os.path.basename(f.name)
filedata = f.read()
mimetype = mimetypes.\
guess_type(filename)[0] or 'application/octet-stream'
params.append(tuple([k, tuple([filename, filedata, mimetype])]))
return params
def select_header_accept(self, accepts):
"""
Returns `Accept` based on an array of accepts provided.
:param accepts: List of headers.
:return: Accept (e.g. application/json).
"""
if not accepts:
return
accepts = [x.lower() for x in accepts]
if 'application/json' in accepts:
return 'application/json'
else:
return ', '.join(accepts)
def select_header_content_type(self, content_types):
"""
Returns `Content-Type` based on an array of content_types provided.
:param content_types: List of content-types.
:return: Content-Type (e.g. application/json).
"""
if not content_types:
return 'application/json'
content_types = [x.lower() for x in content_types]
if 'application/json' in content_types or '*/*' in content_types:
return 'application/json'
else:
return content_types[0]
def update_params_for_auth(self, headers, querys, auth_settings):
"""
Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
"""
config = Configuration()
if not auth_settings:
return
for auth in auth_settings:
auth_setting = config.auth_settings().get(auth)
if auth_setting:
if not auth_setting['value']:
continue
elif auth_setting['in'] == 'header':
headers[auth_setting['key']] = auth_setting['value']
elif auth_setting['in'] == 'query':
querys.append((auth_setting['key'], auth_setting['value']))
else:
raise ValueError(
'Authentication token must be in `query` or `header`'
)
def __deserialize_file(self, response):
"""
Saves response body into a file in a temporary folder,
using the filename from the `Content-Disposition` header if provided.
:param response: RESTResponse.
:return: file path.
"""
config = Configuration()
fd, path = tempfile.mkstemp(dir=config.temp_folder_path)
os.close(fd)
os.remove(path)
content_disposition = response.getheader("Content-Disposition")
if content_disposition:
filename = re.\
search(r'filename=[\'"]?([^\'"\s]+)[\'"]?', content_disposition).\
group(1)
path = os.path.join(os.path.dirname(path), filename)
with open(path, "w") as f:
f.write(response.data)
return path
def __deserialize_primitive(self, data, klass):
"""
Deserializes string to primitive type.
:param data: str.
:param klass: class literal.
:return: int, long, float, str, bool.
"""
try:
return klass(data)
except UnicodeEncodeError:
return unicode(data)
except TypeError:
return data
def __deserialize_object(self, value):
"""
        Return the original value unchanged.
:return: object.
"""
return value
def __deserialize_date(self, string):
"""
Deserializes string to date.
:param string: str.
:return: date.
"""
try:
from dateutil.parser import parse
return parse(string).date()
except ImportError:
return string
except ValueError:
raise ApiException(
status=0,
reason="Failed to parse `{0}` into a date object".format(string)
)
def __deserialize_datatime(self, string):
"""
Deserializes string to datetime.
The string should be in iso8601 datetime format.
:param string: str.
:return: datetime.
"""
try:
from dateutil.parser import parse
return parse(string)
except ImportError:
return string
except ValueError:
raise ApiException(
status=0,
reason=(
"Failed to parse `{0}` into a datetime object"
.format(string)
)
)
def __deserialize_model(self, data, klass):
"""
Deserializes list or dict to model.
:param data: dict, list.
:param klass: class literal.
:return: model object.
"""
if not klass.swagger_types:
return data
kwargs = {}
for attr, attr_type in iteritems(klass.swagger_types):
if data is not None \
and klass.attribute_map[attr] in data \
and isinstance(data, (list, dict)):
value = data[klass.attribute_map[attr]]
kwargs[attr] = self.__deserialize(value, attr_type)
instance = klass(**kwargs)
return instance
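# Minimal usage sketch (endpoint URL illustrative; the generated *Api classes
# normally wrap this client rather than calling it directly):
#
#   client = ApiClient(host='https://localhost:8443/nifi-api')
#   client.set_default_header('Authorization', 'Bearer <token>')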
| 38.820189
| 479
| 0.546725
|
4a15128c8c1b38ece77c7829cf0ffdff5281f069
| 1,733
|
py
|
Python
|
sympy/logic/utilities/dimacs.py
|
Michal-Gagala/sympy
|
3cc756c2af73b5506102abaeefd1b654e286e2c8
|
[
"MIT"
] | null | null | null |
sympy/logic/utilities/dimacs.py
|
Michal-Gagala/sympy
|
3cc756c2af73b5506102abaeefd1b654e286e2c8
|
[
"MIT"
] | null | null | null |
sympy/logic/utilities/dimacs.py
|
Michal-Gagala/sympy
|
3cc756c2af73b5506102abaeefd1b654e286e2c8
|
[
"MIT"
] | null | null | null |
"""For reading in DIMACS file format
www.cs.ubc.ca/~hoos/SATLIB/Benchmarks/SAT/satformat.ps
"""
from sympy.core import Symbol
from sympy.logic.boolalg import And, Or
import re
def load(s):
"""Loads a boolean expression from a string.
Examples
========
>>> from sympy.logic.utilities.dimacs import load
>>> load('1')
cnf_1
>>> load('1 2')
cnf_1 | cnf_2
>>> load('1 \\n 2')
cnf_1 & cnf_2
>>> load('1 2 \\n 3')
cnf_3 & (cnf_1 | cnf_2)
"""
clauses = []
lines = s.split('\n')
pComment = re.compile(r'c.*')
pStats = re.compile(r'p\s*cnf\s*(\d*)\s*(\d*)')
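    # DIMACS CNF layout: 'c ...' lines are comments, 'p cnf <vars> <clauses>' is
    # the problem line, and every other line lists literals terminated by 0,
    # with negative numbers denoting negated variables.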
while len(lines) > 0:
line = lines.pop(0)
# Only deal with lines that aren't comments
if not pComment.match(line):
m = pStats.match(line)
if not m:
nums = line.rstrip('\n').split(' ')
list = []
for lit in nums:
if lit != '':
if int(lit) == 0:
continue
num = abs(int(lit))
sign = True
if int(lit) < 0:
sign = False
if sign:
list.append(Symbol("cnf_%s" % num))
else:
list.append(~Symbol("cnf_%s" % num))
if len(list) > 0:
clauses.append(Or(*list))
return And(*clauses)
def load_file(location):
"""Loads a boolean expression from a file."""
with open(location) as f:
s = f.read()
return load(s)
| 24.408451
| 65
| 0.431044
|
4a15131be234314251e4510388d568e3e6934b7f
| 4,405
|
py
|
Python
|
configs/restorers/tdan/tdan_vimeo90k_bix4_ft_lr5e-5_400k.py
|
ChenShuwei1001/mmediting
|
285e629fe9da8a13c7538a6bb3347e8870cd7201
|
[
"Apache-2.0"
] | 1
|
2021-09-16T08:55:47.000Z
|
2021-09-16T08:55:47.000Z
|
configs/restorers/tdan/tdan_vimeo90k_bix4_ft_lr5e-5_400k.py
|
ChenShuwei1001/mmediting
|
285e629fe9da8a13c7538a6bb3347e8870cd7201
|
[
"Apache-2.0"
] | 1
|
2021-08-05T16:20:39.000Z
|
2021-08-05T16:20:39.000Z
|
configs/restorers/tdan/tdan_vimeo90k_bix4_ft_lr5e-5_400k.py
|
ChenShuwei1001/mmediting
|
285e629fe9da8a13c7538a6bb3347e8870cd7201
|
[
"Apache-2.0"
] | null | null | null |
exp_name = 'tdan_vimeo90k_bix4_ft_lr5e-5_400k'
# model settings
model = dict(
type='TDAN',
generator=dict(type='TDANNet'),
pixel_loss=dict(type='MSELoss', loss_weight=1.0, reduction='mean'),
lq_pixel_loss=dict(type='MSELoss', loss_weight=0.25, reduction='mean'))
# model training and testing settings
train_cfg = None
test_cfg = dict(metrics=['PSNR', 'SSIM'], crop_border=8, convert_to='y')
# dataset settings
train_dataset_type = 'SRVimeo90KDataset'
val_dataset_type = 'SRVid4Dataset'
train_pipeline = [
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(
type='Normalize',
keys=['lq', 'gt'],
mean=[0.5, 0.5, 0.5],
std=[1, 1, 1]),
dict(type='PairedRandomCrop', gt_patch_size=192),
dict(
type='Flip', keys=['lq', 'gt'], flip_ratio=0.5,
direction='horizontal'),
dict(type='Flip', keys=['lq', 'gt'], flip_ratio=0.5, direction='vertical'),
dict(type='RandomTransposeHW', keys=['lq', 'gt'], transpose_ratio=0.5),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(type='Collect', keys=['lq', 'gt'], meta_keys=['lq_path', 'gt_path'])
]
val_pipeline = [
dict(type='GenerateFrameIndiceswithPadding', padding='reflection'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(
type='Normalize',
keys=['lq', 'gt'],
mean=[0.5, 0.5, 0.5],
std=[1, 1, 1]),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(type='Collect', keys=['lq', 'gt'], meta_keys=['lq_path', 'gt_path'])
]
demo_pipeline = [
dict(type='GenerateSegmentIndices', interval_list=[1]),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq']),
dict(type='Normalize', keys=['lq'], mean=[0.5, 0.5, 0.5], std=[1, 1, 1]),
dict(type='FramesToTensor', keys=['lq']),
dict(type='Collect', keys=['lq'], meta_keys=['lq_path', 'key'])
]
data = dict(
workers_per_gpu=8,
train_dataloader=dict(samples_per_gpu=16, drop_last=True), # 8 gpus
val_dataloader=dict(samples_per_gpu=1),
test_dataloader=dict(samples_per_gpu=1),
train=dict(
type='RepeatDataset',
times=1000,
dataset=dict(
type=train_dataset_type,
lq_folder='data/Vimeo-90K/BIx4',
gt_folder='data/Vimeo-90K/GT',
ann_file='data/Vimeo-90K/meta_info_Vimeo90K_train_GT.txt',
num_input_frames=5,
pipeline=train_pipeline,
scale=4,
test_mode=False)),
val=dict(
type=val_dataset_type,
lq_folder='data/Vid4/BIx4',
gt_folder='data/Vid4/GT',
pipeline=val_pipeline,
ann_file='data/Vid4/meta_info_Vid4_GT.txt',
scale=4,
num_input_frames=5,
test_mode=True),
test=dict(
type=val_dataset_type,
lq_folder='data/SPMCS/BIx4',
gt_folder='data/SPMCS/GT',
pipeline=val_pipeline,
ann_file='data/SPMCS/meta_info_SPMCS_GT.txt',
scale=4,
num_input_frames=5,
test_mode=True),
)
# optimizer
optimizers = dict(generator=dict(type='Adam', lr=5e-5))
# learning policy
total_iters = 400000
lr_config = dict(policy='Step', by_epoch=False, step=[400000], gamma=0.5)
checkpoint_config = dict(interval=50000, save_optimizer=True, by_epoch=False)
# remove gpu_collect=True in non distributed training
evaluation = dict(interval=50000, save_image=False, gpu_collect=True)
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook', by_epoch=False),
# dict(type='TensorboardLoggerHook'),
])
visual_config = None
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = f'./work_dirs/{exp_name}'
load_from = './experiments/tdan_vimeo90k_bix4_lr1e-4_400k/iter_400000.pth'
resume_from = None
workflow = [('train', 1)]
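# Usage sketch (assumption -- not part of the config itself): in an MMEditing
# checkout this config is normally consumed by the standard entry points, e.g.
#   python tools/train.py configs/restorers/tdan/tdan_vimeo90k_bix4_ft_lr5e-5_400k.py
# It can also be loaded for inspection without training:
#   from mmcv import Config
#   cfg = Config.fromfile('configs/restorers/tdan/tdan_vimeo90k_bix4_ft_lr5e-5_400k.py')
#   print(cfg.model, cfg.total_iters)
# Kept as comments so no extra names leak into the parsed config dict.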
| 31.241135
| 79
| 0.621112
|
4a151392c9bfcdd7a7a75be0a68ab3c07c0411c9
| 47,639
|
py
|
Python
|
examples/cog.py
|
yanlai00/cog
|
01dbfcbe336072b6f4cb2b9952606bd45c65af7f
|
[
"MIT"
] | null | null | null |
examples/cog.py
|
yanlai00/cog
|
01dbfcbe336072b6f4cb2b9952606bd45c65af7f
|
[
"MIT"
] | null | null | null |
examples/cog.py
|
yanlai00/cog
|
01dbfcbe336072b6f4cb2b9952606bd45c65af7f
|
[
"MIT"
] | 2
|
2021-06-07T07:35:11.000Z
|
2021-07-15T22:02:52.000Z
|
import rlkit.torch.pytorch_util as ptu
from rlkit.data_management.load_buffer import load_data_from_npy_chaining,load_data_from_npy_chaining_mult
from rlkit.samplers.data_collector import MdpPathCollector, \
CustomMDPPathCollector
from rlkit.torch.sac.policies import TanhGaussianPolicy, MakeDeterministic, DeterministicPolicy
from rlkit.torch.sac.cql import CQLTrainer
from rlkit.torch.sac.cql_montecarlo import CQLMCTrainer
from rlkit.torch.sac.cql_context import CQLTrainerContext
from rlkit.torch.sac.cql_bchead import CQLBCTrainer
from rlkit.torch.sac.cql_single import CQLSingleTrainer
from rlkit.torch.sac.brac import BRACTrainer
from rlkit.torch.sac.brac_shifted import BRACShiftedTrainer
from rlkit.torch.sac.td3bc import TD3BCTrainer
from rlkit.torch.conv_networks import CNN, ConcatCNN, ConcatBottleneckCNN, TwoHeadCNN, VQVAEEncoderConcatCNN, \
ConcatBottleneckVQVAECNN, VQVAEEncoderCNN
from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm
from rlkit.util.video import VideoSaveFunction
from rlkit.launchers.launcher_util import setup_logger
import gym
import torch
import argparse, os
import roboverse
import numpy as np
import os
from os.path import expanduser
DEFAULT_BUFFER = ('/media/avi/data/Work/github/avisingh599/minibullet'
'/data/oct6_Widow250DrawerGraspNeutral-v0_20K_save_all'
'_noise_0.1_2020-10-06T19-37-26_100.npy')
DEFAULT_PRIOR_BUFFER = ('/media/avi/data/Work/github/avisingh599/minibullet'
'/data/oct6_Widow250DrawerGraspNeutral-v0_20K_save_all'
'_noise_0.1_2020-10-06T19-37-26_100.npy')
DEFAULT_TASK_BUFFER = ('/media/avi/data/Work/github/avisingh599/minibullet'
'/data/oct6_Widow250DrawerGraspNeutral-v0_20K_save_all'
'_noise_0.1_2020-10-06T19-37-26_100.npy')
CUSTOM_LOG_DIR = '/home/stian/doodad-output'
def experiment(variant):
eval_env = roboverse.make(variant['env'], transpose_image=True)
if variant['num_sample'] != 0:
eval_env.num_obj_sample=variant['num_sample']
if variant['debug_scale_actions']:
class ActionWrapper(gym.ActionWrapper):
def __init__(self, env):
super().__init__(env)
def action(self, act):
if variant['scale_type'] == 1:
act = np.concatenate((act-np.concatenate((act[:-2]*0.9,[0,0])), act))
elif variant['scale_type'] == 2:
eps = np.random.uniform(-0.05, 0.05, act[:-2].shape)
act = np.concatenate((eps, act[:-2]-eps, act[-2:]))
return act
eval_env = ActionWrapper(eval_env)
expl_env = eval_env
action_dim = eval_env.action_space.low.size
print(action_dim)
cnn_params = variant['cnn_params']
cnn_params.update(
dropout = variant['dropout'],
dropout_prob = variant['dropout_prob'],
)
if variant['bigger_net']:
print('bigger_net')
cnn_params.update(
hidden_sizes=[1024, 512, 512, 512, 256],
)
if variant['deeper_net']:
print('deeper conv net')
cnn_params.update(
kernel_sizes=[3, 3, 3, 3, 3],
n_channels=[32, 32, 32, 32, 32],
strides=[1, 1, 1, 1, 1],
paddings=[1, 1, 1, 1, 1],
pool_sizes=[2, 2, 1, 1, 1],
pool_strides=[2, 2, 1, 1, 1],
pool_paddings=[0, 0, 0, 0, 0]
)
if variant['spectral_norm_conv']:
cnn_params.update(
spectral_norm_conv=True,
)
if variant['spectral_norm_fc']:
cnn_params.update(
spectral_norm_fc=True,
)
cnn_params.update(
input_width=48,
input_height=48,
input_channels=9 if variant['history'] else 3,
output_size=1,
added_fc_input_size=action_dim + 6 if variant['context'] else action_dim,
normalize_conv_activation=variant['normalize_conv_activation']
)
if variant['hist_state']:
cnn_params['added_fc_input_size'] = action_dim + 10 *variant['num_hist']
if variant['vqvae_enc']:
if variant['bottleneck']:
qf1 = ConcatBottleneckVQVAECNN(action_dim, bottleneck_dim=variant['bottleneck_dim'],
deterministic=variant['deterministic_bottleneck'],
spectral_norm_conv=cnn_params['spectral_norm_conv'],
spectral_norm_fc=cnn_params['spectral_norm_fc'])
qf2 = ConcatBottleneckVQVAECNN(action_dim, bottleneck_dim=variant['bottleneck_dim'],
deterministic=variant['deterministic_bottleneck'],
spectral_norm_conv = cnn_params['spectral_norm_conv'],
spectral_norm_fc = cnn_params['spectral_norm_fc'])
if variant['share_encoder']:
print('sharing encoder weights between QF1 and QF2!')
qf2.encoder = qf1.encoder
target_qf1 = ConcatBottleneckVQVAECNN(action_dim, bottleneck_dim=variant['bottleneck_dim'],
deterministic=variant['deterministic_bottleneck'],
spectral_norm_conv=cnn_params['spectral_norm_conv'],
spectral_norm_fc=cnn_params['spectral_norm_fc'])
target_qf2 = ConcatBottleneckVQVAECNN(action_dim, bottleneck_dim=variant['bottleneck_dim'],
deterministic=variant['deterministic_bottleneck'],
spectral_norm_conv=cnn_params['spectral_norm_conv'],
spectral_norm_fc=cnn_params['spectral_norm_fc'])
else:
qf1 = VQVAEEncoderConcatCNN(**cnn_params)
qf2 = VQVAEEncoderConcatCNN(**cnn_params)
if variant['share_encoder']:
print('sharing encoder weights between QF1 and QF2!')
del qf2.encoder
qf2.encoder = qf1.encoder
target_qf1 = VQVAEEncoderConcatCNN(**cnn_params)
target_qf2 = VQVAEEncoderConcatCNN(**cnn_params)
else:
if variant['mcret'] or variant['bchead']:
qf1 = TwoHeadCNN(action_dim, deterministic= not variant['bottleneck'], bottleneck_dim=variant['bottleneck_dim'])
qf2 = TwoHeadCNN(action_dim, deterministic= not variant['bottleneck'], bottleneck_dim=variant['bottleneck_dim'])
target_qf1 = TwoHeadCNN(action_dim, deterministic= not variant['bottleneck'], bottleneck_dim=variant['bottleneck_dim'])
target_qf2 = TwoHeadCNN(action_dim, deterministic= not variant['bottleneck'], bottleneck_dim=variant['bottleneck_dim'])
if variant['share_encoder']:
raise NotImplementedError
elif variant['bottleneck']:
qf1 = ConcatBottleneckCNN(action_dim, bottleneck_dim=variant['bottleneck_dim'],deterministic=variant['deterministic_bottleneck'])
qf2 = ConcatBottleneckCNN(action_dim, bottleneck_dim=variant['bottleneck_dim'],deterministic=variant['deterministic_bottleneck'])
if variant['share_encoder']:
raise NotImplementedError
target_qf1 = ConcatBottleneckCNN(action_dim, bottleneck_dim=variant['bottleneck_dim'],deterministic=variant['deterministic_bottleneck'])
target_qf2 = ConcatBottleneckCNN(action_dim, bottleneck_dim=variant['bottleneck_dim'],deterministic=variant['deterministic_bottleneck'])
else:
qf1 = ConcatCNN(**cnn_params)
qf2 = ConcatCNN(**cnn_params)
if variant['share_encoder']:
raise NotImplementedError
target_qf1 = ConcatCNN(**cnn_params)
target_qf2 = ConcatCNN(**cnn_params)
target_qf1.load_state_dict(qf1.state_dict())
target_qf2.load_state_dict(qf2.state_dict())
if variant['bottleneck_policy']:
cnn_params.update(
output_size=256,
added_fc_input_size=6 if variant['context'] else 0,
hidden_sizes=[1024, 512],
spectral_norm_fc=False,
spectral_norm_conv=False,
normalize_conv_activation=False,
)
policy_obs_processor = ConcatBottleneckCNN(cnn_params['added_fc_input_size'], bottleneck_dim=variant['bottleneck_dim'],deterministic=variant['deterministic_bottleneck'], output_size=cnn_params['output_size'])
elif variant['vqvae_policy']:
if variant['share_encoder']:
print('sharing encoder weights between QF and Policy with VQVAE Encoder')
policy_obs_processor = qf1.encoder
cnn_params.update(
output_size=qf1.get_conv_output_size(),
)
else:
cnn_params.update(
output_size=256,
added_fc_input_size=6 if variant['context'] else 0,
hidden_sizes=[1024, 512],
spectral_norm_fc=False,
spectral_norm_conv=False,
normalize_conv_activation=False,
)
policy_obs_processor = VQVAEEncoderCNN(**cnn_params)
else:
cnn_params.update(
output_size=256,
added_fc_input_size=6 if variant['context'] else 0,
hidden_sizes=[1024, 512],
spectral_norm_fc=False,
spectral_norm_conv=False,
normalize_conv_activation=False,
)
if variant['hist_state']:
cnn_params['added_fc_input_size'] = 10 *variant['num_hist']
policy_obs_processor = CNN(**cnn_params)
if variant['td3bc']:
policy = DeterministicPolicy(
obs_dim=cnn_params['output_size'],
action_dim=action_dim,
hidden_sizes=[256, 256, 256],
obs_processor=policy_obs_processor,
shared_encoder=variant['share_encoder'],
)
else:
policy = TanhGaussianPolicy(
obs_dim=cnn_params['output_size'],
action_dim=action_dim,
hidden_sizes=[256, 256, 256],
obs_processor=policy_obs_processor,
shared_encoder=variant['share_encoder'],
)
eval_policy = MakeDeterministic(policy)
eval_path_collector = MdpPathCollector(
eval_env,
eval_policy,
)
expl_path_collector = CustomMDPPathCollector(
eval_env,
)
observation_key = 'image'
if args.buffer in [5,6]:
replay_buffer = load_data_from_npy_chaining_mult(
variant, expl_env, observation_key)
else:
replay_buffer = load_data_from_npy_chaining(
variant,
expl_env,
observation_key,
duplicate=variant['duplicate'],
num_traj=variant['num_traj'],
debug_scale_actions=variant['debug_scale_actions'],
debug_shift=variant['debug_shift'],
scale_type=variant['scale_type'],
hist_state=variant['hist_state'],
num_hist=variant['num_hist'],
)
if variant['val']:
if args.buffer in [5,6]:
replay_buffer_val = load_data_from_npy_chaining_mult(
variant, expl_env, observation_key)
else:
buffers = []
ba = lambda x, p=args.prob, y=None: buffers.append((path+x,dict(p=p,alter_type=y,)))
if args.buffer == 30:
path = p_data_path
ba('val_pick_2obj_Widow250PickTrayMult-v0_100_save_all_noise_0.1_2021-05-07T01-16-43_117.npy', p=args.prob,y='zero')
ba('val_place_2obj_Widow250PlaceTrayMult-v0_100_save_all_noise_0.1_2021-05-07T01-16-48_108.npy', p=args.prob)
if args.buffer == 32 or args.buffer == 9001:
path = p_data_path
ba('val_pick_2obj_Widow250PickTrayMult-v0_100_save_all_noise_0.1_2021-05-07T01-16-43_117.npy', p=args.prob,y='zero')
ba('val_place_2obj_Widow250PlaceTrayMult-v0_100_save_all_noise_0.1_2021-05-07T01-16-48_108.npy', p=args.prob)
elif args.buffer == 35:
path = p_data_path
ba('val_pick_35_Widow250PickTrayMult-v0_100_save_all_noise_0.1_2021-06-14T21-52-13_100.npy',
p=args.prob, y='zero')
ba('val_place_35_Widow250PlaceTrayMult-v0_100_save_all_noise_0.1_2021-06-14T21-50-14_100.npy',
p=args.prob)
elif args.buffer == 36:
path = p_data_path
ba('val_pick_20obj_Widow250PickTrayMult-v0_100_save_all_noise_0.1_2021-05-07T01-16-53_114.npy',
p=args.prob, y='zero')
ba('val_place_20obj_Widow250PlaceTrayMult-v0_100_save_all_noise_0.1_2021-05-07T01-16-58_90.npy',
p=args.prob)
old_pb, variant['prior_buffer'] = variant['prior_buffer'], buffers[0]
old_tb, variant['task_buffer'] = variant['task_buffer'], buffers[1]
old_nt, variant['num_traj'] = variant['num_traj'], 0
replay_buffer_val = load_data_from_npy_chaining(
variant, expl_env, observation_key, duplicate=variant['duplicate'], num_traj=variant['num_traj'])
variant['prior_buffer'] = old_pb
variant['task_buffer'] = old_tb
variant['num_traj'] = old_nt
print('validation')
else:
print('no validation')
replay_buffer_val = None
# Translate 0/1 rewards to +4/+10 rewards.
if variant['use_positive_rew']:
if set(np.unique(replay_buffer._rewards)).issubset({0, 1}):
replay_buffer._rewards = replay_buffer._rewards * 6.0
replay_buffer._rewards = replay_buffer._rewards + 4.0
assert set(np.unique(replay_buffer._rewards)).issubset(
set(6.0 * np.array([0, 1]) + 4.0))
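        # Worked example of the linear map applied above, r -> 6*r + 4:
        # a stored reward of 0 becomes 4 and a reward of 1 becomes 10,
        # matching the "+4/+10" convention noted in the comment before
        # this block.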
if variant['brac']:
cnn_params.update(
output_size=256,
added_fc_input_size=6 if variant['context'] else 0,
hidden_sizes=[1024, 512],
spectral_norm_fc=False,
spectral_norm_conv=False,
normalize_conv_activation=False,
)
behavior_policy = TanhGaussianPolicy(
obs_dim=cnn_params['output_size'],
action_dim=action_dim,
hidden_sizes=[256, 256, 256],
obs_processor=CNN(**cnn_params),
shared_encoder=variant['share_encoder'],
)
if not variant['continual']:
behavior_policy.load_state_dict(torch.load(variant['behavior_path'])['policy_state_dict'] if variant['behavior_path'] else None)
if variant['brac_shifted']:
trainer = BRACShiftedTrainer(
env=eval_env,
policy=policy,
behavior_policy=behavior_policy.to(ptu.device),
qf1=qf1,
qf2=qf2,
target_qf1=target_qf1,
target_qf2=target_qf2,
beta=variant['beta'],
log_dir=variant['log_dir'],
variant_dict=variant,
continual=variant['continual'],
**variant['trainer_kwargs']
)
else:
trainer = BRACTrainer(
env=eval_env,
policy=policy,
behavior_policy=behavior_policy.to(ptu.device),
qf1=qf1,
qf2=qf2,
target_qf1=target_qf1,
target_qf2=target_qf2,
beta=variant['beta'],
log_dir=variant['log_dir'],
variant_dict=variant,
continual=variant['continual'],
bottleneck=variant['bottleneck_policy'] or variant['bottleneck'],
bottleneck_type = 'policy' if variant['bottleneck_policy'] else 'qf',
bottleneck_const=variant['bottleneck_const'],
start_bottleneck=0,
**variant['trainer_kwargs']
)
elif variant['td3bc']:
trainer = TD3BCTrainer(
env=eval_env,
policy=policy,
qf1=qf1,
qf2=qf2,
target_qf1=target_qf1,
target_qf2=target_qf2,
beta=variant['beta'],
log_dir=variant['log_dir'],
variant_dict=variant,
continual=variant['continual'],
**variant['trainer_kwargs']
)
elif variant['mcret']:
trainer = CQLMCTrainer(
env=eval_env,
policy=policy,
qf1=qf1,
qf2=qf2,
target_qf1=target_qf1,
target_qf2=target_qf2,
bottleneck=variant['bottleneck'],
bottleneck_const=variant['bottleneck_const'],
bottleneck_lagrange=variant['bottleneck_lagrange'],
dr3=variant['dr3'],
dr3_feat=variant['dr3_feat'],
dr3_weight=variant['dr3_weight'],
log_dir = variant['log_dir'],
wand_b=not variant['debug'],
only_bottleneck = variant['only_bottleneck'],
variant_dict=variant,
gamma=variant['gamma'],
**variant['trainer_kwargs']
)
elif variant['bchead']:
trainer = CQLBCTrainer(
env=eval_env,
policy=policy,
qf1=qf1,
qf2=qf2,
target_qf1=target_qf1,
target_qf2=target_qf2,
bottleneck=variant['bottleneck'],
bottleneck_const=variant['bottleneck_const'],
bottleneck_lagrange=variant['bottleneck_lagrange'],
dr3=variant['dr3'],
dr3_feat=variant['dr3_feat'],
dr3_weight=variant['dr3_weight'],
log_dir = variant['log_dir'],
wand_b=not variant['debug'],
only_bottleneck = variant['only_bottleneck'],
variant_dict=variant,
gamma=variant['gamma'],
**variant['trainer_kwargs']
)
elif variant['singleQ']:
trainer = CQLSingleTrainer(
env=eval_env,
policy=policy,
qf1=qf1,
target_qf1=target_qf1,
bottleneck=variant['bottleneck'],
bottleneck_const=variant['bottleneck_const'],
bottleneck_lagrange=variant['bottleneck_lagrange'],
dr3=variant['dr3'],
dr3_feat=variant['dr3_feat'],
dr3_weight=variant['dr3_weight'],
only_bottleneck = variant['only_bottleneck'],
log_dir = variant['log_dir'],
wand_b=not variant['debug'],
variant_dict=variant,
validation=variant['val'],
validation_buffer=replay_buffer_val,
squared=variant['squared'],
**variant['trainer_kwargs']
)
del qf2, target_qf2
        torch.cuda.empty_cache()
elif variant['debug_scale_actions'] and variant['context'] or variant['hist_state']:
trainer = CQLTrainerContext(
env=eval_env,
policy=policy,
qf1=qf1,
qf2=qf2,
target_qf1=target_qf1,
target_qf2=target_qf2,
bottleneck=variant['bottleneck'],
bottleneck_const=variant['bottleneck_const'],
bottleneck_lagrange=variant['bottleneck_lagrange'],
dr3=variant['dr3'],
dr3_feat=variant['dr3_feat'],
dr3_weight=variant['dr3_weight'],
log_dir = variant['log_dir'],
wand_b=not variant['debug'],
only_bottleneck = variant['only_bottleneck'],
variant_dict=variant,
validation=variant['val'],
validation_buffer=replay_buffer_val,
real_data=False,
context_key='curr_diff' if variant['debug_scale_actions'] else 'prev_states', #TODO change from hardcoded value
**variant['trainer_kwargs']
)
else:
trainer = CQLTrainer(
env=eval_env,
policy=policy,
qf1=qf1,
qf2=qf2,
target_qf1=target_qf1,
target_qf2=target_qf2,
bottleneck=variant['bottleneck'],
bottleneck_const=variant['bottleneck_const'],
bottleneck_lagrange=variant['bottleneck_lagrange'],
dr3=variant['dr3'],
dr3_feat=variant['dr3_feat'],
dr3_weight=variant['dr3_weight'],
only_bottleneck = variant['only_bottleneck'],
log_dir = variant['log_dir'],
wand_b=not variant['debug'],
variant_dict=variant,
validation=variant['val'],
validation_buffer=replay_buffer_val,
squared=variant['squared'],
history=variant['history'],
regularization = variant['regularization'],
regularization_type = variant['regularization_type'],
regularization_weight = variant['regularization_weight'],
**variant['trainer_kwargs']
)
algorithm = TorchBatchRLAlgorithm(
trainer=trainer,
exploration_env=expl_env,
evaluation_env=eval_env,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
replay_buffer=replay_buffer,
eval_both=False,
batch_rl=True,
**variant['algorithm_kwargs']
)
video_func = VideoSaveFunction(variant)
algorithm.post_epoch_funcs.append(video_func)
algorithm.to(ptu.device)
algorithm.train()
def enable_gpus(gpu_str):
if (gpu_str is not ""):
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_str
return
if __name__ == "__main__":
# noinspection PyTypeChecker
variant = dict(
algorithm="CQL",
version="normal",
algorithm_kwargs=dict(
# num_epochs=100,
# num_eval_steps_per_epoch=50,
# num_trains_per_train_loop=100,
# num_expl_steps_per_train_loop=100,
# min_num_steps_before_training=100,
# max_path_length=10,
num_epochs=3000,
num_eval_steps_per_epoch=5,
num_trains_per_train_loop=1000,
num_expl_steps_per_train_loop=0,
min_num_steps_before_training=1000,
max_path_length=30,
batch_size=256,
),
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
policy_lr=1E-4,
qf_lr=3E-4,
reward_scale=1,
use_automatic_entropy_tuning=True,
# Target nets/ policy vs Q-function update
policy_eval_start=10000,
num_qs=2,
# min Q
temp=1.0,
min_q_version=3,
min_q_weight=5.0,
# lagrange
with_lagrange=False, # Defaults to False
lagrange_thresh=5.0,
# extra params
num_random=1,
max_q_backup=False,
deterministic_backup=False,
),
cnn_params=dict(
kernel_sizes=[3, 3, 3],
n_channels=[16, 16, 16],
strides=[1, 1, 1],
hidden_sizes=[1024, 512, 256],
paddings=[1, 1, 1],
pool_type='max2d',
pool_sizes=[2, 2, 1], # the one at the end means no pool
pool_strides=[2, 2, 1],
pool_paddings=[0, 0, 0],
image_augmentation=True,
image_augmentation_padding=4,
spectral_norm_conv=False,
spectral_norm_fc=False,
),
dump_video_kwargs=dict(
imsize=48,
save_video_period=1,
),
)
parser = argparse.ArgumentParser()
parser.add_argument("--env", type=str, required=True)
parser.add_argument("--max-path-length", type=int, required=True)
parser.add_argument("--bottleneck", action='store_true')
parser.add_argument('--bottleneck_const', type=float, default=0.5)
parser.add_argument('--bottleneck_dim', type=int, default=16)
parser.add_argument('--bottleneck_lagrange', action='store_true')
parser.add_argument("--deterministic_bottleneck", action="store_true", default=False)
parser.add_argument("--only_bottleneck", action="store_true", default=False)
parser.add_argument("--mcret", action='store_true')
parser.add_argument("--bchead", action='store_true')
parser.add_argument("--prior-buffer", type=str, default=DEFAULT_PRIOR_BUFFER)
parser.add_argument("--task-buffer", type=str, default=DEFAULT_TASK_BUFFER)
parser.add_argument("--buffer", type=str, default=DEFAULT_BUFFER)
parser.add_argument("--gpu", default='0', type=str)
parser.add_argument("--min-q-weight", default=1.0, type=float,
help="Value of alpha in CQL")
parser.add_argument("--use-lagrange", action="store_true", default=False)
parser.add_argument("--lagrange-thresh", default=5.0, type=float,
help="Value of tau, used with --use-lagrange")
parser.add_argument("--use-positive-rew", action="store_true", default=False)
parser.add_argument("--duplicate", action="store_true", default=False)
parser.add_argument("--val", action="store_true", default=False)
parser.add_argument("--max-q-backup", action="store_true", default=False,
help="For max_{a'} backups, set this to true")
parser.add_argument("--no-deterministic-backup", action="store_true",
default=False,
help="By default, deterministic backup is used")
parser.add_argument("--policy-eval-start", default=1e9,
type=float)
parser.add_argument("--policy-lr", default=1e-4, type=float)
parser.add_argument("--min-q-version", default=3, type=int,
help=("min_q_version = 3 (CQL(H)), "
"version = 2 (CQL(rho))"))
parser.add_argument("--num-eval-per-epoch", type=int, default=5)
parser.add_argument("--seed", default=10, type=int)
parser.add_argument("--prob", default=1, type=float)
parser.add_argument("--old_prior_prob", default=0, type=float)
parser.add_argument('--gamma', default=1, type=float)
parser.add_argument('--num_traj', default=0, type=int)
parser.add_argument('--eval_num', default=0, type=int)
parser.add_argument("--name", default='test', type=str)
parser.add_argument("--discount", default=0.99, type=float)
parser.add_argument('--only_one', action='store_true')
parser.add_argument("--squared", action="store_true", default=False)
parser.add_argument("--azure", action="store_true", default=False)
parser.add_argument("--bigger_net", action="store_true", default=False)
parser.add_argument("--deeper_net", action="store_true", default=False)
parser.add_argument("--vqvae_enc", action="store_true", default=False)
parser.add_argument("--vqvae_policy", action="store_true", default=False)
parser.add_argument("--share_encoder", action="store_true", default=False)
parser.add_argument("--spectral_norm_conv", action="store_true", default=False)
parser.add_argument("--spectral_norm_fc", action="store_true", default=False)
parser.add_argument("--dr3", action="store_true", default=False)
parser.add_argument("--dr3_feat", action="store_true", default=False)
parser.add_argument("--dr3_weight", default=0.001, type=float)
parser.add_argument("--eval_every_n", default=1, type=int)
parser.add_argument('--singleQ', action='store_true')
parser.add_argument('--debug_scale_actions', action='store_true')
parser.add_argument('--debug_shift', action='store_true')
parser.add_argument('--hist_state', action='store_true')
parser.add_argument('--context', action='store_true')
parser.add_argument('--normalize_conv_activation', action='store_true')
parser.add_argument('--history', action='store_true')
parser.add_argument('--batch_size', type=int, default=256)
parser.add_argument('--scale_type', type=int, default=1)
parser.add_argument('--num_hist', type=int, default=1)
parser.add_argument('--brac', action='store_true')
parser.add_argument('--beta', type=float, default=1.0)
parser.add_argument('--continual', action='store_true')
parser.add_argument('--brac_shifted', action='store_true')
parser.add_argument('--behavior_path', default='/nfs/kun1/users/asap7772/cog/data/updatedbuffer-behavior-bc/updatedbuffer_behavior_bc_2021_08_22_09_52_28_0000--s-0/model_pkl/250.pt', type=str)
parser.add_argument('--regularization', action='store_true')
parser.add_argument('--regularization_type', type=str, default='l2')
parser.add_argument('--regularization_weight', type=float, default=0.0)
parser.add_argument('--dropout', action='store_true')
parser.add_argument('--dropout_prob', type=float, default=0.0)
parser.add_argument('--td3bc', action='store_true')
parser.add_argument('--bottleneck_policy', action='store_true')
args = parser.parse_args()
enable_gpus(args.gpu)
variant['bottleneck_policy'] = args.bottleneck_policy
variant['regularization'] = args.regularization
variant['td3bc'] = args.td3bc
variant['regularization_type'] = args.regularization_type
variant['regularization_weight'] = args.regularization_weight
variant['brac_shifted'] = args.brac_shifted
variant['dropout'] = args.dropout
variant['dropout_prob'] = args.dropout_prob
variant['continual'] = args.continual
variant['behavior_path'] = args.behavior_path
variant['num_traj'] = args.num_traj
variant['beta'] = args.beta
variant['scale_type']=args.scale_type
variant['context'] = args.context
variant['brac'] = args.brac
variant['history'] = args.history
variant['debug_scale_actions'] = args.debug_scale_actions
variant['debug_shift'] = args.debug_shift
variant['num_hist'] = args.num_hist
variant['hist_state'] = args.hist_state
variant['algorithm_kwargs']['batch_size'] = args.batch_size
variant['trainer_kwargs']['discount'] = args.discount
variant['squared'] = args.squared
variant['bigger_net'] = args.bigger_net
variant['deeper_net'] = args.deeper_net
variant['vqvae_enc'] = args.vqvae_enc
variant['vqvae_policy'] = args.vqvae_policy
variant['share_encoder'] = args.share_encoder
variant['singleQ'] = args.singleQ
variant['normalize_conv_activation'] = args.normalize_conv_activation
variant['spectral_norm_conv'] = args.spectral_norm_conv
variant['spectral_norm_fc'] = args.spectral_norm_fc
variant['env'] = args.env
variant['val'] = args.val
variant['algorithm_kwargs']['max_path_length'] = args.max_path_length
variant['algorithm_kwargs']['num_eval_steps_per_epoch'] = \
args.num_eval_per_epoch*args.max_path_length
variant['algorithm_kwargs']['eval_every_n_epochs'] = args.eval_every_n
variant['prior_buffer'] = args.prior_buffer
variant['task_buffer'] = args.task_buffer
variant['bottleneck'] = args.bottleneck
variant['bottleneck_const'] = args.bottleneck_const
variant['dr3'] = args.dr3
variant['dr3_feat'] = args.dr3_feat
variant['dr3_weight'] = args.dr3_weight
variant['bottleneck_lagrange'] = args.bottleneck_lagrange
variant['bottleneck_dim'] = args.bottleneck_dim
variant['deterministic_bottleneck']=args.deterministic_bottleneck
variant['only_bottleneck'] = args.only_bottleneck
variant['gamma'] = args.gamma
variant['num_traj'] = args.num_traj
variant['num_sample'] = args.eval_num
variant['trainer_kwargs']['discount'] = args.discount
variant['debug'] = False
if args.buffer.isnumeric():
args.buffer = int(args.buffer)
home = expanduser("~")
p_data_path = os.path.join(home, 'prior_data/') if args.azure else '/nfs/kun1/users/asap7772/prior_data/'
# p_data_path = '/home/stephentian/prior_data/'
    path = os.path.join(home, 'cog_data/') if args.azure else '/nfs/kun1/users/asap7772/cog_data/'
# path = '/home/stian/cog_data/'
buffers = []
ba = lambda x, p=args.prob, y=None: buffers.append((path+x,dict(p=p,alter_type=y,)))
if args.buffer == 0:
ba('closed_drawer_prior.npy',y='zero')
path = p_data_path
ba('task_singleneut_Widow250DoubleDrawerGraspNeutral-v0_10K_save_all_noise_0.1_2021-03-25T22-52-59_9750.npy')
elif args.buffer == 1:
ba('closed_drawer_prior.npy',y='zero')
ba('drawer_task.npy')
elif args.buffer == 2:
ba('closed_drawer_prior.npy',y='zero')
ba('drawer_task.npy',y='noise')
elif args.buffer == 3:
ba('closed_drawer_prior.npy',y='noise')
ba('drawer_task.npy',y='zero')
elif args.buffer == 4:
ba('closed_drawer_prior.npy',y='noise')
ba('drawer_task.npy',y='noise')
elif args.buffer == 5:
ba('drawer_task.npy')
if args.old_prior_prob > 0:
ba('closed_drawer_prior.npy',y='zero',p=args.old_prior_prob)
path = p_data_path
ba('grasp_newenv_Widow250DoubleDrawerOpenGraspNeutral-v0_20K_save_all_noise_0.1_2021-03-18T01-36-52_20000.npy',y='zero')
ba('pickplace_newenv_Widow250PickPlaceMultiObjectMultiContainerTrain-v0_20K_save_all_noise_0.1_2021-03-18T01-38-58_19500.npy',y='zero')
ba('drawer_newenv_Widow250DoubleDrawerOpenGraspNeutral-v0_20K_save_all_noise_0.1_2021-03-18T01-37-08_19500.npy', y='zero')
elif args.buffer == 6:
path = p_data_path
ba('task_multneut_Widow250DoubleDrawerGraspNeutral-v0_10K_save_all_noise_0.1_2021-03-25T22-53-21_9250.npy')
if args.old_prior_prob > 0:
path = '/nfs/kun1/users/asap7772/cog_data/'
ba('closed_drawer_prior.npy',y='zero',p=args.old_prior_prob)
ba('drawer_task.npy',y='noise')
path = p_data_path
ba('grasp_multneut_Widow250DoubleDrawerOpenGraspNeutral-v0_10K_save_all_noise_0.1_2021-03-24T01-17-30_10000.npy', y='zero')
ba('double_drawer_multneut_Widow250DoubleDrawerOpenGraspNeutral-v0_10K_save_all_noise_0.1_2021-03-24T01-19-23_9750.npy', y='zero')
elif args.buffer == 7:
path = p_data_path
ba('pick_Widow250PickTray-v0_10K_save_all_noise_0.1_2021-04-03T12-13-53_10000.npy',y='zero') #prior
ba('place_Widow250PlaceTray-v0_5K_save_all_noise_0.1_2021-04-03T12-14-02_4750.npy') #task
elif args.buffer == 8:
path = '/nfs/kun1/users/asap7772/cog_data/'
ba('pickplace_prior.npy',y='zero') #prior
path = p_data_path
ba('place_Widow250PlaceTray-v0_5K_save_all_noise_0.1_2021-04-03T12-14-02_4750.npy') #task
elif args.buffer == 9:
path = p_data_path
ba('pick_Widow250PickTray-v0_10K_save_all_noise_0.1_2021-04-03T12-13-53_10000.npy',y='zero') #prior
path = '/nfs/kun1/users/asap7772/cog_data/'
ba('pickplace_task.npy') #task
elif args.buffer == 10:
path = '/nfs/kun1/users/asap7772/cog_data/'
ba('pickplace_prior.npy',y='zero')
ba('pickplace_task.npy') #task
elif args.buffer == 11:
path = p_data_path
ba('coglike_prior_Widow250DoubleDrawerOpenGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-03T17-32-00_10000.npy', y='zero')
ba('coglike_task_Widow250DoubleDrawerGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-03T17-32-05_10000.npy')
elif args.buffer == 12:
path = p_data_path
ba('coglike_prior_linking_Widow250DoubleDrawerOpenGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-05T11-11-02_9250.npy', y='zero')
ba('coglike_task_Widow250DoubleDrawerGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-03T17-32-05_10000.npy')
elif args.buffer == 13:
path = p_data_path
ba('coglike_prior_manuallinking_Widow250DoubleDrawerOpenGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-06T00-36-15_10000.npy', y='zero',p=args.prob)
ba('coglike_task_Widow250DoubleDrawerGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-03T17-32-05_10000.npy',p=args.prob)
elif args.buffer == 14:
path = p_data_path
ba('prior_reset5_Widow250DoubleDrawerOpenGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-06T13-48-08_10000.npy', y='zero')
ba('task_reset5_Widow250DoubleDrawerGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-06T13-48-17_9000.npy')
elif args.buffer == 15:
path = p_data_path
ba('prior_reset10_Widow250DoubleDrawerOpenGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-06T13-48-23_10000.npy', y='zero')
ba('task_reset10_Widow250DoubleDrawerGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-06T13-48-28_10000.npy')
elif args.buffer == 16:
path = p_data_path
ba('prior_reset100_Widow250DoubleDrawerOpenGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-06T13-48-35_10000.npy', y='zero')
ba('task_reset100_Widow250DoubleDrawerGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-06T13-48-43_10000.npy')
elif args.buffer == 17:
path = p_data_path
ba('prior_reset2_Widow250DoubleDrawerOpenGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-08T10-56-50_8000.npy',y='zero')
ba('task_reset2_Widow250DoubleDrawerGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-08T10-56-55_10000.npy')
elif args.buffer == 18:
path = p_data_path
ba('prior_reset3_Widow250DoubleDrawerOpenGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-08T10-57-01_10000.npy',y='zero')
ba('task_reset3_Widow250DoubleDrawerGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-08T10-57-10_10000.npy')
elif args.buffer == 19:
path = p_data_path
ba('prior_reset1000_Widow250DoubleDrawerOpenGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-08T10-57-17_9000.npy',y='zero')
ba('task_reset1000_Widow250DoubleDrawerGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-08T10-57-38_10000.npy')
elif args.buffer == 20:
path = p_data_path
ba('prior_reset10000_Widow250DoubleDrawerOpenGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-08T10-57-44_10000.npy',y='zero')
ba('task_reset10000_Widow250DoubleDrawerGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-08T10-57-52_9000.npy')
elif args.buffer == 21:
path = p_data_path
ba('prior_resetinf_Widow250DoubleDrawerOpenGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-08T10-57-59_9000.npy',y='zero')
ba('task_resetinf_Widow250DoubleDrawerGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-08T10-58-08_10000.npy')
elif args.buffer == 22:
ba('closed_drawer_prior.npy',p=args.prob,y='zero')
ba('drawer_task.npy',p=args.prob)
elif args.buffer == 23:
path = p_data_path
ba('coglike_prior_manuallinking_Widow250DoubleDrawerOpenGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-06T00-36-15_10000.npy', y='zero')
ba('randobj_2_Widow250DoubleDrawerGraspNeutralRandObj-v0_10K_save_all_noise_0.1_2021-04-15T14-05-01_10000.npy')
elif args.buffer == 24:
path = p_data_path
ba('coglike_prior_manuallinking_Widow250DoubleDrawerOpenGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-06T00-36-15_10000.npy', y='zero')
ba('randobj_5_Widow250DoubleDrawerGraspNeutralRandObj-v0_10K_save_all_noise_0.1_2021-04-15T14-05-10_10000.npy')
elif args.buffer == 25:
path = p_data_path
ba('coglike_prior_manuallinking_Widow250DoubleDrawerOpenGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-06T00-36-15_10000.npy', y='zero')
ba('randobj_10_Widow250DoubleDrawerGraspNeutralRandObj-v0_10K_save_all_noise_0.1_2021-04-15T14-05-18_9000.npy')
elif args.buffer == 26:
path = p_data_path
ba('coglike_prior_manuallinking_Widow250DoubleDrawerOpenGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-06T00-36-15_10000.npy',p=args.prob, y='zero')
ba('coglike_task_noise0.1_Widow250DoubleDrawerGraspNeutral-v0_5K_save_all_noise_0.1_2021-04-23T02-22-30_4750.npy',p=args.prob,)
elif args.buffer == 27:
path = p_data_path
ba('coglike_prior_manuallinking_Widow250DoubleDrawerOpenGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-06T00-36-15_10000.npy',p=args.prob, y='zero')
ba('coglike_task_noise0.15_Widow250DoubleDrawerGraspNeutral-v0_5K_save_all_noise_0.15_2021-04-23T02-22-39_4625.npy',p=args.prob,)
elif args.buffer == 28:
path = p_data_path
ba('coglike_prior_manuallinking_Widow250DoubleDrawerOpenGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-06T00-36-15_10000.npy',p=args.prob, y='zero')
ba('coglike_task_noise0.2_Widow250DoubleDrawerGraspNeutral-v0_5K_save_all_noise_0.2_2021-04-23T02-22-44_4875.npy',p=args.prob,)
elif args.buffer == 28:
ba('coglike_prior_manuallinking_Widow250DoubleDrawerOpenGraspNeutral-v0_10K_save_all_noise_0.1_2021-04-06T00-36-15_10000.npy',p=args.prob, y='zero')
ba('coglike_task_noise0.2_Widow250DoubleDrawerGraspNeutral-v0_5K_save_all_noise_0.2_2021-04-23T02-22-44_4875.npy',p=args.prob,)
elif args.buffer == 29:
ba('pickplace_prior.npy', p=args.prob,y='zero')
ba('pickplace_task.npy', p=args.prob)
elif args.buffer == 30:
path = p_data_path
ba('pick_10obj_Widow250PickTrayMult-v0_5K_save_all_noise_0.1_2021-04-30T01-16-26_4500.npy', p=args.prob,y='zero')
ba('place_10obj_Widow250PlaceTrayMult-v0_5K_save_all_noise_0.1_2021-04-30T01-16-31_4875.npy', p=args.prob)
elif args.buffer == 31:
path = p_data_path
ba('pick_5obj_Widow250PickTrayMult-v0_5K_save_all_noise_0.1_2021-04-30T01-16-36_4750.npy', p=args.prob,y='zero')
ba('place_5obj_Widow250PlaceTrayMult-v0_5K_save_all_noise_0.1_2021-04-30T01-16-39_4750.npy', p=args.prob)
elif args.buffer == 32:
path = p_data_path
ba('pick_2obj_Widow250PickTrayMult-v0_5K_save_all_noise_0.1_2021-04-30T01-16-43_5000.npy', p=args.prob,y='zero')
ba('place_2obj_Widow250PlaceTrayMult-v0_5K_save_all_noise_0.1_2021-04-30T01-16-49_5000.npy', p=args.prob)
elif args.buffer == 33:
ba('blocked_drawer_1_prior.npy', p=args.prob,y='zero')
ba('drawer_task.npy', p=args.prob)
elif args.buffer == 34:
path = ''
if args.azure:
            ba(os.path.join(os.path.expanduser('~'), 'grasping35obj', 'may11_Widow250OneObjectGraspTrain-v0_20K_save_all_noise_0.1_2021-05-11T16-56-48/may11_Widow250OneObjectGraspTrain-v0_20K_save_all_noise_0.1_2021-05-11T16-56-48_20000.npy'))
            ba(os.path.join(os.path.expanduser('~'), 'grasping35obj', 'may11_Widow250OneObjectGraspTrain-v0_20K_save_all_noise_0.1_2021-05-11T16-56-48/may11_Widow250OneObjectGraspTrain-v0_20K_save_all_noise_0.1_2021-05-11T16-56-48_20000.npy'))
else:
ba('/nfs/kun1/users/avi/scripted_sim_datasets/may11_Widow250OneObjectGraspTrain-v0_20K_save_all_noise_0.1_2021-05-11T16-56-48/may11_Widow250OneObjectGraspTrain-v0_20K_save_all_noise_0.1_2021-05-11T16-56-48_20000.npy')
ba('/nfs/kun1/users/avi/scripted_sim_datasets/may11_Widow250OneObjectGraspTrain-v0_20K_save_all_noise_0.1_2021-05-11T16-56-48/may11_Widow250OneObjectGraspTrain-v0_20K_save_all_noise_0.1_2021-05-11T16-56-48_20000.npy')
elif args.buffer == 35:
path = p_data_path
ba('pick_35obj_Widow250PickTrayMult-v0_5K_save_all_noise_0.1_2021-05-07T01-17-10_4375.npy', p=args.prob, y='zero')
ba('place_35obj_Widow250PlaceTrayMult-v0_5K_save_all_noise_0.1_2021-04-30T01-17-42_4875.npy', p=args.prob)
elif args.buffer == 36:
path = p_data_path
ba('pick_20obj_Widow250PickTrayMult-v0_5K_save_all_noise_0.1_2021-05-07T01-17-01_4625.npy', p=args.prob,
y='zero')
ba('place_20obj_Widow250PlaceTrayMult-v0_5K_save_all_noise_0.1_2021-06-14T21-53-31_5000.npy', p=args.prob)
elif args.buffer == 37:
path = p_data_path
ba('drawer_prior_multobj_Widow250DoubleDrawerOpenGraspNeutralRandObj-v0_10K_save_all_noise_0.1_2021-06-23T11-52-07_10000.npy', p=args.prob, y='zero')
ba('drawer_task_multobj_Widow250DoubleDrawerGraspNeutralRandObj-v0_10K_save_all_noise_0.1_2021-06-23T11-52-15_9750.npy', p=args.prob)
elif args.buffer == 38:
path = p_data_path
ba('drawer_prior_multobj_Widow250DoubleDrawerOpenGraspNeutralRandObj-v0_10K_save_all_noise_0.1_2021-08-02T23-59-33_9500.npy', p=args.prob, y='zero')
ba('drawer_task_multobj_Widow250DoubleDrawerGraspNeutralRandObj-v0_10K_save_all_noise_0.1_2021-08-02T23-59-38_9500.npy', p=args.prob)
elif args.buffer == 39:
path = p_data_path
ba('drawer_prior_overlap_Widow250DoubleDrawerOpenGraspNeutralRandObjOverlap-v0_10K_save_all_noise_0.1_2021-08-02T23-58-16_9500.npy', p=args.prob, y='zero')
ba('drawer_task_overlap_Widow250DoubleDrawerGraspNeutralRandObjOverlap-v0_10K_save_all_noise_0.1_2021-08-02T23-58-16_9500.npy', p=args.prob)
elif args.buffer == 9000:
variant['debug'] = True
path = p_data_path
ba('debug.npy',y='noise')
ba('debug.npy',y='noise')
elif args.buffer == 9001: #for testing wandb code
variant['debug'] = False
path = p_data_path
ba('debug.npy',y='noise')
ba('debug.npy',y='noise')
variant['buffer'] = buffers
variant['bufferidx'] = args.buffer
else:
variant['buffer'] = None
if variant['buffer'] is not None:
if args.buffer in [5,6]:
variant['prior_buffer'] = buffers[1:]
variant['task_buffer'] = buffers[0]
else:
variant['prior_buffer'] = buffers[0]
variant['task_buffer'] = buffers[1]
variant['trainer_kwargs']['max_q_backup'] = args.max_q_backup
variant['trainer_kwargs']['deterministic_backup'] = \
not args.no_deterministic_backup
variant['trainer_kwargs']['min_q_weight'] = args.min_q_weight
variant['trainer_kwargs']['policy_lr'] = args.policy_lr
variant['trainer_kwargs']['min_q_version'] = args.min_q_version
variant['trainer_kwargs']['policy_eval_start'] = int(args.policy_eval_start)
variant['trainer_kwargs']['lagrange_thresh'] = args.lagrange_thresh
variant['trainer_kwargs']['with_lagrange'] = args.use_lagrange
variant['duplicate'] = args.duplicate
variant['mcret'] = args.mcret
variant['bchead'] = args.bchead
# Translate 0/1 rewards to +4/+10 rewards.
variant['use_positive_rew'] = args.use_positive_rew
variant['seed'] = args.seed
ptu.set_gpu_mode(True)
exp_prefix = 'cql-cog-{}'.format(args.env)
if os.path.isdir(CUSTOM_LOG_DIR):
base_log_dir = CUSTOM_LOG_DIR
else:
base_log_dir = None
variant['base_log_dir'] = base_log_dir
log_dir = setup_logger(args.name, variant=variant, base_log_dir=base_log_dir,
snapshot_mode='gap_and_last', snapshot_gap=10,)
variant['log_dir'] = log_dir
experiment(variant)
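# Example invocation (illustrative only -- the env name, buffer id and run
# name are placeholders; every flag shown is defined in the argparse block above):
#   python examples/cog.py --env Widow250DoubleDrawerGraspNeutral-v0 \
#       --max-path-length 50 --buffer 11 --min-q-weight 5.0 \
#       --policy-lr 1e-4 --seed 10 --name cql_drawer_run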
| 49.213843
| 243
| 0.646655
|
4a1513ce575930883dbd5126772747256565f342
| 3,319
|
py
|
Python
|
scripts/fig1_mayavi_headon.py
|
rohitsupekar/active_matter_spheres
|
7fb144e8c5bb8a7b562223eb32235d0eb21b07c8
|
[
"MIT"
] | 1
|
2019-02-12T21:21:09.000Z
|
2019-02-12T21:21:09.000Z
|
scripts/fig1_mayavi_headon.py
|
rohitsupekar/active_matter_spheres
|
7fb144e8c5bb8a7b562223eb32235d0eb21b07c8
|
[
"MIT"
] | 1
|
2019-05-31T22:25:22.000Z
|
2019-06-10T17:55:05.000Z
|
scripts/fig1_mayavi_headon.py
|
rohitsupekar/active_matter_spheres
|
7fb144e8c5bb8a7b562223eb32235d0eb21b07c8
|
[
"MIT"
] | null | null | null |
import os
import sys
sys.path.append("../") # go to parent dir
import glob
import time
import logging
import numpy as np
from scipy.sparse import linalg as spla
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from mayavi import mlab
from scipy.special import sph_harm
mlab.options.offscreen = False
#add path to data folder
input_folder = "/Volumes/ExtDrive/data"
output_folder = "plots"
dpi=300
cmap="coolwarm"
ind = 4500 #time ind
with np.load(os.path.join(input_folder, 'sphere113/output_%i.npz' %(ind))) as file:
om1 = file['om']
time = file['t'][0]
print('time=%f' %time)
with np.load(os.path.join(input_folder, 'sphere114/output_%i.npz' %(ind))) as file:
om2 = file['om']
time = file['t'][0]
print('time=%f' %time)
with np.load(os.path.join(input_folder, 'sphere115/output_%i.npz' %(ind))) as file:
om3 = file['om']
time = file['t'][0]
print('time=%f' %time)
with np.load(os.path.join(input_folder, 'sphere111/output_%i.npz' %(ind))) as file:
om4 = file['om']
time = file['t'][0]
print('time=%f' %time)
with np.load(os.path.join(input_folder, 'sphere109/output_%i.npz' %(ind))) as file:
om5 = file['om']
time = file['t'][0]
print('time=%f' %time)
with np.load(os.path.join(input_folder, 'sphere110/output_%i.npz' %(ind))) as file:
om6 = file['om']
time = file['t'][0]
print('time=%f' %time)
with np.load(os.path.join(input_folder, 'sphere116/output_%i.npz' %(ind))) as file:
om7 = file['om']
time = file['t'][0]
print('time=%f' %time)
with np.load(os.path.join(input_folder, 'sphere117/output_%i.npz' %(ind))) as file:
om8 = file['om']
time = file['t'][0]
print('time=%f' %time)
with np.load(os.path.join(input_folder, 'sphere118/output_%i.npz' %(ind))) as file:
phi = file['phi']
theta = file['theta']
om9 = file['om']
time = file['t'][0]
print('time=%f' %time)
#change phi
phi = np.linspace(0, 2*np.pi, len(phi))
# Create a sphere
r = 0.3
pi = np.pi
cos = np.cos
sin = np.sin
phiphi, thth = np.meshgrid(theta, phi-pi)
x = r * sin(phiphi) * cos(thth)
y = r * sin(phiphi) * sin(thth)
z = r * cos(phiphi)
#s = sph_harm(0, 10, theta, phi).real
mlab.figure(1, bgcolor=(0, 0, 0), fgcolor=(1, 1, 1), size=(800, 700))
mlab.clf()
cmin, cmax = -300, 300
dx = 0.7
m = mlab.mesh(x, y, z+2*dx, scalars=om1, colormap=cmap)
m = mlab.mesh(x+dx, y, z+2*dx, scalars=om2, colormap=cmap)
m = mlab.mesh(x+2*dx, y, z+2*dx, scalars=om3, colormap=cmap)
m = mlab.mesh(x, y, z+dx, scalars=om4, colormap=cmap)
m = mlab.mesh(x+dx, y, z+dx, scalars=om5, colormap=cmap)
m = mlab.mesh(x+2*dx, y, z+dx, scalars=om6, colormap=cmap)
m = mlab.mesh(x, y, z, scalars=om7, colormap=cmap)
m = mlab.mesh(x+dx, y, z, scalars=om8, colormap=cmap)
m = mlab.mesh(x+2*dx, y, z, scalars=om9, colormap=cmap)
mlab.view(-90, 90, distance=4)
#mlab.savefig("%s/mayavi.pdf" %(output_folder), magnification=100)
#mlab.show()
#mlab.figure(2, bgcolor=(0, 0, 0), fgcolor=(1, 1, 1), size=(700, 300))
#mlab.clf()
#m = mlab.mesh(x, y, z, scalars=om3, colormap=cmap)
#m = mlab.mesh(x+0.7, y, z, scalars=om6, colormap=cmap)
#m = mlab.mesh(x+1.4, y, z, scalars=om9, colormap=cmap)
#mlab.view(-90, 90, distance=1.5)
#mlab.savefig("%s/mayavi_front.pdf" %(output_folder), magnification=100)
mlab.show()
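# Offscreen alternative (sketch, not exercised here): set
# mlab.options.offscreen = True near the top and replace mlab.show() with
#   mlab.savefig("%s/fig1_headon.png" % output_folder)
# to write the 3x3 sphere panel straight to disk, mirroring the commented-out
# savefig calls above.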
| 27.429752
| 83
| 0.642362
|
4a1514647ce9cf36a2a61603be65bb5bbe2a5e74
| 992
|
py
|
Python
|
final_project/machinetranslation/translator.py
|
sugan84/xzceb-flask_eng_fr
|
18adfcefd838580490eb9a74da1356d76f0ecd9d
|
[
"Apache-2.0"
] | null | null | null |
final_project/machinetranslation/translator.py
|
sugan84/xzceb-flask_eng_fr
|
18adfcefd838580490eb9a74da1356d76f0ecd9d
|
[
"Apache-2.0"
] | null | null | null |
final_project/machinetranslation/translator.py
|
sugan84/xzceb-flask_eng_fr
|
18adfcefd838580490eb9a74da1356d76f0ecd9d
|
[
"Apache-2.0"
] | null | null | null |
"""
Translator Module
"""
#import json
import os
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from dotenv import load_dotenv
load_dotenv()
apikey = os.environ['apikey']
url = os.environ['url']
authenticator = IAMAuthenticator(apikey)
language_translator = LanguageTranslatorV3(
version='2021-11-11',
authenticator=authenticator
)
language_translator.set_service_url(url)
def englishToFrench(eng_text):
"""
Convert english to french
"""
if eng_text != '':
translation = language_translator.translate(text=eng_text, model_id='en-fr').\
get_result()
return translation['translations'][0]['translation']
def frenchToEnglish(fr_text):
"""
Convert french to english
"""
if fr_text != '':
translation = language_translator.translate(text=fr_text, model_id='fr-en').\
get_result()
return translation['translations'][0]['translation']
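# Minimal usage sketch (assumes a .env file providing valid 'apikey' and 'url'
# values; running this block calls the live Watson Language Translator service).
if __name__ == "__main__":
    print(englishToFrench("Hello"))    # e.g. "Bonjour"
    print(frenchToEnglish("Bonjour"))  # e.g. "Hello"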
| 23.619048
| 86
| 0.704637
|
4a1514d5850d86b55fa468fb7b0c98b53c969c64
| 19,950
|
py
|
Python
|
lib/ReadsUtils/ReadsUtilsImpl.py
|
aekazakov/kb_blast
|
3af28a72bc783a0da7f301e359a7c2e420d1fa74
|
[
"MIT"
] | null | null | null |
lib/ReadsUtils/ReadsUtilsImpl.py
|
aekazakov/kb_blast
|
3af28a72bc783a0da7f301e359a7c2e420d1fa74
|
[
"MIT"
] | null | null | null |
lib/ReadsUtils/ReadsUtilsImpl.py
|
aekazakov/kb_blast
|
3af28a72bc783a0da7f301e359a7c2e420d1fa74
|
[
"MIT"
] | null | null | null |
#BEGIN_HEADER
import time
import subprocess
import os
import tempfile
import shutil
from DataFileUtil.DataFileUtilClient import DataFileUtil
from numbers import Number
import six
#END_HEADER
class ReadsUtils:
'''
Module Name:
ReadsUtils
Module Description:
Utilities for handling reads files.
'''
######## WARNING FOR GEVENT USERS #######
# Since asynchronous IO can lead to methods - even the same method -
# interrupting each other, you must be *very* careful when using global
# state. A method could easily clobber the state set by another while
# the latter method is running.
#########################################
VERSION = "0.0.1"
GIT_URL = "https://github.com/mrcreosote/ReadsUtils"
GIT_COMMIT_HASH = "356d1eeae8663b59c9e0bee0b113e0b05c7c80df"
#BEGIN_CLASS_HEADER
FASTA_JAR = '/opt/lib/FastaValidator-1.0.jar'
FASTQ_EXE = 'fastQValidator'
FASTA_EXT = ['.fa', '.fas', '.fasta', '.fna']
FASTQ_EXT = ['.fq', '.fastq', '.fnq']
def log(self, message, prefix_newline=False):
print(('\n' if prefix_newline else '') +
str(time.time()) + ': ' + message)
def xor(self, a, b):
return bool(a) != bool(b)
def _add_field(self, obj, params, field):
f = params.get(field)
if f:
obj[field] = f
def _check_pos(self, num, name):
if num is not None:
if not isinstance(num, Number):
raise ValueError(name + ' must be a number')
if num <= 0:
raise ValueError(name + ' must be > 0')
def _proc_upload_reads_params(self, ctx, params):
fwdid = params.get('fwd_id')
if not fwdid:
raise ValueError('No reads file provided')
wsid = params.get('wsid')
wsname = params.get('wsname')
if not self.xor(wsid, wsname):
raise ValueError(
'Exactly one of the workspace ID or name must be provided')
dfu = DataFileUtil(self.callback_url, token=ctx['token'])
if wsname:
self.log('Translating workspace name to id')
if not isinstance(wsname, six.string_types):
raise ValueError('wsname must be a string')
wsid = dfu.ws_name_to_id(wsname)
self.log('translation done')
del wsname
objid = params.get('objid')
name = params.get('name')
if not self.xor(objid, name):
raise ValueError(
'Exactly one of the object ID or name must be provided')
revid = params.get('rev_id')
interleaved = 1 if params.get('interleaved') else 0
kbtype = 'KBaseFile.SingleEndLibrary'
single_end = True
if interleaved or revid:
kbtype = 'KBaseFile.PairedEndLibrary'
single_end = False
if revid:
interleaved = 0
seqtype = params.get('sequencing_tech')
if not seqtype:
raise ValueError('The sequencing technology must be provided')
sg = 1
if 'single_genome' in params and not params['single_genome']:
sg = 0
o = {'sequencing_tech': seqtype,
'single_genome': sg,
# 'read_count': params.get('read_count'),
# 'read_size': params.get('read_size'),
# 'gc_content': params.get('gc_content')
}
self._add_field(o, params, 'strain')
self._add_field(o, params, 'source')
ism = params.get('insert_size_mean')
self._check_pos(ism, 'insert_size_mean')
issd = params.get('insert_size_std_dev')
self._check_pos(issd, 'insert_size_std_dev')
if not single_end:
o.update({'insert_size_mean': ism,
'insert_size_std_dev': issd,
'interleaved': interleaved,
'read_orientation_outward': 1 if params.get(
'read_orientation_outward') else 0
})
return o, wsid, name, objid, kbtype, single_end, fwdid, revid
def validateFASTA(self, ctx, params):
"""
        Validate a FASTA file. The file extensions .fa, .fas, .fna, and .fasta
are accepted.
:param file_path: instance of String
:returns: instance of type "boolean" (A boolean - 0 for false, 1 for
true. @range (0, 1))
"""
# ctx is the context object
# return variables are: validated
# OLD BEGIN validateFASTA
del ctx
file_path = params.get('file_path')
if not file_path or not os.path.isfile(file_path):
raise ValueError('No such file: ' + str(file_path))
if os.path.splitext(file_path)[1] not in self.FASTA_EXT:
raise ValueError('File {} is not a FASTA file'.format(file_path))
self.log('Validating FASTA file ' + file_path)
# TODO per transform service notes, we need a better fasta validator
# a good start would be not calling FVTester but writing our own
# wrapper (Py4J?) that takes options for types etc.
# see https://github.com/jwaldman/FastaValidator/blob/master/src/demo/FVTester.java @IgnorePep8
# note the version in jars returns non-zero error codes:
# https://github.com/srividya22/FastaValidator/commit/67e2d860f1869b9a76033e71fb2aaff910b7c2e3 @IgnorePep8
# Better yet, move this to a Java SDK module, don't make a system
# call and expose useful options.
retcode = subprocess.call(
['java', '-classpath', self.FASTA_JAR, 'FVTester', file_path])
self.log('Validation return code: ' + str(retcode))
validated = 1 if retcode == 0 else 0
self.log('Validation ' + ('succeeded' if validated else 'failed'))
out = {'valid': validated}
# OLD END validateFASTA
# At some point might do deeper type checking...
if not isinstance(out, dict):
raise ValueError('Method validateFASTA return value ' +
'out is not type dict as required.')
# return the results
return [out]
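    # Illustrative call pattern (sketch -- the config and ctx below are minimal
    # stand-ins for what the KBase SDK normally injects, and SDK_CALLBACK_URL
    # must be set in the environment for __init__ to succeed):
    #   ru = ReadsUtils({'scratch': '/kb/module/work/tmp'})
    #   ru.validateFASTA(ctx, {'file_path': '/kb/module/work/reads.fa'})
    # which returns [{'valid': 1}] when the FastaValidator jar exits cleanly.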
#END_CLASS_HEADER
# config contains contents of config file in a hash or None if it couldn't
# be found
def __init__(self, config):
#BEGIN_CONSTRUCTOR
self.scratch = config['scratch']
self.callback_url = os.environ['SDK_CALLBACK_URL']
#END_CONSTRUCTOR
pass
def validateFASTQ(self, ctx, params):
"""
Validate a FASTQ file. The file extensions .fq, .fnq, and .fastq
are accepted. Note that prior to validation the file will be altered in
place to remove blank lines if any exist.
:param params: instance of list of type "ValidateFASTQParams" (Input
to the validateFASTQ function. Required parameters: file_path -
the path to the file to validate. Optional parameters: interleaved
- whether the file is interleaved or not. Setting this to true
disables sequence ID checks.) -> structure: parameter "file_path"
of String, parameter "interleaved" of type "boolean" (A boolean -
0 for false, 1 for true. @range (0, 1))
:returns: instance of list of type "ValidateFASTQOutput" (The output
of the validateFASTQ function. validated - whether the file
validated successfully or not.) -> structure: parameter
"validated" of type "boolean" (A boolean - 0 for false, 1 for
true. @range (0, 1))
"""
# ctx is the context object
# return variables are: out
#BEGIN validateFASTQ
del ctx
# TODO try and parse the validator output and return errors
out = []
for p in params:
file_path = p.get('file_path')
if not file_path or not os.path.isfile(file_path):
raise ValueError('No such file: ' + str(file_path))
if os.path.splitext(file_path)[1] not in self.FASTQ_EXT:
raise ValueError('File {} is not a FASTQ file'
.format(file_path))
self.log('Validating FASTQ file ' + file_path)
self.log('Checking line count')
c = 0
blank = False
# open assumes ascii, which is ok for reads
with open(file_path) as f: # run & count until we hit a blank line
for l in f:
if not l.strip():
blank = True
break
c += 1
if blank:
c = 0
self.log('Removing blank lines')
with open(file_path) as s, tempfile.NamedTemporaryFile(
mode='w', dir=self.scratch) as t:
for l in s:
l = l.strip()
if l:
t.write(l + '\n')
c += 1
s.close()
t.flush()
shutil.copy2(t.name, file_path)
validated = 1
if c % 4 != 0:
err = ('Invalid FASTQ file, expected multiple of 4 lines, ' +
'got ' + str(c))
self.log(err)
validated = 0
else:
self.log(str(c) + ' lines in file')
if validated:
arguments = [self.FASTQ_EXE, '--file', file_path,
'--maxErrors', '10']
if p.get('interleaved'):
arguments.append('--disableSeqIDCheck')
retcode = subprocess.call(arguments)
self.log('Validation return code: ' + str(retcode))
validated = 1 if retcode == 0 else 0
self.log('Validation ' +
('succeeded' if validated else 'failed'))
out.append({'validated': validated})
#END validateFASTQ
# At some point might do deeper type checking...
if not isinstance(out, list):
raise ValueError('Method validateFASTQ return value ' +
'out is not type list as required.')
# return the results
return [out]
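    # Example params for validateFASTQ (sketch): a list with one dict per file,
    # e.g. [{'file_path': '/kb/module/work/reads.fastq', 'interleaved': 1}];
    # the result is a parallel list of {'validated': 0 or 1} entries.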
def upload_reads(self, ctx, params):
"""
Loads a set of reads to KBase data stores.
:param params: instance of type "UploadReadsParams" (Input to the
upload_reads function. Required parameters: fwd_id - the id of the
shock node containing the reads data file: either single end
reads, forward/left reads, or interleaved reads. sequencing_tech -
the sequencing technology used to produce the reads. One of: wsid
- the id of the workspace where the reads will be saved
(preferred). wsname - the name of the workspace where the reads
will be saved. One of: objid - the id of the workspace object to
save over name - the name to which the workspace object will be
saved Optional parameters: rev_id - the shock node id containing
the reverse/right reads for paired end, non-interleaved reads.
single_genome - whether the reads are from a single genome or a
metagenome. Default is single genome. strain - information about
the organism strain that was sequenced. source - information about
the organism source. interleaved - specify that the fwd reads file
is an interleaved paired end reads file as opposed to a single end
reads file. Default true, ignored if rev_id is specified.
read_orientation_outward - whether the read orientation is outward
from the set of primers. Default is false and is ignored for
single end reads. insert_size_mean - the mean size of the genetic
fragments. Ignored for single end reads. insert_size_std_dev - the
standard deviation of the size of the genetic fragments. Ignored
for single end reads.) -> structure: parameter "fwd_id" of String,
parameter "wsid" of Long, parameter "wsname" of String, parameter
"objid" of Long, parameter "name" of String, parameter "rev_id" of
String, parameter "sequencing_tech" of String, parameter
"single_genome" of type "boolean" (A boolean - 0 for false, 1 for
true. @range (0, 1)), parameter "strain" of type "StrainInfo"
(Information about a strain. genetic_code - the genetic code of
the strain. See
http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi?mode=c
genus - the genus of the strain species - the species of the
strain strain - the identifier for the strain source - information
about the source of the strain organelle - the organelle of
interest for the related data (e.g. mitochondria) ncbi_taxid - the
NCBI taxonomy ID of the strain location - the location from which
the strain was collected @optional genetic_code source ncbi_taxid
organelle location) -> structure: parameter "genetic_code" of
Long, parameter "genus" of String, parameter "species" of String,
parameter "strain" of String, parameter "organelle" of String,
parameter "source" of type "SourceInfo" (Information about the
source of a piece of data. source - the name of the source (e.g.
NCBI, JGI, Swiss-Prot) source_id - the ID of the data at the
source project_id - the ID of a project encompassing the data at
the source @optional source source_id project_id) -> structure:
parameter "source" of String, parameter "source_id" of type
"source_id" (An ID used for a piece of data at its source. @id
external), parameter "project_id" of type "project_id" (An ID used
for a project encompassing a piece of data at its source. @id
external), parameter "ncbi_taxid" of Long, parameter "location" of
type "Location" (Information about a location. lat - latitude of
the site, recorded as a decimal number. North latitudes are
positive values and south latitudes are negative numbers. lon -
longitude of the site, recorded as a decimal number. West
longitudes are positive values and east longitudes are negative
numbers. elevation - elevation of the site, expressed in meters
above sea level. Negative values are allowed. date - date of an
event at this location (for example, sample collection), expressed
in the format YYYY-MM-DDThh:mm:ss.SSSZ description - a free text
description of the location and, if applicable, the associated
event. @optional date description) -> structure: parameter "lat"
of Double, parameter "lon" of Double, parameter "elevation" of
Double, parameter "date" of String, parameter "description" of
String, parameter "source" of type "SourceInfo" (Information about
the source of a piece of data. source - the name of the source
(e.g. NCBI, JGI, Swiss-Prot) source_id - the ID of the data at the
source project_id - the ID of a project encompassing the data at
the source @optional source source_id project_id) -> structure:
parameter "source" of String, parameter "source_id" of type
"source_id" (An ID used for a piece of data at its source. @id
external), parameter "project_id" of type "project_id" (An ID used
for a project encompassing a piece of data at its source. @id
external), parameter "interleaved" of type "boolean" (A boolean -
0 for false, 1 for true. @range (0, 1)), parameter
"read_orientation_outward" of type "boolean" (A boolean - 0 for
false, 1 for true. @range (0, 1)), parameter "insert_size_mean" of
Double, parameter "insert_size_std_dev" of Double
:returns: instance of type "UploadReadsOutput" (The output of the
upload_reads function. obj_ref - a reference to the new Workspace
object in the form X/Y/Z, where X is the workspace ID, Y is the
object ID, and Z is the version.) -> structure: parameter
"obj_ref" of String
"""
# ctx is the context object
# return variables are: returnVal
#BEGIN upload_reads
self.log('Starting upload reads, parsing args')
o, wsid, name, objid, kbtype, single_end, fwdid, revid = (
self._proc_upload_reads_params(ctx, params))
interleaved = 1 if (not single_end and not revid) else 0
fileinput = [{'shock_id': fwdid,
'file_path': self.scratch + '/fwd/',
'unpack': 'uncompress'}]
if revid:
fileinput.append({'shock_id': revid,
'file_path': self.scratch + '/rev/',
'unpack': 'uncompress'})
dfu = DataFileUtil(self.callback_url, token=ctx['token'])
self.log('downloading reads files from Shock')
files = dfu.shock_to_file_mass(fileinput)
self.log('download complete, validating files')
for f, i in zip(files, fileinput):
if not self.validateFASTQ(
ctx, [{'file_path': f['file_path'],
'interleaved': interleaved
}])[0][0]['validated']:
                raise ValueError('Invalid fastq file {} from Shock node {}'
.format(f['file_path'], i['shock_id']))
self.log('file validation complete')
self.log('coercing forward reads node to my control, muhahahaha!')
fwdr = dfu.own_shock_node({'shock_id': fwdid, 'make_handle': 1})
self.log('coercing complete, my evil schemes know no bounds')
revr = None
if revid:
self.log('coercing reverse reads node to my control, muhahahaha!')
revr = dfu.own_shock_node({'shock_id': revid, 'make_handle': 1})
self.log('coercing complete. Will I stop at nothing?')
# TODO calculate gc content, read size, read_count (find a program)
fwdfile = {'file': fwdr['handle'],
'encoding': 'ascii',
'size': files[0]['size'],
'type': 'fq'
}
if single_end:
o['lib'] = fwdfile
else:
o['lib1'] = fwdfile
if revr:
o['lib2'] = {'file': revr['handle'],
'encoding': 'ascii',
'size': files[1]['size'],
'type': 'fq'
}
so = {'type': kbtype,
'data': o
}
if name:
so['name'] = name
else:
so['objid'] = objid
self.log('saving workspace object')
oi = dfu.save_objects({'id': wsid, 'objects': [so]})[0]
self.log('save complete')
returnVal = {'obj_ref': str(oi[6]) + '/' + str(oi[0]) + '/' +
str(oi[4])}
#END upload_reads
# At some point might do deeper type checking...
if not isinstance(returnVal, dict):
raise ValueError('Method upload_reads return value ' +
'returnVal is not type dict as required.')
# return the results
return [returnVal]
def status(self, ctx):
#BEGIN_STATUS
del ctx
returnVal = {'state': 'OK',
'message': '',
'version': self.VERSION,
'git_url': self.GIT_URL,
'git_commit_hash': self.GIT_COMMIT_HASH}
#END_STATUS
return [returnVal]
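# A minimal standalone sketch (not used by the class above) of the basic FASTQ
# shape check performed in validateFASTQ: a FASTQ record is 4 lines, so after
# dropping blank lines the total line count must be a multiple of 4. The file
# name below is hypothetical and this does not replace the external validator
# executable invoked above.
def _example_fastq_line_check(file_path='reads.fastq'):
    with open(file_path) as f:
        non_blank = [line for line in f if line.strip()]
    return len(non_blank) % 4 == 0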
| 47.051887
| 114
| 0.579649
|
4a15171b6064f676e51d3e338f3ad734330384bd
| 8,291
|
py
|
Python
|
driver.py
|
ct2034/PRIMAL2
|
1bd44cba6f674e37e78b893a323d35a348dfb759
|
[
"MIT"
] | 58
|
2020-10-18T11:14:05.000Z
|
2022-03-27T11:52:24.000Z
|
driver.py
|
ct2034/PRIMAL2
|
1bd44cba6f674e37e78b893a323d35a348dfb759
|
[
"MIT"
] | 8
|
2020-11-20T07:41:03.000Z
|
2021-12-14T08:36:42.000Z
|
driver.py
|
ct2034/PRIMAL2
|
1bd44cba6f674e37e78b893a323d35a348dfb759
|
[
"MIT"
] | 29
|
2020-11-11T10:18:21.000Z
|
2022-02-21T09:57:35.000Z
|
import numpy as np
import tensorflow as tf
import os
import ray
from Ray_ACNet import ACNet
from Runner import imitationRunner, RLRunner
from parameters import *
import random
ray.init(num_gpus=1)
tf.reset_default_graph()
print("Hello World")
config = tf.ConfigProto(allow_soft_placement = True)
config.gpu_options.per_process_gpu_memory_fraction = 1.0 / (NUM_META_AGENTS - NUM_IL_META_AGENTS + 1)
config.gpu_options.allow_growth=True
# Create directories
if not os.path.exists(model_path):
os.makedirs(model_path)
if not os.path.exists(gifs_path):
os.makedirs(gifs_path)
global_step = tf.placeholder(tf.float32)
if ADAPT_LR:
# computes LR_Q/sqrt(ADAPT_COEFF*steps+1)
# we need the +1 so that lr at step 0 is defined
lr = tf.divide(tf.constant(LR_Q), tf.sqrt(tf.add(1., tf.multiply(tf.constant(ADAPT_COEFF), global_step))))
else:
lr = tf.constant(LR_Q)
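# Hedged numeric illustration of the adaptive schedule above,
# lr(step) = LR_Q / sqrt(ADAPT_COEFF * step + 1); the base_lr and adapt_coeff
# defaults below are made-up values, not the ones defined in parameters.py.
def _example_adaptive_lr(step, base_lr=2.e-5, adapt_coeff=5.e-5):
    import math
    return base_lr / math.sqrt(adapt_coeff * step + 1)
# _example_adaptive_lr(0) equals base_lr, and the rate is roughly halved once
# adapt_coeff * step + 1 reaches 4 (step = 60000 with these made-up numbers).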
def apply_gradients(global_network, gradients, sess, curr_episode):
feed_dict = {
global_network.tempGradients[i]: g for i, g in enumerate(gradients)
}
feed_dict[global_step] = curr_episode
sess.run([global_network.apply_grads], feed_dict=feed_dict)
def writeImitationDataToTensorboard(global_summary, metrics, curr_episode):
summary = tf.Summary()
summary.value.add(tag='Losses/Imitation loss', simple_value=metrics[0])
global_summary.add_summary(summary, curr_episode)
global_summary.flush()
def writeEpisodeRatio(global_summary, numIL, numRL, sess, curr_episode):
summary = tf.Summary()
current_learning_rate = sess.run(lr, feed_dict={global_step: curr_episode})
RL_IL_Ratio = numRL / (numRL + numIL)
summary.value.add(tag='Perf/Num IL Ep.', simple_value=numIL)
summary.value.add(tag='Perf/Num RL Ep.', simple_value=numRL)
summary.value.add(tag='Perf/ RL IL ratio Ep.', simple_value=RL_IL_Ratio)
summary.value.add(tag='Perf/Learning Rate', simple_value=current_learning_rate)
global_summary.add_summary(summary, curr_episode)
global_summary.flush()
def writeToTensorBoard(global_summary, tensorboardData, curr_episode, plotMeans=True):
# each row in tensorboardData represents an episode
# each column is a specific metric
if plotMeans == True:
tensorboardData = np.array(tensorboardData)
tensorboardData = list(np.mean(tensorboardData, axis=0))
valueLoss, policyLoss, validLoss, entropyLoss, gradNorm, varNorm,\
mean_length, mean_value, mean_invalid, \
mean_stop, mean_reward, mean_finishes = tensorboardData
else:
firstEpisode = tensorboardData[0]
valueLoss, policyLoss, validLoss, entropyLoss, gradNorm, varNorm, \
mean_length, mean_value, mean_invalid, \
mean_stop, mean_reward, mean_finishes = firstEpisode
summary = tf.Summary()
summary.value.add(tag='Perf/Reward', simple_value=mean_reward)
summary.value.add(tag='Perf/Targets Done', simple_value=mean_finishes)
summary.value.add(tag='Perf/Length', simple_value=mean_length)
summary.value.add(tag='Perf/Valid Rate', simple_value=(mean_length - mean_invalid) / mean_length)
summary.value.add(tag='Perf/Stop Rate', simple_value=(mean_stop) / mean_length)
summary.value.add(tag='Losses/Value Loss', simple_value=valueLoss)
summary.value.add(tag='Losses/Policy Loss', simple_value=policyLoss)
summary.value.add(tag='Losses/Valid Loss', simple_value=validLoss)
summary.value.add(tag='Losses/Entropy Loss', simple_value=entropyLoss)
summary.value.add(tag='Losses/Grad Norm', simple_value=gradNorm)
summary.value.add(tag='Losses/Var Norm', simple_value=varNorm)
global_summary.add_summary(summary, int(curr_episode - len(tensorboardData)))
global_summary.flush()
def main():
with tf.device("/gpu:0"):
trainer = tf.contrib.opt.NadamOptimizer(learning_rate=lr, use_locking=True)
global_network = ACNet(GLOBAL_NET_SCOPE,a_size,trainer,False,NUM_CHANNEL, OBS_SIZE,GLOBAL_NET_SCOPE, GLOBAL_NETWORK=True)
global_summary = tf.summary.FileWriter(train_path)
saver = tf.train.Saver(max_to_keep=1)
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
if load_model == True:
print ('Loading Model...')
ckpt = tf.train.get_checkpoint_state(model_path)
p=ckpt.model_checkpoint_path
p=p[p.find('-')+1:]
p=p[:p.find('.')]
curr_episode=int(p)
saver.restore(sess,ckpt.model_checkpoint_path)
print("curr_episode set to ",curr_episode)
else:
curr_episode = 0
# launch all of the threads:
il_agents = [imitationRunner.remote(i) for i in range(NUM_IL_META_AGENTS)]
rl_agents = [RLRunner.remote(i) for i in range(NUM_IL_META_AGENTS, NUM_META_AGENTS)]
meta_agents = il_agents + rl_agents
# get the initial weights from the global network
weight_names = tf.trainable_variables()
weights = sess.run(weight_names) # Gets weights in numpy arrays CHECK
weightVars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
# launch the first job (e.g. getGradient) on each runner
jobList = [] # Ray ObjectIDs
for i, meta_agent in enumerate(meta_agents):
jobList.append(meta_agent.job.remote(weights, curr_episode))
curr_episode += 1
tensorboardData = []
IDs = [None] * NUM_META_AGENTS
numImitationEpisodes = 0
numRLEpisodes = 0
try:
while True:
# wait for any job to be completed - unblock as soon as the earliest arrives
done_id, jobList = ray.wait(jobList)
# get the results of the task from the object store
jobResults, metrics, info = ray.get(done_id)[0]
# imitation episodes write different data to tensorboard
if info['is_imitation']:
if jobResults:
writeImitationDataToTensorboard(global_summary, metrics, curr_episode)
numImitationEpisodes += 1
else:
if jobResults:
tensorboardData.append(metrics)
numRLEpisodes += 1
# Write ratio of RL to IL episodes to tensorboard
writeEpisodeRatio(global_summary, numImitationEpisodes, numRLEpisodes, sess, curr_episode)
if JOB_TYPE == JOB_OPTIONS.getGradient:
if jobResults:
for gradient in jobResults:
apply_gradients(global_network, gradient, sess, curr_episode)
elif JOB_TYPE == JOB_OPTIONS.getExperience:
print("not implemented")
assert(1==0)
else:
print("not implemented")
assert(1==0)
# Every `SUMMARY_WINDOW` RL episodes, write RL episodes to tensorboard
if len(tensorboardData) >= SUMMARY_WINDOW:
writeToTensorBoard(global_summary, tensorboardData, curr_episode)
tensorboardData = []
# get the updated weights from the global network
weight_names = tf.trainable_variables()
weights = sess.run(weight_names)
curr_episode += 1
# start a new job on the recently completed agent with the updated weights
jobList.extend([meta_agents[info['id']].job.remote(weights, curr_episode)])
if curr_episode % 100 == 0:
print ('Saving Model', end='\n')
saver.save(sess, model_path+'/model-'+str(int(curr_episode))+'.cptk')
print ('Saved Model', end='\n')
except KeyboardInterrupt:
print("CTRL-C pressed. killing remote workers")
for a in meta_agents:
ray.kill(a)
if __name__ == "__main__":
main()
| 35.583691
| 129
| 0.636111
|
4a15177674a8d946ada6ab0382b2089e40dab0a4
| 1,414
|
py
|
Python
|
2016/January/gates/gates.py
|
alantao5056/USACO_Silver
|
6998cb916692af58a0b40b1a4aff0708ee1106b8
|
[
"MIT"
] | null | null | null |
2016/January/gates/gates.py
|
alantao5056/USACO_Silver
|
6998cb916692af58a0b40b1a4aff0708ee1106b8
|
[
"MIT"
] | null | null | null |
2016/January/gates/gates.py
|
alantao5056/USACO_Silver
|
6998cb916692af58a0b40b1a4aff0708ee1106b8
|
[
"MIT"
] | null | null | null |
def getCord(d, curCord):
# updates cord
if d == "N":
return (curCord[0], curCord[1] + 1,)
elif d == "E":
return (curCord[0] + 1, curCord[1],)
elif d == "S":
return (curCord[0], curCord[1] - 1,)
else:
        # d is "W"
return (curCord[0] - 1, curCord[1],)
def getNumOfGates(N, directions):
# main function
count = 0
visitedDirec = set([(0, 0,)])
fences = {}
lastCord = (0, 0)
curCord = (0, 0)
for i in range(0, N):
curCord = getCord(directions[i], curCord) # updates curCord
# putting cords in hash
if curCord in fences:
fences[curCord].add(lastCord)
else:
fences[curCord] = set([lastCord])
# check if touching original point
if curCord in visitedDirec:
# check if already drawn line
if curCord not in fences[lastCord]:
count += 1
# putting cords in hash
if lastCord in fences:
fences[lastCord].add(curCord)
else:
fences[lastCord] = set([curCord])
visitedDirec.add(curCord)
lastCord = curCord
return count
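# Quick hand-traced illustration of getNumOfGates (not part of the original
# solution): walking a single unit square "NESW" returns to the origin once and
# closes one loop, so the function reports 1.
def _example_unit_square():
    assert getNumOfGates(4, "NESW") == 1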
def main(inputFile, outputFile):
# reads input
gatesInput = open(inputFile, 'r')
N = int(gatesInput.readline().strip())
directions = gatesInput.readline().strip()
gatesInput.close()
# writes output
gatesOutput = open(outputFile, 'w')
gatesOutput.write(str(getNumOfGates(N, directions)) + '\n')
gatesOutput.close()
main('gates.in', 'gates.out')
| 22.09375
| 63
| 0.622348
|
4a1517f7cd5c79cd9861d1a36cfbd26967bfcacc
| 1,187
|
py
|
Python
|
bqskit/compiler/executor.py
|
jkalloor3/bqskit
|
ad34a6eae3c0e62d2bd960cd4cd841ba8e845811
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
bqskit/compiler/executor.py
|
jkalloor3/bqskit
|
ad34a6eae3c0e62d2bd960cd4cd841ba8e845811
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
bqskit/compiler/executor.py
|
jkalloor3/bqskit
|
ad34a6eae3c0e62d2bd960cd4cd841ba8e845811
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
"""This module implements the Executor class."""
from __future__ import annotations
from typing import Any
from threadpoolctl import threadpool_limits
from bqskit.compiler.task import CompilationTask
from bqskit.ir.circuit import Circuit
class Executor:
"""An Executor is responsible for executing a CompilationTask."""
def __init__(self, task: CompilationTask) -> None:
"""
Construct an Executor.
        Creates an executor ready to execute the specified task.
Args:
task (CompilationTask): The task to execute.
"""
if not isinstance(task, CompilationTask):
raise TypeError(f'Expected a CompilationTask, got {type(task)}.')
self.task_id = task.task_id
self.circuit = task.input_circuit
self.passes = task.passes
self.data: dict[str, Any] = {'executor': self}
self.done = False
def run(self) -> tuple[Circuit, dict[str, Any]]:
"""Execute the task."""
with threadpool_limits(limits=1):
for pass_obj in self.passes:
pass_obj.run(self.circuit, self.data)
self.done = True
return self.circuit, self.data
| 28.95122
| 77
| 0.647009
|
4a1519a3c95748db8880456e9189936225af30ff
| 13,508
|
py
|
Python
|
onyx/pack_manager.py
|
Command-Master/Onyx
|
03dfb88b833c431a9bb2a1322bfc91bb0d265ac6
|
[
"MIT"
] | null | null | null |
onyx/pack_manager.py
|
Command-Master/Onyx
|
03dfb88b833c431a9bb2a1322bfc91bb0d265ac6
|
[
"MIT"
] | null | null | null |
onyx/pack_manager.py
|
Command-Master/Onyx
|
03dfb88b833c431a9bb2a1322bfc91bb0d265ac6
|
[
"MIT"
] | null | null | null |
import os
import time
import json
import shutil
from typing import Union
from .handler import Handler
from .enums import tag_type
class function:
"""Defines a standalone function without a data pack (libs and things will NOT work)
Args:
path (str): The path of the function (raw path, include extension)
"""
def __init__(self, path):
self._path = path
Handler(None, None, None, None)
def __enter__(self):
return self
def __exit__(self, excpt_type, excpt_value, traceback):
with open(self._path, "w") as _file:
_file.write('\n'.join(Handler._cmds) + "\n")
class Function:
def __init__(self, datapack_path, datapack_name, function_path, function_name, loop):
self._datapack_path = datapack_path
self._datapack_name = datapack_name
# Define the function path and create it if it doesn't exist
self._function_path = os.path.join(datapack_path, "data", datapack_name, "functions", os.path.normpath(function_path))
os.makedirs(self._function_path, exist_ok=True)
# Store the function path that is used by the game (pack_name:function/path/here)
q = self._function_path.split(os.sep)
q = q[q.index('functions') + 1:]
if q[0] == ".":
self._mcfunction_path = f"{self._datapack_name}:{function_name}"
else:
self._mcfunction_path = f"{self._datapack_name}:{f'/'.join(q)}/{function_name}"
# The file name itself is added after the directory is created to avoid a folder called "xyz.mcfunction" being created
self._function_path = os.path.join(self._function_path, function_name + ".mcfunction")
        # Initialize the Handler
Handler(self._function_path, self._mcfunction_path, self._datapack_path, self._datapack_name)
if loop:
# Make the tick.json directory if it doesn't exist
tick_dir = os.path.join(datapack_path, "data", "minecraft", "tags", "functions")
os.makedirs(tick_dir, exist_ok=True)
# Get the tick.json contents and set default values if they don't exist
try:
with open(os.path.join(tick_dir, "tick.json"), "r") as tick_json:
current_data = json.load(tick_json)
except FileNotFoundError:
current_data = {"values": []}
# Update (or create) the file
with open(os.path.join(tick_dir, "tick.json"), "w") as tick_json:
# Get the function path, split it, and then keep only everything past /data/namespace/functions/
# Add the data to the list and dump it
current_data["values"].append(f"{self._mcfunction_path}")
json.dump(current_data, tick_json, indent=4)
Handler._status(f"Added function to tick.json: {self._mcfunction_path}")
def __enter__(self):
Handler._active_func = self._function_path
return self
def __exit__(self, excpt_type, excpt_value, traceback):
Handler._write_function()
class pack:
"""Defines a new datapack
Args:
path (str): The path of the new datapack
override (bool, optional): Whether or not the old datapack should be deleted. Defaults to True.
disable_status_messages (bool, optional): Whether or not status messages should be displayed. Defaults to False.
"""
def __init__(self, path: str, override: bool = True, disable_status_messages: bool = False):
self._start_time = time.time()
self._datapack_path = os.path.normpath(path)
self._datapack_name = os.path.basename(os.path.normpath(self._datapack_path)).lower().replace(" ", "_")
Handler._disable_status = disable_status_messages
if override:
shutil.rmtree(self._datapack_path, ignore_errors=True)
# Generate file structure
path = os.path.join(self._datapack_path, "data", self._datapack_name, "functions")
os.makedirs(path, exist_ok=True)
# Generate pack.mcmeta
with open(os.path.join(self._datapack_path, "pack.mcmeta"), "w+") as mc_meta_file:
mc_meta_data = {
"pack": {
"pack_format": 5,
"description": f"{self._datapack_name}"
}
}
json.dump(mc_meta_data, mc_meta_file, indent=4)
with open(os.path.join(self._datapack_path, "SIGNATURE"), "wb") as _file:
_file.write(bytearray(b'\x02\x06\x00\x02\x01\t\x07\x04\x02\x08\x08\x02\x05\x05\x00\x01\x00\x01\x06\x00\x03\x06\x03\x03\x01\x04\x05\x07\x06\x00\x03\x08\x04\x03\x03\x01\x01\x04\x01\x07\x05\x03\x01\x03\x08\x00\x04\x08\t\x01\x04\x06\x01\x00\x02\x05\t\x04\x07\x06\t\t\x07\x05\x05\x01\x04\x06\t\x07\x03\x07\x08\x01\x05\x03\x00\x02\x03\x04\x03\x01\x08\x07\x03\x07\x05\x05\x00\x01\x02\t\x05\x08\x04\t\x01\x02\x05\x03\x03\x07\x01\x01\x01\t\x07\x06\x07\x03\x00\x08\x05\x08\x03\t\x01\x06\x08\x07\x02\x07\x08\x05\x01\x03\x05\x08\x06\x03\x08\x04\x07\x00\x03\x04\x04\x02\t\x06\x07\x00\x05\x08\x07\x06\x00\x03\x06\t\x01\x01\x04\x01\x05\x03\x08\x07\x07\t\t\x01\x07\x08\x01\x02\x00\t\t\x02\x05\x08\t\x05\x06\x00\x06\t\x02\x07\x03\x04\x07\x06\x05\x03\x01\x05\x01\x08\x07\x07\x03\x01\x02\t\x02\x06\x06\x08\x08\t\x05\t\x01\x07\x07\x08\x00\x04\t\x00\x06\x06\x04\x02\x07\x05\t\x07\x01\x03\x06\x07\x06\x05\x02\x06\x04\t\x02\x04\x04\x00\x07\x03\t\x00\x03\x03\x07\x03\x07\x03\x07\x06\t\x00\t\x02\x08\x01\x02\t\x03\x08\x04\t\x07\x02\x05\t\t\x04\x00\x03\x03\t\x05\t\x00\x08\x00\x01\x04\x06\x08\t\x05\x05\x04\t\x06\x04\x02\x05\x03\x04\x03\x03\x03\x04\x02\x08\x01\x01\x06\x01\x04\x05\x03\x08\x07\t\t\x04\x04\x01\x04\x04\x01\t\x01\x07\x06\x05\x00\x00'))
Handler._status(f"Removed old datapack: {self._datapack_name}")
Handler._status(f"Created new datapack: {self._datapack_name}")
def get_generation_time(self, decimal_places: int = 3):
"""Prints the time it took to generate the datapack
Args:
decimal_places (int, optional): The amount of decimal places the result should be rounded to. Defaults to 3.
"""
Handler._status(f"Datapack '{self._datapack_name}' took {round(time.time() - self._start_time, decimal_places)} seconds to generate")
def function(self, function_path: str, function_name: str, loop=False):
"""Defines a new function
Args:
function_path (str): The path of the function relative to the datapack ``functions`` folder
function_name (str): The name of the function
loop (bool, optional): Whether or not the function should be added to tick.json. Defaults to False.
"""
return Function(self._datapack_path, self._datapack_name, function_path, function_name, loop)
def _multi_data(self, data_type, path, name, data, overwrite=False):
if overwrite:
namespace = "minecraft"
else:
namespace = self._datapack_name
fileless_path = os.path.join(self._datapack_path, "data", namespace, data_type, os.path.normpath(path))
os.makedirs(fileless_path, exist_ok=True)
# Get the path that the game uses for this data type
q = fileless_path.split(os.sep)
q = q[q.index(data_type) + 1:]
if q[0] == ".":
            mc_path = f"{namespace}:{name}"
        else:
            mc_path = f"{namespace}:{f'/'.join(q)}/{name}"
data_path = os.path.join(fileless_path, name + ".json")
with open(data_path, "w") as _file:
json.dump(data, _file, indent=4)
Handler._status(f"Created {data_type[:-1]}: {mc_path}")
return mc_path
def add_advancement(self, path: str, name: str, data: dict, overwrite: bool = False):
"""Adds an advancement to the datapack from raw JSON data
Args:
path (str): The location of the advancement relative to ``namespace/advancements``. Specify the advancement name in the ``name`` parameter.
name (str): The name of the advancement
data (dict): The JSON data of the advancement
overwrite (bool, optional): Whether or not the advancement should be put in the ``minecraft`` namespace. Defaults to False.
Returns:
str: The path of the advancement that minecraft uses (``namespace:pa/th/name``)
"""
        return self._multi_data("advancements", path, name, data, overwrite)
def add_loot_table(self, path: str, name: str, data: dict, overwrite: bool = False):
"""Adds a loot table to the datapack from raw JSON data
Args:
path (str): The location of the loot table relative to ``namespace/loot_tables``. Specify the loot table name in the ``name`` parameter.
name (str): The name of the loot table
data (dict): The JSON data of the loot table
overwrite (bool, optional): Whether or not the loot table should be put in the ``minecraft`` namespace. Defaults to False.
Returns:
str: The path of the loot table that minecraft uses (``namespace:pa/th/name``)
"""
        return self._multi_data("loot_tables", path, name, data, overwrite)
def add_predicate(self, path: str, name: str, data: dict, overwrite: bool = False):
"""Adds a predicate to the datapack from raw JSON data
Args:
path (str): The location of the predicate relative to ``namespace/predicates``. Specify the predicate name in the ``name`` parameter.
name (str): The name of the predicate
data (dict): The JSON data of the predicate
overwrite (bool, optional): Whether or not the predicate should be put in the ``minecraft`` namespace. Defaults to False.
Returns:
str: The path of the predicate that minecraft uses (``namespace:pa/th/name``)
"""
        return self._multi_data("predicates", path, name, data, overwrite)
def add_recipe(self, path: str, name: str, data: dict, overwrite: bool = False):
"""Adds a recipe to the datapack from raw JSON data
Args:
path (str): The location of the recipe relative to ``namespace/recipes``. Specify the recipe name in the ``name`` parameter.
name (str): The name of the recipe
data (dict): The JSON data of the recipe
overwrite (bool, optional): Whether or not the recipe should be put in the ``minecraft`` namespace. Defaults to False.
Returns:
str: The path of the recipe that minecraft uses (``namespace:pa/th/name``)
"""
        return self._multi_data("recipes", path, name, data, overwrite)
def load_structure(self, path: str, name: str, location: str, overwrite: bool = False):
"""Copies a structure into the datapack
Args:
path (str): The location of the structure relative to ``namespace/structures``. Specify the structure name in the ``name`` parameter.
name (str): The name of the structure
location (str): The file path of the structure to load
overwrite (bool, optional): Whether or not the structure should be loaded into the ``minecraft`` namespace. Defaults to False.
"""
if not location.endswith(".nbt"):
location += ".nbt"
if overwrite:
namespace = "minecraft"
else:
namespace = self._datapack_name
fileless_path = os.path.join(self._datapack_path, "data", namespace, "structures", os.path.normpath(path))
os.makedirs(fileless_path, exist_ok=True)
shutil.copyfile(location, os.path.join(fileless_path, name + ".nbt"))
def add_tag(self, type: tag_type, path, name, data: Union[dict, list], overwrite: bool = False):
"""Adds a tag to the datapack from raw JSON data or a list of elements
Args:
type (tag_type): The type of the tag (block, entity, fluid, function, item)
path (str): The location of the tag relative to ``namespace/tags``. Specify the tag name in the ``name`` parameter.
name (str): The name of the tag
data (Union[dict, list]): The data of the tag. If ``data`` is a dictionary, the raw JSON data is loaded. If ``data`` is a list, the list will be set to the ``values`` element of the tag.
overwrite (bool, optional): Whether or not the tag should be loaded into the ``minecraft`` namespace. Defaults to False.
"""
if overwrite:
namespace = "minecraft"
else:
namespace = self._datapack_name
fileless_path = os.path.join(self._datapack_path, "data", namespace, "tags", os.path.normpath(path))
os.makedirs(fileless_path, exist_ok=True)
# Get the path that the game uses for this tag
q = fileless_path.split(os.sep)
q = q[q.index("tags") + 1:]
if q[0] == ".":
mc_path = f"#{namespace}:{name}"
else:
mc_path = f"#{namespace}:{f'/'.join(q)}/{name}"
tag_path = os.path.join(fileless_path, name + ".json")
if isinstance(data, dict):
with open(tag_path, "w") as _file:
json.dump(data, _file, indent=4)
elif isinstance(data, list):
q = [Handler._translate(y) for y in data]
with open(tag_path, "w") as _file:
json.dump({"values": q}, _file, indent=4)
Handler._status(f"Created tag: {mc_path}")
return mc_path
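# Hedged usage sketch based only on the docstrings above; the datapack path is
# hypothetical and the commands that would normally be emitted through the onyx
# Handler inside the with-block are omitted.
def _example_build_pack():
    dp = pack('generated_packs/example_pack', override=True,
              disable_status_messages=True)
    # One empty function at the root of the functions folder
    with dp.function('.', 'load'):
        pass
    dp.get_generation_time()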
| 48.765343
| 1,235
| 0.635549
|
4a151aafb2e2aa2cfa2f466a741ddbc2937f7ba4
| 8,298
|
py
|
Python
|
credentialdigger/generator/training.py
|
SabrinaKall/credential-digger
|
372b25aa19c7c04a2c2a8f385f4875f66de73af1
|
[
"Apache-2.0"
] | null | null | null |
credentialdigger/generator/training.py
|
SabrinaKall/credential-digger
|
372b25aa19c7c04a2c2a8f385f4875f66de73af1
|
[
"Apache-2.0"
] | null | null | null |
credentialdigger/generator/training.py
|
SabrinaKall/credential-digger
|
372b25aa19c7c04a2c2a8f385f4875f66de73af1
|
[
"Apache-2.0"
] | null | null | null |
import json
import pkg_resources
import random
import shutil
from pathlib import Path
import fasttext
import numpy as np
def _create_model_folder(url):
""" Create the folder for the new model.
The new folder is created in the `models_data` folder, i.e., at the same
location of the other models (that can be installed independently from this
one).
Structure of the `models_data` folder.
```
models_data
|- path_model (not mandatory)
|- snippet_model (independent from this)
`- snippet_model_author (created with this method)
```
Parameters
----------
url: str
The url of the repository
Returns
-------
`pathlib.Path`
The path of the model folder
Raises
------
FileExistsError
If the model folder already exists
"""
# The model name is the name of the author of the repository
model_name = 'snippet_model_%s' % url.split('/')[-2]
# Get the models_data folder of credentialdigger
models_data = Path(pkg_resources.resource_filename('credentialdigger',
'models_data'))
# Create model folder. If the model already exists, its folder is already
# present at this path. In this case, a FileExistsError is raised by the
# instruction mkdir
local_model = models_data / model_name
local_model.mkdir()
return local_model
def _fill_model_structure(model_folder, model_name, model_version='1.0.0'):
""" Fill the model folder with subfolders and metafiles.
Every snippet model has a folder that has to respect the following
structure (the snippet_model itself is independent from the one we generate
in this process, and must be installed independently).
```
models_data
|- ...
`- snippet_model_author
|- __init__.py
|- meta.json
`- snippet_model_author-1.0.0
`- extractor.bin
```
Parameters
----------
model_folder: `pathlib.Path`
The path of the folder of this model
model_name: str
The name of the model (i.e., the name of the folder)
model_version: str, optional
The version of the model (default `1.0.0`)
Returns
-------
`pathlib.Path`
The path of the inner folder, where the binaries will be dropped (i.e.,
the folder identified as `snippet_model_author-version`)
"""
def create_model_meta(folder, model_name, version):
metafile = {
'name': model_name,
'version': version,
'credentialdigger_version': '>=1.0.0',
'parent_package': 'credentialdigger',
'description': 'SnippetModel extractor from ExtractorGenerator',
'author': 'credentialdigger-generated',
'email': 'contact@example.com',
'url': 'https://example.com',
'license': 'Apache2'
}
jsonfile = model_folder / 'meta.json'
with open(jsonfile, 'w') as f:
json.dump(metafile, f)
# __init__.py
open(model_folder / '__init__.py', 'w').close()
# meta.json
create_model_meta(model_folder, model_name, model_version)
# Subfolder snippet_model_author-1.0.0
inner_folder = model_folder / ('%s-%s' % (model_name, model_version))
inner_folder.mkdir()
# Return subfolder (it is here that will be dropped the binary model)
return inner_folder
def _generate_training_dataset(training_data, temp_ds_path):
""" Pre-process the training data.
Parameters
----------
training_data: `pandas.DataFrame`
The training data obtained from the Leak Generator
temp_ds_path: `pathlib.Path`
The path of the temp folder containing the dataset
Returns
-------
train_file: str
The path of the training file
valid_file: str
The path of the validation file
"""
training_data_size = int(round(len(training_data) * 0.99, 2))
to_write = []
for idx, row in training_data.iterrows():
# Find indexes of key and value
try:
text = row.text.split()
idx_key = np.where(np.array(text) == np.array(row.key))[0]
idx_value = np.where(np.array(text) == np.array(row.value))[0]
# Consider the first key and the last value
output = '__label__%s __label__%s %s' % (idx_key[0],
idx_value[-1],
row.text)
to_write.append(output)
except IndexError:
# Should never occur since all the patterns have at least one key
# and at least one value
print(row)
print('Text is missing either the key or the value.',
'Skip this pattern.')
random.shuffle(to_write)
# Store training files until supervised learning step
train_file = temp_ds_path / 'extractor.train'
valid_file = temp_ds_path / 'extractor.valid'
with open(train_file, 'w') as f:
for out in to_write[:training_data_size]:
f.write(out + '\n')
with open(valid_file, 'w') as f:
for out in to_write[training_data_size:]:
f.write(out + '\n')
# Return path to train and valid extractors
return str(train_file), str(valid_file)
def _train_model(input_ds, valid_ds, learning_rate=0.1, epoch_model=50,
word_ngrams=5, word_vector_dim=100, context_window=5):
""" Train the model with Fasttext and pre-processed data.
Only the extractor model of the SnippetModel is trained.
Parameters
----------
input_ds: str
The path of the training dataset
valid_ds: str
The path of the validation dataset
learning_rate: float, optional
The learning rate (default `0.1`)
epoch_model: int
The number of epochs (default `50`)
word_ngrams: int
The max length of word ngram (default `5`)
word_vector_dim: int
The size of word vectors (default `100`)
context_window: int
The size of the context window (default `5`)
Returns
-------
`fasttext.FastText._FastText`
The model object
"""
model = fasttext.train_supervised(input=input_ds,
lr=learning_rate,
epoch=epoch_model,
wordNgrams=word_ngrams,
dim=word_vector_dim,
ws=context_window,
loss='ova')
print(model.test(valid_ds))
return model
def create_snippet_model(training_data, repo_url):
""" Train and save the extractor for the Snippet Model of this repo.
All the repositories of the same author can use the same extractor. Indeed,
we assume that the stylometry of an author doesn't change.
Parameters
----------
training_data: `pandas.DataFrame`
Pandas DataFrame obtained through the Leak Generator
repo_url: str
The url of the repository
Returns
-------
str
The name of the model folder
str
The name of the binary for the extractor model
Raises
------
FileExistsError
If the model already exists
"""
# Create folder for the model
# It raises a FileExistsError if the folder (thus, the model) already
# exists
model_folder = _create_model_folder(repo_url)
# Fill folder structure
extractor_folder = _fill_model_structure(model_folder,
model_folder.name)
# Create a temp folder for storing the temporary datasets
temp_folder = model_folder / 'temp'
temp_folder.mkdir()
# Output the training data and the test data for fasttext
train, valid = _generate_training_dataset(training_data, temp_folder)
# Train the model
model = _train_model(train, valid)
# Save model
extractor_bin = extractor_folder / 'model_extractor_adapted.bin'
model.save_model(str(extractor_bin))
# Remove temp folder (with its files)
shutil.rmtree(temp_folder)
# For being ready to use in the SnippetModel class
return model_folder.name, extractor_bin.name
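# Hedged usage sketch: per _generate_training_dataset above, the training
# DataFrame needs `text`, `key` and `value` columns, and the model name is taken
# from the second-to-last segment of the repository url. The rows and url below
# are made up for illustration only; a real run needs far more data than this.
def _example_create_extractor():
    import pandas as pd
    training_data = pd.DataFrame([
        {'text': 'user = admin password = hunter2',
         'key': 'password', 'value': 'hunter2'},
    ])
    return create_snippet_model(training_data,
                                'https://github.com/someauthor/somerepo')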
| 32.03861
| 79
| 0.618221
|
4a151b3a7262243315b3ee7c1b65f1d6a2e636b1
| 370
|
py
|
Python
|
src/analysis/analyzer.py
|
JeesubKim/stocker
|
a68cb1295f51b046371de62eaf6182870377003c
|
[
"MIT"
] | null | null | null |
src/analysis/analyzer.py
|
JeesubKim/stocker
|
a68cb1295f51b046371de62eaf6182870377003c
|
[
"MIT"
] | null | null | null |
src/analysis/analyzer.py
|
JeesubKim/stocker
|
a68cb1295f51b046371de62eaf6182870377003c
|
[
"MIT"
] | null | null | null |
# 0. Stocks that printed a long bullish candle - record-breaking volume, i.e. smart money
# 1. Stocks that have traded sideways for a long time (stocks inside a hysteresis band?)
# Only trade where the money is concentrated. --> check where the money is gathered
# News interpretation -> money piles in at the start
# Volume and trading value!!!!
# In the end moving averages don't matter much; among stocks with a bullish-aligned chart, the ones with news or a catalyst go further, so watch those
# !!!! On the minute chart, when the trend is up and price consolidates, try a trade on a breakout of the previous high
# On the daily chart it seems better to sell on a breakout of the previous high (personal view)
# Don't try to recover your losses in the stock that wrecked you
# Don't try to get compensation from the stock that wrecked you yesterday
| 18.5
| 65
| 0.648649
|
4a151bb17295cd3bebaafc81b4a5fe80dcfeff88
| 1,330
|
py
|
Python
|
visualization/grill/door_baselines.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | 1
|
2020-10-23T14:40:09.000Z
|
2020-10-23T14:40:09.000Z
|
visualization/grill/door_baselines.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | null | null | null |
visualization/grill/door_baselines.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | 1
|
2021-05-27T20:38:45.000Z
|
2021-05-27T20:38:45.000Z
|
import matplotlib
from visualization.grill.config import (
output_dir,
ashvin_base_dir,
format_func,
configure_matplotlib,
)
import matplotlib.pyplot as plt
from rlkit.visualization import plot_util as plot
configure_matplotlib(matplotlib)
dirs = [
ashvin_base_dir + 's3doodad/share/camera_ready_door',
]
exps = plot.load_exps(dirs, suppress_output=True)
plot.comparison(exps, ["Final angle_difference Mean"],
[
# "seed",
"exp_prefix",
],
default_vary={"env_kwargs.randomize_position_on_reset": True},
smooth=plot.padded_ma_filter(10),
print_final=False, print_min=False, print_plot=True,
xlim=(0, 200000),
# ylim=(0, 0.35),
figsize=(6, 4),
method_order=(3, 2, 4, 0, 1),
)
plt.gca().xaxis.set_major_formatter(plt.FuncFormatter(format_func))
plt.xlabel("Timesteps")
plt.ylabel("Final Distance to Goal")
plt.legend([])
# [],
# [
# "RIG",
# "DSAE",
# "HER",
# "Oracle",
# "L&R",
# ],
# # bbox_to_anchor=(0.49, -0.2), loc="upper center", ncol=5, handlelength=1)
# bbox_to_anchor=(1.0, 0.5), loc="center left",
# )
plt.tight_layout()
plt.title("Visual Door Baselines")
plt.savefig(output_dir + "door_baselines_viz.pdf")
| 27.142857
| 87
| 0.614286
|
4a151bb87b317b56a7723988942a895b2c306e33
| 76
|
py
|
Python
|
jdaviz/configs/specviz/__init__.py
|
check-spelling/jdaviz
|
bfd0514d13bdc6fa0b8c8536a603293409270337
|
[
"MIT",
"BSD-3-Clause"
] | 55
|
2019-05-24T18:53:05.000Z
|
2022-03-14T08:45:52.000Z
|
jdaviz/configs/specviz/__init__.py
|
check-spelling/jdaviz
|
bfd0514d13bdc6fa0b8c8536a603293409270337
|
[
"MIT",
"BSD-3-Clause"
] | 1,105
|
2019-05-09T15:17:35.000Z
|
2022-03-31T21:22:18.000Z
|
jdaviz/configs/specviz/__init__.py
|
rosteen/jdaviz
|
e02c08d68ef71c5e40600785f46e65e5ae95e236
|
[
"MIT",
"BSD-3-Clause"
] | 49
|
2019-05-07T18:05:42.000Z
|
2022-03-22T15:15:34.000Z
|
from .plugins import * # noqa
from .helper import Specviz, SpecViz # noqa
| 25.333333
| 44
| 0.723684
|
4a151bd41f3098a4c86c811efdd4b74d3569fd3a
| 451
|
py
|
Python
|
settings.py
|
qiongzui/Alien_invasion
|
7c5dcab9c7e9ba367eb6fe0936d099f1c2dc99ca
|
[
"Apache-2.0"
] | null | null | null |
settings.py
|
qiongzui/Alien_invasion
|
7c5dcab9c7e9ba367eb6fe0936d099f1c2dc99ca
|
[
"Apache-2.0"
] | null | null | null |
settings.py
|
qiongzui/Alien_invasion
|
7c5dcab9c7e9ba367eb6fe0936d099f1c2dc99ca
|
[
"Apache-2.0"
] | null | null | null |
class Settings():
    """A class to store all settings for Alien Invasion."""
    def __init__(self):
        """Initialize the game's settings."""
        # Screen settings
self.screen_width = 1200
self.screen_height = 800
self.bg_color = (230,230,230)
        # Ship settings
self.ship_speed_factor = 1.5
        # Bullet settings
self.bullet_speed_factor = 1
self.bullet_width = 3
self.bullet_height = 15
        self.bullet_color = (60, 60, 60)
self.bullets_allowed = 3
| 22.55
| 39
| 0.552106
|
4a151c06cee81c502740200eb3f2b08f7820394e
| 5,112
|
py
|
Python
|
main.py
|
rushic24/tradingview-scraper
|
0f0b0b745214409eef1aabf87a4f7be86e60005e
|
[
"MIT"
] | 163
|
2020-09-09T12:13:00.000Z
|
2022-03-26T20:05:30.000Z
|
main.py
|
saarsa/tradingview-scraper
|
0f0b0b745214409eef1aabf87a4f7be86e60005e
|
[
"MIT"
] | 16
|
2020-10-25T23:15:27.000Z
|
2022-02-22T00:42:02.000Z
|
main.py
|
saarsa/tradingview-scraper
|
0f0b0b745214409eef1aabf87a4f7be86e60005e
|
[
"MIT"
] | 74
|
2020-09-09T12:22:34.000Z
|
2022-03-24T04:09:03.000Z
|
from websocket import create_connection
import json
import random
import string
import re
import pandas as pd
import csv
from datetime import datetime
from time import localtime
def filter_raw_message(text):
try:
found = re.search('"m":"(.+?)",', text).group(1)
found2 = re.search('"p":(.+?"}"])}', text).group(1)
print(found)
print(found2)
        return found, found2
except AttributeError:
print("error")
def generateSession():
stringLength=12
letters = string.ascii_lowercase
random_string= ''.join(random.choice(letters) for i in range(stringLength))
return "qs_" +random_string
def generateChartSession():
stringLength=12
letters = string.ascii_lowercase
random_string= ''.join(random.choice(letters) for i in range(stringLength))
return "cs_" +random_string
def prependHeader(st):
return "~m~" + str(len(st)) + "~m~" + st
def constructMessage(func, paramList):
#json_mylist = json.dumps(mylist, separators=(',', ':'))
return json.dumps({
"m":func,
"p":paramList
}, separators=(',', ':'))
def createMessage(func, paramList):
return prependHeader(constructMessage(func, paramList))
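# Illustration of the "~m~<length>~m~" framing implemented above; the session id
# is made up and the length prefix is simply the character count of the compact
# JSON payload (52 for this example).
def _example_frame():
    framed = createMessage("quote_create_session", ["qs_abcdefghijkl"])
    # framed == '~m~52~m~{"m":"quote_create_session","p":["qs_abcdefghijkl"]}'
    return framed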
def sendRawMessage(ws, message):
ws.send(prependHeader(message))
def sendMessage(ws, func, args):
ws.send(createMessage(func, args))
def generate_csv(a):
out= re.search('"s":\[(.+?)\}\]', a).group(1)
x=out.split(',{\"')
with open('data_file.csv', mode='w', newline='') as data_file:
employee_writer = csv.writer(data_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
employee_writer.writerow(['index', 'date', 'open', 'high', 'low', 'close', 'volume'])
for xi in x:
xi= re.split('\[|:|,|\]', xi)
print(xi)
ind= int(xi[1])
ts= datetime.fromtimestamp(float(xi[4])).strftime("%Y/%m/%d, %H:%M:%S")
employee_writer.writerow([ind, ts, float(xi[5]), float(xi[6]), float(xi[7]), float(xi[8]), float(xi[9])])
# add txt output file
def create_output_file():
now = localtime()
fname = f"{now[0]}-{now[1]}-{now[2]}.txt"
return fname
# Initialize the headers needed for the websocket connection
headers = json.dumps({
# 'Connection': 'upgrade',
# 'Host': 'data.tradingview.com',
'Origin': 'https://data.tradingview.com'
# 'Cache-Control': 'no-cache',
# 'Upgrade': 'websocket',
# 'Sec-WebSocket-Extensions': 'permessage-deflate; client_max_window_bits',
# 'Sec-WebSocket-Key': '2C08Ri6FwFQw2p4198F/TA==',
# 'Sec-WebSocket-Version': '13',
# 'User-Agent': 'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36 Edg/83.0.478.56',
# 'Pragma': 'no-cache',
# 'Upgrade': 'websocket'
})
# Then create a connection to the tunnel
ws = create_connection(
'wss://data.tradingview.com/socket.io/websocket',headers=headers)
session= generateSession()
print("session generated {}".format(session))
chart_session= generateChartSession()
print("chart_session generated {}".format(chart_session))
# Then send a message through the tunnel
sendMessage(ws, "set_auth_token", ["unauthorized_user_token"])
sendMessage(ws, "chart_create_session", [chart_session, ""])
sendMessage(ws, "quote_create_session", [session])
sendMessage(ws,"quote_set_fields", [session,"ch","chp","current_session","description","local_description","language","exchange","fractional","is_tradable","lp","lp_time","minmov","minmove2","original_name","pricescale","pro_name","short_name","type","update_mode","volume","currency_code","rchp","rtc"])
sendMessage(ws, "quote_add_symbols",[session, "NASDAQ:AAPL", {"flags":['force_permission']}])
sendMessage(ws, "quote_fast_symbols", [session,"NASDAQ:AAPL"])
#st='~m~140~m~{"m":"resolve_symbol","p":}'
#p1, p2 = filter_raw_message(st)
sendMessage(ws, "resolve_symbol", [chart_session,"symbol_1","={\"symbol\":\"NASDAQ:AAPL\",\"adjustment\":\"splits\",\"session\":\"extended\"}"])
sendMessage(ws, "create_series", [chart_session, "s1", "s1", "symbol_1", "1", 5000])
#sendMessage(ws, "create_study", [chart_session,"st4","st1","s1","ESD@tv-scripting-101!",{"text":"BNEhyMp2zcJFvntl+CdKjA==_DkJH8pNTUOoUT2BnMT6NHSuLIuKni9D9SDMm1UOm/vLtzAhPVypsvWlzDDenSfeyoFHLhX7G61HDlNHwqt/czTEwncKBDNi1b3fj26V54CkMKtrI21tXW7OQD/OSYxxd6SzPtFwiCVAoPbF2Y1lBIg/YE9nGDkr6jeDdPwF0d2bC+yN8lhBm03WYMOyrr6wFST+P/38BoSeZvMXI1Xfw84rnntV9+MDVxV8L19OE/0K/NBRvYpxgWMGCqH79/sHMrCsF6uOpIIgF8bEVQFGBKDSxbNa0nc+npqK5vPdHwvQuy5XuMnGIqsjR4sIMml2lJGi/XqzfU/L9Wj9xfuNNB2ty5PhxgzWiJU1Z1JTzsDsth2PyP29q8a91MQrmpZ9GwHnJdLjbzUv3vbOm9R4/u9K2lwhcBrqrLsj/VfVWMSBP","pineId":"TV_SPLITS","pineVersion":"8.0"}])
# Printing all the result
a=""
outfilename = create_output_file()
while True:
try:
result = ws.recv()
print(result)
        a = a + result + "\n"  # accumulate so generate_csv(a) has data to parse
with open(outfilename,"a") as ww:
ww.write(result)
ww.close()
except Exception as e:
print(e)
break
generate_csv(a)
| 38.727273
| 596
| 0.672926
|
4a151c1e239d0376e9a45fd6c1924f5b686bc7cf
| 20,146
|
py
|
Python
|
MFGPextreme/core/optimaldesign.py
|
umbrellagong/MFGPextreme
|
bedbe03740c585af179dbe3ebbee68fbf9986d51
|
[
"MIT"
] | 1
|
2022-01-14T02:03:45.000Z
|
2022-01-14T02:03:45.000Z
|
MFGPextreme/core/optimaldesign.py
|
umbrellagong/MFGPextreme
|
bedbe03740c585af179dbe3ebbee68fbf9986d51
|
[
"MIT"
] | null | null | null |
MFGPextreme/core/optimaldesign.py
|
umbrellagong/MFGPextreme
|
bedbe03740c585af179dbe3ebbee68fbf9986d51
|
[
"MIT"
] | null | null | null |
import copy
import numpy as np
from scipy import optimize
from sklearn.base import clone
from joblib import Parallel, delayed
from emukit.multi_fidelity.convert_lists_to_array import convert_x_list_to_array
from .metrics import log_pdf
class OptimalDesign(object):
'''
Single-fidelity Bayesian experimental design.
Parameters
-----------
f: func
The black-box function with input as parameter and output as return
input: instance of Inputs class
Include bounds, pdf, sampling pool or GMM approximations.
Attributes
----------
DX : array (n_samples, n_dim)
The input of the samples.
DY : array (n_samples,)
The output of the samples.
'''
def __init__(self, f, inputs):
self.f = f
self.inputs = inputs
def init_sampling(self, n_init):
'''
Generate initial samples.
Parameters
----------
n_init: int
The number of initial samples.
'''
self.DX = self.inputs.sampling(n_init)
self.DY = self.f(self.DX) # a vector
return self
def seq_sampling(self, n_seq, acq, model, n_starters=30, n_jobs=6,
jac=True):
'''
Generate sequential samples.
Parameters
----------
n_seq: int
The number of sequential samples.
acq: instance of Acq class
Represent the design of acq (or the objective of problem).
model: instance of GaussianProcessRegressor
The learned Gaussian process regressor from samples.
n_starters: int
Number of restarts for the optimizer.
n_jobs: int
The number of workers used by joblib for parallel computation.
jac: bool
Whether to use derivative information
Return
----------
model_list: list of GaussianProcessRegressor instance
The trained gpr at each iteration.
'''
self.acq = copy.copy(acq)
self.model = clone(model)
self.model_list = []
for i in range(n_seq):
self.model.fit(self.DX, self.DY)
# The number of samples are assumed to be small, thus we have the
# surrogates in memory.
self.model_list.append(copy.deepcopy(self.model))
self.acq.update_prior_search(self.model)
init = self.inputs.sampling(n_starters)
res = Parallel(n_jobs=n_jobs)(delayed(optimize.minimize)
(self.acq.compute_value,
init[j], method="L-BFGS-B",
bounds = self.inputs.domain,
jac = jac, options={'gtol': 1e-3})
for j in range(init.shape[0]))
opt_pos = res[np.argmin([k.fun for k in res])].x
self.DX = np.append(self.DX, np.atleast_2d(opt_pos), axis=0)
self.DY = np.append(self.DY, self.f(opt_pos))
# train the last model
self.model.fit(self.DX, self.DY)
self.model_list.append(copy.deepcopy(self.model))
return self.model_list
################################################################################
class OptimalDesignTF(object):
'''
Multi-fidelity Bayesian optimal design.
Parameters
-----------
f_h, f_l: func
The high and low fidelity black-box function with input as parameter and
output as return
input: instance of Inputs class
Include bounds, pdf, sampling pool or GMM approximations.
Attributes
----------
DX : array (n_samples, n_dim + 1)
The input of the samples sorted with decreasing fidelity. The last
feature is the fidelity level with 1 and 0 respectively representing
high and low-fidelity.
DY : array (n_samples,)
The output of the samples.
'''
def __init__(self, f_h, f_l, inputs):
self.f_h = f_h
self.f_l = f_l
self.inputs = inputs
def load_data(self, DX):
''' Start from existing dataset DX.
'''
idx_low = np.where(DX[:,2]==0)[0][0]
DX_h = DX[:idx_low][:,:2]
DY_h = self.f_h(DX_h)
DX_l = DX[idx_low:][:,:2]
DY_l = self.f_l(DX_l)
DX = convert_x_list_to_array([DX_l, DX_h])
DY = np.append(DY_l, DY_h)
self.DX = np.flip(DX, axis=0)
self.DY = np.flip(DY)
return self
def init_sampling(self, n_init_h, n_init_l):
'''Generate initial samples.
Parameters
-----------
n_init_h, n_init_l: int
number of high and low-fidelity initial samples
'''
DX_h = self.inputs.sampling(n_init_h)
DY_h = self.f_h(DX_h)
DX_l = self.inputs.sampling(n_init_l)
DY_l = self.f_l(DX_l)
DX = convert_x_list_to_array([DX_l, DX_h])
DY = np.append(DY_l, DY_h)
self.DX = np.flip(DX, axis=0)
self.DY = np.flip(DY)
return self
def seq_sampling_fixed(self, n_seq, n_ratio, acq, model,
n_starters=30, n_jobs=6):
'''Generate sequential samples corresponding to MF-F.
Parameters
-----------
n_seq: int
number of sequential iterations (including one high-fidelity and
n_ratio low-fidelity samples).
n_ratio: int
ratio of low/high-fidelity samples in each iteration.
acq, model, n_starters, n_jobs: see OptimalDesign
Return
----------
model_list: list of GaussianProcessRegressor instance
The trained gpr at each iteration.
'''
self.acq = copy.deepcopy(acq)
self.model = copy.deepcopy(model)
self.model_list = []
for ii in range(n_seq):
fidelity = 1
for j in range(n_ratio+1):
self.model.fit(self.DX, self.DY)
self.model_list.append(copy.deepcopy(self.model))
self.acq.update_prior_search(self.model)
init = self.inputs.sampling(n_starters)
res = Parallel(n_jobs=n_jobs)(delayed(optimize.minimize)
(self.acq.compute_value_tf_cost,
init[j],
args=(fidelity,1),
method="L-BFGS-B",
jac=True,
bounds = self.inputs.domain,
options={'gtol': 1e-3})
for j in range(init.shape[0]))
self.res = res
opt_pos = res[np.argmin([k.fun for k in res])].x
if fidelity==1:
self.DX = np.insert(self.DX, 0,
np.append(opt_pos, 1), axis=0)
self.DY = np.insert(self.DY, 0, self.f_h(opt_pos))
fidelity = 0
else:
self.DX = np.insert(self.DX, self.DX.shape[0],
np.append(opt_pos, 0), axis=0)
self.DY = np.append(self.DY, self.f_l(opt_pos))
self.model.fit(self.DX, self.DY)
self.model_list.append(copy.deepcopy(self.model))
return self.model_list
def seq_sampling_opt(self, n_seq, n_cost, c_ratio, acq, model,
n_starters=12, n_jobs=6):
'''Generate sequential samples corresponding to MF-O.
Parameters
-----------
n_seq: int
number of sequential samples.
n_cost: float
cost limit
c_ratio: float
ratio of high/low-fidelity costs
acq, model, n_starters, n_jobs: see OptimalDesign
Return
----------
model_list: list of GaussianProcessRegressor instance
The trained gpr at each iteration.
'''
self.acq = copy.deepcopy(acq)
self.model = copy.deepcopy(model)
self.model_list = []
for ii in range(n_seq):
self.model.fit(self.DX, self.DY)
self.model_list.append(copy.deepcopy(self.model))
self.acq.update_prior_search(self.model)
init = self.inputs.sampling(n_starters)
res_l = Parallel(n_jobs=n_jobs)(delayed(optimize.minimize)
(self.acq.compute_value_tf_cost,
init[j],
args=(0, 1), # low-fidelity
method="L-BFGS-B",
jac=True,
bounds = self.inputs.domain,
options={'gtol': 1e-3})
for j in range(init.shape[0]))
self.res_l = res_l
opt_pos_l = res_l[np.argmin([k.fun for k in res_l])].x
opt_value_l = res_l[np.argmin([k.fun for k in res_l])].fun
res_h = Parallel(n_jobs=n_jobs)(delayed(optimize.minimize)
(self.acq.compute_value_tf_cost,
init[j],
args=(1, c_ratio),
method="L-BFGS-B",
jac=True,
bounds = self.inputs.domain,
options={'gtol': 1e-3})
for j in range(init.shape[0]))
self.res_h = res_h
opt_pos_h = res_h[np.argmin([k.fun for k in res_h])].x
opt_value_h = res_h[np.argmin([k.fun for k in res_h])].fun
if opt_value_h < opt_value_l: # high-fidelity sampling!
self.DX = np.insert(self.DX, 0,
np.append(opt_pos_h, 1), axis=0)
self.DY = np.insert(self.DY, 0, self.f_h(opt_pos_h))
else:
self.DX = np.insert(self.DX, self.DX.shape[0],
np.append(opt_pos_l, 0), axis=0)
self.DY = np.append(self.DY, self.f_l(opt_pos_l))
num_h_X = np.count_nonzero(self.DX[:,-1]==1)
num_l_X = np.count_nonzero(self.DX[:,-1]==0)
cost = num_h_X + 1 / c_ratio * num_l_X
if cost > n_cost:
break
self.model.fit(self.DX, self.DY)
self.model_list.append(copy.deepcopy(self.model))
return self.model_list
########################################################
class OptimalDesignTF_light(OptimalDesignTF):
''' Same as the father class.
'''
def init_sampling(self, n_init_h, n_init_l, seed=0):
'''Generate initial samples.
Parameters
-----------
n_init_h, n_init_l: int
number of high and low-fidelity initial samples
seed: int,
random seed
'''
self.n_init_h = n_init_h
self.n_init_l = n_init_l
self.seed = seed
np.random.seed(seed)
DX_h = self.inputs.sampling(n_init_h)
DX_l = self.inputs.sampling(n_init_l)
DY_h = self.f_h(DX_h)
DY_l = self.f_l(DX_l)
DX = convert_x_list_to_array([DX_l, DX_h])
DY = np.append(DY_l, DY_h)
self.DX = np.flip(DX, axis=0)
self.DY = np.flip(DY)
return self
def seq_sampling_fixed(self, n_seq, n_ratio, n_cost, c_ratio, acq, model,
n_starters=12, n_jobs=6, opt_hyper_threshold=500,
validation=True, FILENAME='now', **metric_kwargs):
'''Generate sequential samples corresponding to MF-F.
The results, in terms of cost and errors, are directly saved to
FILENAME. The trained model and dataset will not be saved.
Parameters
-----------
n_seq: int
number of sequential iterations (including one high-fidelity and
n_ratio low-fidelity samples).
n_ratio: int
ratio of low/high-fidelity samples in each iteration.
n_cost: float
cost limit
c_ratio: float
ratio of high/low-fidelity costs
opt_hyper_threshold: float
The hyper-parameters of the surrogate will be optimized every 5
samples after the total cost exceeding this limit.
validation: bool
If it is a validation case, the error w.r.t. exact pdf will be
computed.
FILENAME: str
The name of folder to save computed errors in validation case.
metric_kwargs:
keyword parameters to compute the error.
'''
self.acq = copy.deepcopy(acq)
self.model = copy.deepcopy(model)
cost = self.n_init_h + self.n_init_l/c_ratio
for ii in range(n_seq):
fidelity = 1
for j in range(n_ratio+1):
if self.DX.shape[0]<opt_hyper_threshold or self.DX.shape[0]%5==0:
self.model.fit(self.DX, self.DY)
else:
self.model.fit_keep(self.DX, self.DY)
if validation:
# writedown the error if we have a exact pdf.
error = np.log10(log_pdf([self.model], **metric_kwargs)[0])
result = np.array([[ii, cost, error]])
with open(FILENAME + '/' + str(self.seed) +'.out', 'a') as f:
np.savetxt(f, result, fmt='%1.4f')
self.acq.update_prior_search(self.model)
init = self.inputs.sampling(n_starters)
res = Parallel(n_jobs=n_jobs)(delayed(optimize.minimize)
(self.acq.compute_value_tf_cost,
init[j],
args=(fidelity,1),
method="L-BFGS-B",
jac=True,
bounds = self.inputs.domain,
options={'gtol': 1e-3})
for j in range(init.shape[0]))
self.res = res
opt_pos = res[np.argmin([k.fun for k in res])].x
if fidelity==1:
self.DX = np.insert(self.DX, 0,
np.append(opt_pos, 1), axis=0)
self.DY = np.insert(self.DY, 0, self.f_h(opt_pos))
cost = cost + 1
# the MF-F assume one high-fidelity in each iteration
fidelity = 0
else:
self.DX = np.insert(self.DX, self.DX.shape[0],
np.append(opt_pos, 0), axis=0)
self.DY = np.append(self.DY, self.f_l(opt_pos))
cost = cost + 1 / c_ratio
if validation:
self.model.fit(self.DX, self.DY)
error = np.log10(log_pdf([self.model], **metric_kwargs)[0])
result = np.array([[ii, cost, error]])
with open(FILENAME + '/' + str(self.seed) +'.out', 'a') as f:
np.savetxt(f, result)
return None
def seq_sampling_opt(self, n_seq, n_ratio, n_cost, c_ratio, acq, model,
n_starters=12, n_jobs=6, opt_hyper_threshold=500,
validation=True, FILENAME='now', **metric_kwargs):
'''Same as seq_sampling_fixed.
'''
self.acq = copy.deepcopy(acq)
self.model = copy.deepcopy(model)
cost = self.n_init_h + self.n_init_l/c_ratio
for ii in range(n_seq):
if self.DX.shape[0]<opt_hyper_threshold or self.DX.shape[0]%5==0:
self.model.fit(self.DX, self.DY)
else:
self.model.fit_keep(self.DX, self.DY)
if validation:
error = np.log10(log_pdf([self.model], **metric_kwargs)[0])
result = np.array([[ii, cost, error]])
with open(FILENAME + '/' + str(self.seed) +'.out', 'a') as f:
np.savetxt(f, result, fmt='%1.4f')
self.acq.update_prior_search(self.model)
init = self.inputs.sampling(n_starters)
# compute the low-fidelity sub-optimal sample
res_l = Parallel(n_jobs=n_jobs)(delayed(optimize.minimize)
(self.acq.compute_value_tf_cost,
init[j],
args=(0, 1), # low-fidelity
method="L-BFGS-B",
jac=True,
bounds = self.inputs.domain,
options={'gtol': 1e-3})
for j in range(init.shape[0]))
self.res_l = res_l
opt_pos_l = res_l[np.argmin([k.fun for k in res_l])].x
opt_value_l = res_l[np.argmin([k.fun for k in res_l])].fun
# compute the high-fidelity sub-optimal sample
res_h = Parallel(n_jobs=n_jobs)(delayed(optimize.minimize)
(self.acq.compute_value_tf_cost,
init[j],
args=(1, c_ratio),
method="L-BFGS-B",
jac=True,
bounds = self.inputs.domain,
options={'gtol': 1e-3})
for j in range(init.shape[0]))
self.res_h = res_h
opt_pos_h = res_h[np.argmin([k.fun for k in res_h])].x
opt_value_h = res_h[np.argmin([k.fun for k in res_h])].fun
if opt_value_h < opt_value_l: # high-fidelity sampling!
self.DX = np.insert(self.DX, 0,
np.append(opt_pos_h, 1), axis=0)
self.DY = np.insert(self.DY, 0, self.f_h(opt_pos_h))
cost = cost + 1
else:
self.DX = np.insert(self.DX, self.DX.shape[0],
np.append(opt_pos_l, 0), axis=0)
self.DY = np.append(self.DY, self.f_l(opt_pos_l))
cost = cost + 1 / c_ratio
if cost > n_cost:
break
if validation:
self.model.fit(self.DX, self.DY)
error = np.log10(log_pdf([self.model], **metric_kwargs)[0])
result = np.array([[ii, cost, error]])
with open(FILENAME + '/' + str(self.seed) +'.out', 'a') as f:
np.savetxt(f, result)
return None
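# A compact restatement of the fidelity-selection rule used in seq_sampling_opt,
# assuming (as the calls above suggest) that the second argument of
# compute_value_tf_cost divides the acquisition value by the sampling cost, so
# the high- and low-fidelity optima are directly comparable.
def _select_fidelity(opt_value_h, opt_value_l):
    """Return 1 to take a high-fidelity sample next, 0 for a low-fidelity one."""
    return 1 if opt_value_h < opt_value_l else 0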
| 42.50211
| 82
| 0.465254
|
4a151c7d884d5dbcfa224857f0a838c0e5dd92bf
| 1,639
|
py
|
Python
|
src/mergeResult.py
|
Sapphirine/bestbuy-recommend
|
b165f1eb8961b5ab77be34ab0d8ee13aec3bcb95
|
[
"MIT"
] | null | null | null |
src/mergeResult.py
|
Sapphirine/bestbuy-recommend
|
b165f1eb8961b5ab77be34ab0d8ee13aec3bcb95
|
[
"MIT"
] | null | null | null |
src/mergeResult.py
|
Sapphirine/bestbuy-recommend
|
b165f1eb8961b5ab77be34ab0d8ee13aec3bcb95
|
[
"MIT"
] | 2
|
2019-05-24T09:58:09.000Z
|
2021-03-22T10:43:25.000Z
|
import csv
from collections import defaultdict
import operator
if __name__ == "__main__":
filenames = ['../data/result/frequency.csv', '../data/result/predictionsSVC.csv', '../data/result/content.csv']
results = [[],[],[]]
for i in xrange(3):
with open(filenames[i], 'r') as f:
reader = csv.reader(f)
for row in reader:
results[i].append(row)
scores = [1.0, 0.7, 0.5, 0.4, 0.3]
weights = [1.0, 1.1, 0.1]
finalResult = []
for i in xrange(len(results[0])): # for every query
skuScore = defaultdict(float)
for j in xrange(3): # for every method
skus = results[j][i]
for k in xrange(len(skus)): # for every single predict
sku = skus[k]
                skuScore[sku] += scores[k] * weights[j] # since it's a defaultdict, just add the score
# sort the dict by its score, find the top 5 skus
sortedSkus = sorted(skuScore.items(), key=operator.itemgetter(1), reverse=True)
sortedSkus = sortedSkus[0:5]
skus = []
for x in xrange(5): # save the top5 result
skus.append(sortedSkus[x][0])
finalResult.append(skus)
trueSkus = []
with open('../data/psudo_test2.csv', 'r') as f:
reader = csv.reader(f)
for row in reader:
trueSkus.append(row[1])
# write to files
# with open('../data/result/final-result.csv','w') as f:
# writer = csv.writer(f)
# for row in finalResult:
# writer.writerow(row)
total = 0.0
score = 0.0
tmp = 0.0
for i in xrange(len(finalResult)):
total += 1
trueSku = trueSkus[i]
predict = finalResult[i]
if trueSku in predict:
tmp += 1
score += 1.0 / (predict.index(trueSku) + 1)
print "precision: " + str(tmp/total)
print "score: " + str(score/total)
| 27.779661
| 112
| 0.643075
|
4a151c890e2dc7e93c257ce2cba2237f75e1ceb8
| 1,834
|
py
|
Python
|
markdown/extensions/delete.py
|
overlord/sublimetext-markdown-preview
|
a060dc4fabebbcf1a12de8e6bf48b7bcc888eef7
|
[
"MIT"
] | null | null | null |
markdown/extensions/delete.py
|
overlord/sublimetext-markdown-preview
|
a060dc4fabebbcf1a12de8e6bf48b7bcc888eef7
|
[
"MIT"
] | null | null | null |
markdown/extensions/delete.py
|
overlord/sublimetext-markdown-preview
|
a060dc4fabebbcf1a12de8e6bf48b7bcc888eef7
|
[
"MIT"
] | null | null | null |
"""
Mdownx.delete
An extension for Python Markdown.
Adds support for ~~strike~~ => <del>strike</del>
MIT license.
Copyright (c) 2014 Isaac Muse <isaacmuse@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Modified to work with Sublime Markdown Preview
"""
from __future__ import unicode_literals
from __future__ import absolute_import
from ..extensions import Extension
from ..inlinepatterns import SimpleTagPattern
RE_DEL = r"(\~{2})(.+?)\2"
class DeleteExtension(Extension):
"""Adds delete extension to Markdown class."""
def extendMarkdown(self, md, md_globals):
"""Add support for <del>test</del> tags as ~~test~~"""
md.ESCAPED_CHARS.append('~')
md.inlinePatterns.add("del", SimpleTagPattern(RE_DEL, "del"), "<not_strong")
def makeExtension(*args, **kwargs):
return DeleteExtension(*args, **kwargs)
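# A minimal usage sketch, assuming the standard Python-Markdown entry point
# that this vendored copy mirrors (import paths may differ inside
# sublimetext-markdown-preview):
#   import markdown
#   markdown.markdown("~~gone~~", extensions=[DeleteExtension()])
#   # -> '<p><del>gone</del></p>'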
| 48.263158
| 460
| 0.763904
|
4a151e28b1cc680e7e2fdc517526311c0e26ef1e
| 796
|
py
|
Python
|
mysite/myapp/forms.py
|
trbarnes/CollegeLife
|
14b9ef0b9623c23ed1eb053c90290c04f657c051
|
[
"MIT"
] | null | null | null |
mysite/myapp/forms.py
|
trbarnes/CollegeLife
|
14b9ef0b9623c23ed1eb053c90290c04f657c051
|
[
"MIT"
] | null | null | null |
mysite/myapp/forms.py
|
trbarnes/CollegeLife
|
14b9ef0b9623c23ed1eb053c90290c04f657c051
|
[
"MIT"
] | null | null | null |
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class SuggestionForm(forms.Form):
suggestion_field = forms.CharField(label='New Post', max_length=1000)
class CommentForm(forms.Form):
comment_field = forms.CharField(label='Comment', max_length=240)
class RegistrationForm(UserCreationForm):
email = forms.EmailField(
label="Email",
required=True
)
class Meta:
model = User
fields = ("username", "email",
"password1", "password2")
def save(self, commit=True):
user = super(RegistrationForm, self).save(commit=False)
user.email = self.cleaned_data["email"]
if commit:
user.save()
return user
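# A minimal view-level sketch of how RegistrationForm is typically consumed;
# the view name, template and redirect target are illustrative assumptions.
#   def register(request):
#       form = RegistrationForm(request.POST or None)
#       if request.method == "POST" and form.is_valid():
#           form.save()  # creates the User with the cleaned email attached
#           return redirect("login")
#       return render(request, "register.html", {"form": form})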
| 28.428571
| 73
| 0.655779
|
4a151e7a262c08202cfea477fa288a28b7cdddc8
| 7,772
|
py
|
Python
|
kitsune/users/tests/test_es.py
|
AndrewDVXI/kitsune
|
84bd4fa60346681c3fc5a03b0b1540fd1335cee2
|
[
"BSD-3-Clause"
] | 1
|
2021-07-18T00:41:16.000Z
|
2021-07-18T00:41:16.000Z
|
kitsune/users/tests/test_es.py
|
AndrewDVXI/kitsune
|
84bd4fa60346681c3fc5a03b0b1540fd1335cee2
|
[
"BSD-3-Clause"
] | 9
|
2021-04-08T22:05:53.000Z
|
2022-03-12T00:54:11.000Z
|
kitsune/users/tests/test_es.py
|
Whoerr/kitsune
|
2428573b4920a824c3e712b8a4870f8c1ada8f64
|
[
"BSD-3-Clause"
] | 1
|
2020-07-28T15:52:46.000Z
|
2020-07-28T15:52:46.000Z
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from django.core.management import call_command
from nose.tools import eq_
from kitsune.customercare.tests import ReplyFactory
from kitsune.questions.tests import AnswerFactory
from kitsune.search.tests.test_es import ElasticTestCase
from kitsune.users.models import UserMappingType
from kitsune.users.tests import ProfileFactory, UserFactory
from kitsune.wiki.tests import RevisionFactory
class UserSearchTests(ElasticTestCase):
def test_add_and_delete(self):
"""Adding a user with a profile should add it to the index.
Deleting should delete it.
"""
p = ProfileFactory()
self.refresh()
eq_(UserMappingType.search().count(), 1)
p.user.delete()
self.refresh()
eq_(UserMappingType.search().count(), 0)
def test_data_in_index(self):
"""Verify the data we are indexing."""
u = UserFactory(username="r1cky", email="r@r.com", profile__name="Rick Róss")
r1 = ReplyFactory(user=u, twitter_username="r1cardo")
r2 = ReplyFactory(user=u, twitter_username="r1cky")
self.refresh()
eq_(UserMappingType.search().count(), 1)
data = UserMappingType.search()[0]
eq_(data["username"], u.username)
eq_(data["display_name"], u.profile.name)
assert r1.twitter_username in data["twitter_usernames"]
assert r2.twitter_username in data["twitter_usernames"]
u = UserFactory(username="willkg", email="w@w.com", profile__name="Will Cage")
self.refresh()
eq_(UserMappingType.search().count(), 2)
def test_suggest_completions(self):
u1 = UserFactory(username="r1cky", profile__name="Rick Róss")
u2 = UserFactory(username="Willkg", profile__name="Will Cage")
self.refresh()
eq_(UserMappingType.search().count(), 2)
results = UserMappingType.suggest_completions("wi")
eq_(1, len(results))
eq_("Will Cage (Willkg)", results[0]["text"])
eq_(u2.id, results[0]["payload"]["user_id"])
results = UserMappingType.suggest_completions("R1")
eq_(1, len(results))
eq_("Rick Róss (r1cky)", results[0]["text"])
eq_(u1.id, results[0]["payload"]["user_id"])
# Add another Ri....
UserFactory(username="richard", profile__name="Richard Smith")
self.refresh()
eq_(UserMappingType.search().count(), 3)
results = UserMappingType.suggest_completions("ri")
eq_(2, len(results))
texts = [r["text"] for r in results]
assert "Rick Róss (r1cky)" in texts
assert "Richard Smith (richard)" in texts
results = UserMappingType.suggest_completions("Rick Ró")
eq_(1, len(results))
texts = [r["text"] for r in results]
eq_("Rick Róss (r1cky)", results[0]["text"])
def test_suggest_completions_numbers(self):
u1 = UserFactory(username="1337mike", profile__name="Elite Mike")
UserFactory(username="crazypants", profile__name="Crazy Pants")
self.refresh()
eq_(UserMappingType.search().count(), 2)
results = UserMappingType.suggest_completions("13")
eq_(1, len(results))
eq_("Elite Mike (1337mike)", results[0]["text"])
eq_(u1.id, results[0]["payload"]["user_id"])
def test_query_username_with_numbers(self):
u = UserFactory(username="1337miKE", profile__name="Elite Mike")
UserFactory(username="mike", profile__name="NotElite Mike")
self.refresh()
eq_(UserMappingType.search().query(iusername__match="1337mike").count(), 1)
data = UserMappingType.search().query(iusername__match="1337mike")[0]
eq_(data["username"], u.username)
eq_(data["display_name"], u.profile.name)
def test_query_display_name_with_whitespace(self):
UserFactory(username="1337miKE", profile__name="Elite Mike")
UserFactory(username="mike", profile__name="NotElite Mike")
self.refresh()
eq_(UserMappingType.search().count(), 2)
eq_(UserMappingType.search().query(idisplay_name__match_whitespace="elite").count(), 1)
def test_query_twitter_usernames(self):
u1 = UserFactory(username="1337miKE", profile__name="Elite Mike")
u2 = UserFactory(username="mike", profile__name="NotElite Mike")
r1 = ReplyFactory(user=u1, twitter_username="l33tmIkE")
ReplyFactory(user=u2, twitter_username="mikey")
self.refresh()
eq_(UserMappingType.search().query(itwitter_usernames__match="l33tmike").count(), 1)
data = UserMappingType.search().query(itwitter_usernames__match="l33tmike")[0]
eq_(data["username"], u1.username)
eq_(data["display_name"], u1.profile.name)
assert r1.twitter_username in data["twitter_usernames"]
def test_last_contribution_date(self):
"""Verify the last_contribution_date field works properly."""
u = UserFactory(username="satdav")
self.refresh()
data = UserMappingType.search().query(username__match="satdav")[0]
assert not data["last_contribution_date"]
        # Add an AoA reply. It should be the last contribution.
d = datetime(2014, 1, 1)
ReplyFactory(user=u, created=d)
self.refresh()
data = UserMappingType.search().query(username__match="satdav")[0]
eq_(data["last_contribution_date"], d)
# Add a Support Forum answer. It should be the last contribution.
d = datetime(2014, 1, 2)
AnswerFactory(creator=u, created=d)
u.profile.save() # we need to resave the profile to force a reindex
self.refresh()
data = UserMappingType.search().query(username__match="satdav")[0]
eq_(data["last_contribution_date"], d)
# Add a Revision edit. It should be the last contribution.
d = datetime(2014, 1, 3)
RevisionFactory(created=d, creator=u)
u.profile.save() # we need to resave the profile to force a reindex
self.refresh()
data = UserMappingType.search().query(username__match="satdav")[0]
eq_(data["last_contribution_date"], d)
# Add a Revision review. It should be the last contribution.
d = datetime(2014, 1, 4)
RevisionFactory(reviewed=d, reviewer=u)
u.profile.save() # we need to resave the profile to force a reindex
self.refresh()
data = UserMappingType.search().query(username__match="satdav")[0]
eq_(data["last_contribution_date"], d)
def test_reindex_users_that_contributed_yesterday(self):
yesterday = datetime.now() - timedelta(days=1)
# Verify for answers.
u = UserFactory(username="answerer")
AnswerFactory(creator=u, created=yesterday)
call_command("reindex_users_that_contributed_yesterday")
self.refresh()
data = UserMappingType.search().query(username__match="answerer")[0]
eq_(data["last_contribution_date"].date(), yesterday.date())
# Verify for edits.
u = UserFactory(username="editor")
RevisionFactory(creator=u, created=yesterday)
call_command("reindex_users_that_contributed_yesterday")
self.refresh()
data = UserMappingType.search().query(username__match="editor")[0]
eq_(data["last_contribution_date"].date(), yesterday.date())
# Verify for reviews.
u = UserFactory(username="reviewer")
RevisionFactory(reviewer=u, reviewed=yesterday)
call_command("reindex_users_that_contributed_yesterday")
self.refresh()
data = UserMappingType.search().query(username__match="reviewer")[0]
eq_(data["last_contribution_date"].date(), yesterday.date())
| 38.098039
| 95
| 0.65929
|
4a151ff16fc8f0d2dc2294c288d2abe089f851d2
| 2,398
|
py
|
Python
|
setup.py
|
dizzy21c/QUANTAXIS_RealtimeCollector
|
bb55a3852fe437b51ce92a9cbbc4e89c5e317f6c
|
[
"MIT"
] | null | null | null |
setup.py
|
dizzy21c/QUANTAXIS_RealtimeCollector
|
bb55a3852fe437b51ce92a9cbbc4e89c5e317f6c
|
[
"MIT"
] | null | null | null |
setup.py
|
dizzy21c/QUANTAXIS_RealtimeCollector
|
bb55a3852fe437b51ce92a9cbbc4e89c5e317f6c
|
[
"MIT"
] | null | null | null |
import codecs
import io
import os
import re
import sys
import webbrowser
import platform
import configparser
try:
from setuptools import setup
except:
from distutils.core import setup
"""
"""
if sys.version_info.major != 3 or sys.version_info.minor not in [4, 5, 6, 7, 8]:
print('wrong version, should be 3.4/3.5/3.6/3.7/3.8 version')
sys.exit()
with io.open('QARealtimeCollector/__init__.py', 'rt', encoding='utf8') as f:
context = f.read()
VERSION = re.search(r'__version__ = \'(.*?)\'', context).group(1)
AUTHOR = re.search(r'__author__ = \'(.*?)\'', context).group(1)
def read(fname):
return codecs.open(os.path.join(os.path.dirname(__file__), fname)).read()
NAME = "qarealtime_collector"
"""
"""
PACKAGES = ["QARealtimeCollector", "QARealtimeCollector.collectors",
"QARealtimeCollector.clients", "QARealtimeCollector.datahandler"]
"""
"""
DESCRIPTION = "QARealtimeCollector: QUANTAXIS REALTIME MARKETDATA COLLECTORS"
KEYWORDS = ["quantaxis", "quant", "finance", "Backtest", 'Framework']
"""
"""
AUTHOR_EMAIL = "yutiansut@qq.com"
URL = "https://github.com/yutiansut/QUANTAXIS_RealtimeCollector"
LICENSE = "MIT"
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=DESCRIPTION,
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
],
install_requires=['quantaxis', 'quantaxis_pubsub',
'quantaxis-otgbroker', 'quantaxis-randomprice','quantaxis_webserver'],
entry_points={
'console_scripts': [
'QARC_Start = QARealtimeCollector.__init__:start',
'QARC_Resample = QARealtimeCollector.__init__:resample',
'QARC_Random = QARealtimeCollector.__init__:random',
'QARC_CTP = QARealtimeCollector.__init__:start_ctp',
'QARC_Stock = QARealtimeCollector.__init__:stock_collector',
'QARC_Stock2 = QARealtimeCollector.__init__:stock_collector_ext',
'QARC_WEBSERVER = QARealtimeCollector.webserver:main'
]
},
# install_requires=requirements,
keywords=KEYWORDS,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
license=LICENSE,
packages=PACKAGES,
include_package_data=True,
zip_safe=True
)
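# Note on the console_scripts above: after installation (e.g. `pip install .`)
# setuptools generates one executable per entry, so running `QARC_Start` on the
# command line invokes QARealtimeCollector.__init__:start, `QARC_CTP` invokes
# start_ctp, and so on for the other collectors.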
| 26.94382
| 92
| 0.673478
|
4a15217b7889b54055e6d58e3da4fb58ebe776ee
| 8,410
|
py
|
Python
|
improver_tests/utilities/cube_manipulation/test_enforce_coordinate_ordering.py
|
nivnac/improver
|
c16c794f62598017cebc6ae4f99af8f317219a77
|
[
"BSD-3-Clause"
] | null | null | null |
improver_tests/utilities/cube_manipulation/test_enforce_coordinate_ordering.py
|
nivnac/improver
|
c16c794f62598017cebc6ae4f99af8f317219a77
|
[
"BSD-3-Clause"
] | 3
|
2020-04-25T12:55:42.000Z
|
2020-07-23T11:50:46.000Z
|
improver_tests/utilities/cube_manipulation/test_enforce_coordinate_ordering.py
|
Kat-90/improver
|
a5c31be3430df429ae38e7c16e267fcbc2af1858
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2020 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Unit tests for the function "cube_manipulation.enforce_coordinate_ordering".
"""
import unittest
import numpy as np
from iris.cube import Cube
from iris.tests import IrisTest
from improver.metadata.constants.time_types import TIME_COORDS
from improver.synthetic_data.set_up_test_cubes import (
add_coordinate,
set_up_probability_cube,
set_up_variable_cube,
)
from improver.utilities.cube_manipulation import enforce_coordinate_ordering
class Test_enforce_coordinate_ordering(IrisTest):
"""Test the enforce_coordinate_ordering utility."""
def setUp(self):
"""Set up cube with non-homogeneous data to test with"""
data = np.arange(27).reshape((3, 3, 3)) + 275
cube = set_up_variable_cube(data.astype(np.float32))
time_points = [
cube.coord("time").points[0],
cube.coord("time").points[0] + 3600,
]
self.cube = add_coordinate(
cube,
time_points,
"time",
coord_units=TIME_COORDS["time"].units,
dtype=np.int64,
order=[1, 0, 2, 3],
)
def test_move_coordinate_to_start_when_already_at_start(self):
"""Test that a cube with the expected data contents is returned when
the coordinate to be reordered is already in the desired position."""
expected = self.cube.copy()
enforce_coordinate_ordering(self.cube, "realization")
self.assertEqual(self.cube.coord_dims("realization")[0], 0)
self.assertArrayAlmostEqual(self.cube.data, expected.data)
def test_move_coordinate_to_start(self):
"""Test that a cube with the expected data contents is returned when
the time coordinate is reordered to be the first coordinate in the
cube."""
expected = self.cube.copy()
expected.transpose([1, 0, 2, 3])
enforce_coordinate_ordering(self.cube, "time")
self.assertEqual(self.cube.coord_dims("time")[0], 0)
# test associated aux coord is moved along with time dimension
self.assertEqual(self.cube.coord_dims("forecast_period")[0], 0)
self.assertArrayAlmostEqual(self.cube.data, expected.data)
def test_move_coordinate_to_end(self):
"""Test that a cube with the expected data contents is returned when
the realization coordinate is reordered to be the last coordinate in
the cube."""
expected = self.cube.copy()
expected.transpose([1, 2, 3, 0])
enforce_coordinate_ordering(self.cube, "realization", anchor_start=False)
self.assertEqual(self.cube.coord_dims("realization")[0], 3)
self.assertArrayAlmostEqual(self.cube.data, expected.data)
def test_move_coordinate_to_start_with_list(self):
"""Test that a cube with the expected data contents is returned when
the time coordinate is reordered to be the first coordinate in the
cube."""
expected = self.cube.copy()
expected.transpose([1, 0, 2, 3])
enforce_coordinate_ordering(self.cube, ["time"])
self.assertEqual(self.cube.coord_dims("time")[0], 0)
self.assertArrayAlmostEqual(self.cube.data, expected.data)
def test_move_multiple_coordinate_to_start_with_list(self):
"""Test that a cube with the expected data contents is returned when
the time and realization coordinates are reordered to be the first
coordinates in the cube."""
expected = self.cube.copy()
expected.transpose([1, 0, 2, 3])
enforce_coordinate_ordering(self.cube, ["time", "realization"])
self.assertEqual(self.cube.coord_dims("time")[0], 0)
self.assertEqual(self.cube.coord_dims("realization")[0], 1)
self.assertArrayAlmostEqual(self.cube.data, expected.data)
def test_move_multiple_coordinate_to_end_with_list(self):
"""Test that a cube with the expected data contents is returned when
the time and realization coordinates are reordered to be the last
coordinates in the cube. The coordinate name to be reordered is
specified as a list."""
expected = self.cube.copy()
expected.transpose([2, 3, 1, 0])
enforce_coordinate_ordering(
self.cube, ["time", "realization"], anchor_start=False
)
self.assertEqual(self.cube.coord_dims("time")[0], 2)
self.assertEqual(self.cube.coord_dims("realization")[0], 3)
self.assertArrayAlmostEqual(self.cube.data, expected.data)
def test_full_reordering(self):
"""Test that a cube with the expected data contents is returned when
all the coordinates within the cube are reordered into the order
specified by the names within the input list."""
expected = self.cube.copy()
expected.transpose([2, 0, 3, 1])
enforce_coordinate_ordering(
self.cube, ["latitude", "realization", "longitude", "time"]
)
self.assertEqual(self.cube.coord_dims("latitude")[0], 0)
self.assertEqual(self.cube.coord_dims("realization")[0], 1)
self.assertEqual(self.cube.coord_dims("longitude")[0], 2)
self.assertEqual(self.cube.coord_dims("time")[0], 3)
self.assertArrayAlmostEqual(self.cube.data, expected.data)
def test_include_extra_coordinates(self):
"""Test that a cube with the expected data contents is returned when
extra coordinates are passed in for reordering but these coordinates
are not present within the cube."""
expected = self.cube.copy()
expected.transpose([1, 0, 2, 3])
enforce_coordinate_ordering(self.cube, ["time", "realization", "nonsense"])
self.assertEqual(self.cube.coord_dims("time")[0], 0)
self.assertEqual(self.cube.coord_dims("realization")[0], 1)
self.assertArrayAlmostEqual(self.cube.data, expected.data)
def test_no_impact_scalar(self):
"""Test that a cube with the expected data contents is returned when
reordered on a scalar coordinate."""
cube = self.cube[0, :, :, :]
expected = cube.copy()
enforce_coordinate_ordering(cube, "realization")
self.assertFalse(cube.coord_dims("realization"))
self.assertArrayAlmostEqual(cube.data, expected.data)
def test_handles_threshold(self):
"""Test a probability cube is correctly handled"""
thresholds = np.array([278, 279, 280], dtype=np.float32)
data = 0.03 * np.arange(27).reshape((3, 3, 3))
cube = set_up_probability_cube(data.astype(np.float32), thresholds)
enforce_coordinate_ordering(cube, ["threshold"], anchor_start=False)
self.assertEqual(cube.coord_dims("air_temperature")[0], 2)
if __name__ == "__main__":
unittest.main()
| 46.208791
| 83
| 0.687634
|
4a152211d5385836f5c68a5ac97bf01125157a66
| 58
|
py
|
Python
|
testing.py
|
RyanAugust/new-and-readworthy
|
3253d906902f36f64386f30b822dc58115eaf233
|
[
"MIT"
] | null | null | null |
testing.py
|
RyanAugust/new-and-readworthy
|
3253d906902f36f64386f30b822dc58115eaf233
|
[
"MIT"
] | null | null | null |
testing.py
|
RyanAugust/new-and-readworthy
|
3253d906902f36f64386f30b822dc58115eaf233
|
[
"MIT"
] | null | null | null |
import requests
import sqlite3
def read_source_pages():
    # Stub: fetch the configured source pages; parsing/storage is not implemented yet.
    pass
| 9.666667
| 24
| 0.793103
|
4a1522fbd7d193805ca23f5c38632f3e3806fa3b
| 1,838
|
py
|
Python
|
anomaly_detection/api/middleware/auth.py
|
kumarashit/anomaly-detection
|
1b0aab03197ee690248cfed6fd67c8f4b533bb91
|
[
"Apache-2.0"
] | 6
|
2019-04-15T12:10:07.000Z
|
2019-10-21T14:41:39.000Z
|
anomaly_detection/api/middleware/auth.py
|
kumarashit/anomaly-detection
|
1b0aab03197ee690248cfed6fd67c8f4b533bb91
|
[
"Apache-2.0"
] | 3
|
2019-04-23T14:38:32.000Z
|
2019-07-27T21:32:51.000Z
|
anomaly_detection/api/middleware/auth.py
|
kumarashit/anomaly-detection
|
1b0aab03197ee690248cfed6fd67c8f4b533bb91
|
[
"Apache-2.0"
] | 5
|
2019-04-15T12:10:08.000Z
|
2020-02-07T03:45:25.000Z
|
# Copyright 2019 The OpenSDS Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from werkzeug.wrappers.request import Request
from anomaly_detection.context import RequestContext
NO_AUTH_ADMIN_TENANT_ID = 'admin_tenant'
class NoAuthMiddleWare(object):
def __init__(self, app):
self._app = app
def __call__(self, environ, start_response):
req = Request(environ)
# FIXME: Any other good idea for this.
if req.path in ['/', '/v1beta', '/v1beta/']:
return self._app(environ, start_response)
if 'X-Auth-Token' not in req.headers:
headers = [('Content-Type', 'text/plain')]
start_response('400 Bad Request', headers)
return [b'X-Auth-Token not found in header']
token = req.headers['X-Auth-Token']
user_id, _sep, tenant_id = token.partition(':')
tenant_id = tenant_id or user_id
remote_address = getattr(req, 'remote_address', '127.0.0.1')
environ["anomaly_detection.context"] = RequestContext(user_id,
tenant_id,
is_admin=True,
remote_address=remote_address)
return self._app(environ, start_response)
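# A minimal wiring sketch (the inner WSGI app is an illustrative assumption):
#   app = NoAuthMiddleWare(inner_wsgi_app)
# A request carrying "X-Auth-Token: alice:tenant42" is forwarded with
# environ["anomaly_detection.context"] set to
# RequestContext("alice", "tenant42", is_admin=True, ...); a request to any
# other path without the header is answered directly with "400 Bad Request"
# (the root and /v1beta paths pass through untouched).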
| 39.956522
| 92
| 0.626224
|
4a152302a51c1f629c0b29da38fe1c163e2de613
| 1,546
|
py
|
Python
|
LAB05_MustafaCankanBALCI/Lab05MustafaCankanmodule.py
|
MustiCankan/PythonProjectsCS
|
61994d8cbcd3119b2e9bb6de2d73b7776d83bb41
|
[
"MIT"
] | null | null | null |
LAB05_MustafaCankanBALCI/Lab05MustafaCankanmodule.py
|
MustiCankan/PythonProjectsCS
|
61994d8cbcd3119b2e9bb6de2d73b7776d83bb41
|
[
"MIT"
] | null | null | null |
LAB05_MustafaCankanBALCI/Lab05MustafaCankanmodule.py
|
MustiCankan/PythonProjectsCS
|
61994d8cbcd3119b2e9bb6de2d73b7776d83bb41
|
[
"MIT"
] | null | null | null |
def load_movies(file_movies):
"""
    Store the movies, keyed by release year, read from a csv file.
    Parameters:
        file_movies (file): file pointer to lines of the form "year,title"
    Returns:
        dict: hs, mapping each year to the list of movie titles
"""
hs = {}
for line in file_movies:
list_file_movies = line.split(",")
movie_year = int(list_file_movies[0])
movie = list_file_movies[1][:-1]
if movie_year in hs:
hs[movie_year].append(movie)
else:
hs[movie_year] = [movie]
return hs
def get_movies_by_year(hs_movie, year):
"""
    Show the movies released in the given year.
    Parameters:
        hs_movie (dict): movies keyed by release year
        year (int): the year the movies were released
    Returns:
        list: the movies released that year (empty list if none)
"""
if year in hs_movie:
return hs_movie[year]
else:
return []
def get_movies_by_keyword(hs_movie, enter_val):
"""
    Show the movies whose titles contain the entered keyword.
    Parameters:
        hs_movie (dict): movies keyed by release year
        enter_val (str): entered keyword
    Returns:
        list: list_tuple of (year, movie_name) pairs whose titles match
"""
list_tuple = []
for year in hs_movie:
for movie_name in hs_movie[year]:
search = movie_name.find(enter_val)
if search != -1:
list_tuple.append((year, movie_name))
return list_tuple
def print_list(enter_list):
"""
Printing elements in the list
Parameters:
enter_list (list): given list
Returns:
return None
"""
for i in enter_list:
print(i)
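# A small usage sketch; the csv file name and its contents are illustrative
# assumptions (each line is expected to look like "1999,The Matrix").
if __name__ == "__main__":
    with open("movies.csv") as f:
        movies = load_movies(f)
    print_list(get_movies_by_year(movies, 1999))
    print_list(get_movies_by_keyword(movies, "Matrix"))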
| 20.613333
| 55
| 0.605433
|
4a1524930e0af16b0eb9a80798eda9413db98d83
| 7,257
|
py
|
Python
|
cheshire3/web/sruWsgi.py
|
cheshire3/cheshire3
|
306348831ec110229c78a7c5f0f2026a0f394d2c
|
[
"Python-2.0",
"Unlicense"
] | 3
|
2015-08-02T09:03:28.000Z
|
2017-12-06T09:26:14.000Z
|
cheshire3/web/sruWsgi.py
|
cheshire3/cheshire3
|
306348831ec110229c78a7c5f0f2026a0f394d2c
|
[
"Python-2.0",
"Unlicense"
] | 5
|
2015-08-17T01:16:35.000Z
|
2015-09-16T21:51:27.000Z
|
cheshire3/web/sruWsgi.py
|
cheshire3/cheshire3
|
306348831ec110229c78a7c5f0f2026a0f394d2c
|
[
"Python-2.0",
"Unlicense"
] | 6
|
2015-05-17T15:32:20.000Z
|
2020-04-22T08:43:16.000Z
|
"""SRU WSGI Application."""
from urllib import quote
from cgi import FieldStorage
from sruHandler import *
class SRUWsgiHandler(SRUProtocolHandler):
"""SRU Request Handling Class for WSGI."""
def _app_base(self, environ):
scheme = environ['wsgi.url_scheme']
url = [scheme]
url.append('://')
try:
url.append(environ['HTTP_HOST'])
except KeyError:
url.append(environ['SERVER_NAME'])
        if scheme == 'https':
if environ['SERVER_PORT'] != '443':
url.append(':')
url.append(environ['SERVER_PORT'])
else:
if environ['SERVER_PORT'] != '80':
url.append(':')
url.append(environ['SERVER_PORT'])
url.append(quote(environ.get('SCRIPT_NAME', '')))
return ''.join(url)
def _path_base(self, environ):
url = [self._app_base(environ)]
url.append(quote(environ.get('PATH_INFO', '')))
return ''.join(url)
def _reconstruct_url(self, environ):
url = [self._path_base(environ)]
if environ.get('QUERY_STRING'):
url.append('?')
url.append(environ['QUERY_STRING'])
return ''.join(url)
def __call__(self, environ, start_response):
session = self.session
path = '/'.join([
environ.get('SCRIPT_NAME', '').strip('/'),
environ.get('PATH_INFO', '').strip('/')
])
out = []
if path not in configs:
# Unknown endpoint
# No specification
out.append(
'<databases numberOfDatabases="{0}">'.format(len(configs))
)
for k in sorted(configs.keys()):
out.append("<database><path>{0}</path></database>".format(k))
out.append('</databases>')
else:
dbconf = configs[path]
if isinstance(dbconf, tuple):
dbid = dbconf[0]
db = serv.get_object(session, dbid)
config = db.get_object(
session,
dbconf[1]['http://www.loc.gov/zing/srw/']
)
else:
config = dbconf['http://www.loc.gov/zing/srw/']
# Check db hasn't changed since instantiated
db = config.parent
# Attempt to find filepath for db metadata
fp = db.get_path(session, 'metadataPath')
if os.stat(fp).st_mtime > db.initTime:
# Rediscover objects
dbid = db.id
del db
try:
del serv.objects[dbid]
except KeyError:
pass
try:
del serv.databases[dbid]
except KeyError:
pass
db = serv.get_object(session, dbid)
session.path = self._path_base(environ)
session.config = config
store = FieldStorage(fp=environ['wsgi.input'], environ=environ)
opts = {}
for qp in store.list:
if qp.value.isdigit():
opts[qp.name] = int(qp.value)
else:
opts[qp.name] = qp.value
if not opts:
opts = {
'operation': 'explain',
'version': '1.2',
'recordPacking': 'xml'
}
if not 'operation' in opts:
err = self.diagnostic(7,
msg="Mandatory parameter not supplied",
details='operation')
result = self.processUnknownOperation(err, config)
elif not opts['operation'] in ['explain',
'searchRetrieve',
'scan']:
err = self.diagnostic(4,
msg="Unsupported Operation",
details=opts['operation'])
result = self.processUnknownOperation(err, config)
else:
respName = "%sResponse" % opts['operation']
result = getattr(elemFac, respName)()
v = elemFac.version('1.2')
result.append(v)
if not 'version' in opts:
err = self.diagnostic(
7,
msg="Mandatory parameter not supplied",
details='version'
)
dx = self.diagnosticToXml(err)
x = elemFac.diagnostics()
x.append(dx)
result.append(x)
else:
fn = getattr(self, 'process_%s' % opts['operation'])
try:
fn(opts, result)
except cqlParser.Diagnostic as d:
diags = elemFac.diagnostics(self.diagnosticToXml(d))
result.append(diags)
result.append(self.echoedQuery(opts))
self.extraData('response', opts, result)
session.currentResultSet = None
out.append('<?xml version="1.0"?>')
if 'stylesheet' in opts:
out.append(
'<?xml-stylesheet type="text/xsl" '
'href="{0}"?>'.format(opts['stylesheet'])
)
out.append(etree.tostring(result, pretty_print=True))
if len(serv.databaseConfigs) >= 25:
# Cleanup memory
try:
del serv.objects[config.parent.id]
except KeyError:
pass
response_headers = [('Content-Type',
'application/xml'),
('Content-Length',
str(sum([len(d) for d in out])))
]
start_response("200 OK", response_headers)
return out
def environment_application(environ, start_response):
status = '200 OK'
output = ["{0}\n".format(i) for i in environ.iteritems()]
response_headers = [('Content-Type', 'text/plain'),
('Content-Length', str(sum([len(i) for i in output])))]
start_response(status, response_headers)
return output
def main():
"""Start up a simple app server to serve the SRU application."""
from wsgiref.simple_server import make_server
try:
host = sys.argv[1]
except IndexError:
try:
import socket
host = socket.gethostname()
except:
host = 'localhost'
try:
port = int(sys.argv[2])
    except (IndexError, ValueError):
port = 8000
httpd = make_server(host, port, application)
print """You will be able to access the application at:
http://{0}:{1}""".format(host, port)
httpd.serve_forever()
configs = get_configsFromServer(session, serv)
application = SRUWsgiHandler(session, configs)
if __name__ == "__main__":
sys.exit(main())
| 36.651515
| 79
| 0.472923
|
4a15264e48a612c3914acc2d3c4940523870d1bc
| 10,157
|
py
|
Python
|
build/django-blog-zinnia/zinnia/migrations/0001_initial.py
|
diegopradogesto/MatrixWeb
|
4996f516fd0a90056c092d8991474ccb802d6215
|
[
"MIT"
] | 1
|
2017-12-06T22:17:56.000Z
|
2017-12-06T22:17:56.000Z
|
zinnia/migrations/0001_initial.py
|
bikemule/django-blog-zinnia
|
faaebb111a8e60dbb08ad78ba28160822eb0dea5
|
[
"BSD-3-Clause"
] | 1
|
2021-09-08T12:32:28.000Z
|
2021-09-08T12:32:28.000Z
|
zinnia/migrations/0001_initial.py
|
bikemule/django-blog-zinnia
|
faaebb111a8e60dbb08ad78ba28160822eb0dea5
|
[
"BSD-3-Clause"
] | 1
|
2021-09-08T10:28:36.000Z
|
2021-09-08T10:28:36.000Z
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Category'
db.create_table('zinnia_category', (
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, db_index=True)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=50)),
))
db.send_create_signal('zinnia', ['Category'])
# Adding model 'Entry'
db.create_table('zinnia_entry', (
('status', self.gf('django.db.models.fields.IntegerField')(default=0)),
('last_update', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('comment_enabled', self.gf('django.db.models.fields.BooleanField')(default=True, blank=True)),
('tags', self.gf('tagging.fields.TagField')()),
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
('excerpt', self.gf('django.db.models.fields.TextField')(blank=True)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, db_index=True)),
('content', self.gf('django.db.models.fields.TextField')()),
('end_publication', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2042, 3, 15, 0, 0))),
('start_publication', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('creation_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('zinnia', ['Entry'])
# Adding M2M table for field sites on 'Entry'
db.create_table('zinnia_entry_sites', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('entry', models.ForeignKey(orm['zinnia.entry'], null=False)),
('site', models.ForeignKey(orm['sites.site'], null=False))
))
db.create_unique('zinnia_entry_sites', ['entry_id', 'site_id'])
# Adding M2M table for field related on 'Entry'
db.create_table('zinnia_entry_related', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('from_entry', models.ForeignKey(orm['zinnia.entry'], null=False)),
('to_entry', models.ForeignKey(orm['zinnia.entry'], null=False))
))
db.create_unique('zinnia_entry_related', ['from_entry_id', 'to_entry_id'])
# Adding M2M table for field categories on 'Entry'
db.create_table('zinnia_entry_categories', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('entry', models.ForeignKey(orm['zinnia.entry'], null=False)),
('category', models.ForeignKey(orm['zinnia.category'], null=False))
))
db.create_unique('zinnia_entry_categories', ['entry_id', 'category_id'])
# Adding M2M table for field authors on 'Entry'
db.create_table('zinnia_entry_authors', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('entry', models.ForeignKey(orm['zinnia.entry'], null=False)),
('user', models.ForeignKey(orm['auth.user'], null=False))
))
db.create_unique('zinnia_entry_authors', ['entry_id', 'user_id'])
def backwards(self, orm):
# Deleting model 'Category'
db.delete_table('zinnia_category')
# Deleting model 'Entry'
db.delete_table('zinnia_entry')
# Removing M2M table for field sites on 'Entry'
db.delete_table('zinnia_entry_sites')
# Removing M2M table for field related on 'Entry'
db.delete_table('zinnia_entry_related')
# Removing M2M table for field categories on 'Entry'
db.delete_table('zinnia_entry_categories')
# Removing M2M table for field authors on 'Entry'
db.delete_table('zinnia_entry_authors')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'zinnia.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'zinnia.entry': {
'Meta': {'object_name': 'Entry'},
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['zinnia.Category']"}),
'comment_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'end_publication': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2042, 3, 15, 0, 0)'}),
'excerpt': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'related': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'related_rel_+'", 'null': 'True', 'to': "orm['zinnia.Entry']"}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'start_publication': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tags': ('tagging.fields.TagField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['zinnia']
| 62.312883
| 178
| 0.588658
|
4a15266acdaad7478dc366326daf5ce113703f65
| 912
|
py
|
Python
|
python/tree/leetocde/minimum_depth_binary_tree.py
|
googege/algo-learn
|
054d05e8037005c5810906d837de889108dad107
|
[
"MIT"
] | 153
|
2020-09-24T12:46:51.000Z
|
2022-03-31T21:30:44.000Z
|
python/tree/leetocde/minimum_depth_binary_tree.py
|
googege/algo-learn
|
054d05e8037005c5810906d837de889108dad107
|
[
"MIT"
] | null | null | null |
python/tree/leetocde/minimum_depth_binary_tree.py
|
googege/algo-learn
|
054d05e8037005c5810906d837de889108dad107
|
[
"MIT"
] | 35
|
2020-12-22T11:07:06.000Z
|
2022-03-09T03:25:08.000Z
|
# Minimum depth of a binary tree
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
    # Recursive solution
def minDepth_1(self, root: TreeNode) -> int:
if not root:
return 0
left, right = self.minDepth_1(root.left), self.minDepth_1(root.right)
return left + right + 1 if left == 0 or right == 0 else min(left, right) + 1
    # Breadth-first search (BFS)
def minDepth_2(self, root: TreeNode) -> int:
queue, res = [root], 0
while root and len(queue) > 0:
n, res = len(queue), res + 1
for _ in range(n):
node = queue.pop(0)
if not node.left and not node.right:
return res
for child in [node.left, node.right]:
if child:
queue.append(child)
return 0
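# A quick usage check: the tree below has minimum depth 2 (root -> right leaf)
# and both implementations agree; kept under a __main__ guard so importing the
# module stays side-effect free.
if __name__ == "__main__":
    _root = TreeNode(1, TreeNode(2, TreeNode(4)), TreeNode(3))
    assert Solution().minDepth_1(_root) == 2
    assert Solution().minDepth_2(_root) == 2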
| 26.057143
| 84
| 0.509868
|
4a15268e8b3b9ff3deafd755760bd8f47ead4ec2
| 913
|
py
|
Python
|
tests/testing/__init__.py
|
rushgeo/stactools
|
f5d7502d5e90e3cc376bdf06606264f6b156e54f
|
[
"Apache-2.0"
] | 46
|
2020-08-25T19:49:54.000Z
|
2022-03-14T12:25:48.000Z
|
tests/testing/__init__.py
|
rushgeo/stactools
|
f5d7502d5e90e3cc376bdf06606264f6b156e54f
|
[
"Apache-2.0"
] | 204
|
2020-08-25T13:39:09.000Z
|
2022-03-31T18:03:02.000Z
|
tests/testing/__init__.py
|
rushgeo/stactools
|
f5d7502d5e90e3cc376bdf06606264f6b156e54f
|
[
"Apache-2.0"
] | 25
|
2020-08-25T19:50:12.000Z
|
2022-02-19T03:47:54.000Z
|
from stactools.testing import TestData
test_data = TestData(
__file__, {
"item.json": {
"url": "https://raw.githubusercontent.com/radiantearth/"
"stac-spec/v1.0.0/examples/simple-item.json",
"compress": False,
},
"AW3D30_global.vrt": {
"url": "s3://raster/AW3D30/AW3D30_global.vrt",
"s3": {
"anon": True,
"client_kwargs": {
"endpoint_url": "https://opentopography.s3.sdsc.edu"
}
}
},
"manifest.safe": {
"url":
("https://sentinel2l2a01.blob.core.windows.net/"
"sentinel2-l2/03/K/TV/"
"2020/05/23/"
"S2A_MSIL2A_20200523T213041_N0212_R100_T03KTV_20200910T164427.SAFE/"
"manifest.safe"),
"planetary_computer":
True
}
})
| 30.433333
| 81
| 0.485214
|
4a152755efb6832984e2e4a69bef4013d0b771a6
| 1,767
|
py
|
Python
|
testing_suite/test_search_for_ISM_match.py
|
rhpvorderman/TALON
|
04706575d8ad0975da94c89581c8de5d09522d61
|
[
"MIT"
] | null | null | null |
testing_suite/test_search_for_ISM_match.py
|
rhpvorderman/TALON
|
04706575d8ad0975da94c89581c8de5d09522d61
|
[
"MIT"
] | null | null | null |
testing_suite/test_search_for_ISM_match.py
|
rhpvorderman/TALON
|
04706575d8ad0975da94c89581c8de5d09522d61
|
[
"MIT"
] | null | null | null |
import pytest
from talon import talon
from .helper_fns import fetch_correct_ID, get_db_cursor
@pytest.mark.dbunit
class TestSearchForISM(object):
def test_find_no_match(self):
""" Example where the toy transcript database contains no matches
for the edge set.
"""
conn, cursor = get_db_cursor()
build = "toy_build"
transcript_dict = talon.make_transcript_dict(cursor, build)
conn.close()
edges = ( 100, 200, 300)
matches = talon.search_for_ISM(edges, transcript_dict)
# Make sure that no match got returned
assert matches == None
def test_find_match(self):
""" Example where the toy transcript database contains exactly one
ISM match for the transcript.
"""
conn, cursor = get_db_cursor()
build = "toy_build"
transcript_dict = talon.make_transcript_dict(cursor, build)
edges = ( 2, 3 )
matches = talon.search_for_ISM(edges, transcript_dict)
# Make sure that correct match got returned
correct_gene_ID = fetch_correct_ID("TG1", "gene", cursor)
assert matches[0]["gene_ID"] == correct_gene_ID
conn.close()
def test_find_monoexon_match(self):
""" Input is a sinlge exon that matches part of an existing transcript
"""
conn, cursor = get_db_cursor()
build = "toy_build"
transcript_dict = talon.make_transcript_dict(cursor, build)
edges = ( 14, )
matches = talon.search_for_ISM(edges, transcript_dict)
# Make sure that correct match got returned
correct_gene_ID = fetch_correct_ID("TG2", "gene", cursor)
assert matches[0]["gene_ID"] == correct_gene_ID
conn.close()
| 31.553571
| 78
| 0.642332
|
4a15275caafe7925f55cade340339c7df4e03262
| 203
|
py
|
Python
|
Python/CursoEmVideo/Mundo 1/Desafios/Desafio22.py
|
carlos09v/Mini-Projects_Exercises
|
0d457b5c5c83811fdefa1dd8f80ce436f8dac744
|
[
"MIT"
] | 1
|
2021-08-23T13:04:55.000Z
|
2021-08-23T13:04:55.000Z
|
Python/CursoEmVideo/Mundo 1/Desafios/Desafio22.py
|
carlos09v/Mini-Projects_Exercises
|
0d457b5c5c83811fdefa1dd8f80ce436f8dac744
|
[
"MIT"
] | null | null | null |
Python/CursoEmVideo/Mundo 1/Desafios/Desafio22.py
|
carlos09v/Mini-Projects_Exercises
|
0d457b5c5c83811fdefa1dd8f80ce436f8dac744
|
[
"MIT"
] | null | null | null |
nome= input('Qual o seu nome completo? ')
print(f'Seu nome com letras maiúsculas é: {nome.upper()}')
print(f'Seu nome com letras minúsculas é: {nome.lower()}')
print(f'Seu nome ao todo tem: {len(nome)}')
| 50.75
| 58
| 0.699507
|
4a1528296a2f69b2d83468c4f113db369a6eb6d7
| 5,407
|
py
|
Python
|
src/trainer.py
|
Kirillova-Anastasia/LGFN
|
41570187b0055876c3eb971441cb914186e284b0
|
[
"MIT"
] | null | null | null |
src/trainer.py
|
Kirillova-Anastasia/LGFN
|
41570187b0055876c3eb971441cb914186e284b0
|
[
"MIT"
] | null | null | null |
src/trainer.py
|
Kirillova-Anastasia/LGFN
|
41570187b0055876c3eb971441cb914186e284b0
|
[
"MIT"
] | null | null | null |
import os
import math
from decimal import Decimal
import utility
import torch
import torch.nn.utils as utils
from tqdm import tqdm
from tensorboardX import SummaryWriter
class Trainer():
def __init__(self, args, loader, my_model, my_loss, ckp):
self.args = args
self.scale = args.scale
self.ckp = ckp
self.loader_train = loader.loader_train
self.loader_test = loader.loader_test
self.model = my_model
self.loss = my_loss
self.optimizer = utility.make_optimizer(args, self.model)
if self.args.load != '':
self.optimizer.load(ckp.dir, epoch=len(ckp.log))
self.error_last = 1e8
def train(self):
self.optimizer.schedule()
self.loss.step()
epoch = self.optimizer.get_last_epoch() + 1
lr = self.optimizer.get_lr()
# self.ckp.write_log(
# '[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
# )
self.loss.start_log()
self.model.train()
timer_data, timer_model = utility.timer(), utility.timer()
for batch, (lr, hr, _, idx_scale) in enumerate(self.loader_train):
lr, hr = self.prepare(lr, hr)
timer_data.hold()
timer_model.tic()
self.optimizer.zero_grad()
sr = self.model(lr, idx_scale)
sr = sr * 255
loss = self.loss(sr, hr)
loss.backward()
if self.args.gclip > 0:
utils.clip_grad_value_(
self.model.parameters(),
self.args.gclip
)
self.optimizer.step()
timer_model.hold()
#if (batch + 1) % self.args.print_every == 0:
# self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
# (batch + 1) * self.args.batch_size,
# len(self.loader_train.dataset),
# self.loss.display_loss(batch),
# timer_model.release(),
# timer_data.release()))
timer_data.tic()
self.loss.end_log(len(self.loader_train))
self.error_last = self.loss.log[-1, -1]
def test(self):
torch.set_grad_enabled(False)
epoch = self.optimizer.get_last_epoch() + 1
#self.ckp.write_log('\nEvaluation:')
self.ckp.add_log(
torch.zeros(1, len(self.loader_test), len(self.scale))
)
self.model.eval()
timer_test = utility.timer()
if self.args.save_results: self.ckp.begin_background()
for idx_data, d in enumerate(self.loader_test):
for idx_scale, scale in enumerate(self.scale):
d.dataset.set_scale(idx_scale)
for lr, hr, filename, _ in tqdm(d, ncols=80):
lr, hr = self.prepare(lr, hr)
sr = self.model(lr, idx_scale)
sr = sr.data * 255
sr = utility.quantize(sr, self.args.rgb_range)
save_list = [sr]
'''
self.ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(
sr, hr, scale, self.args.rgb_range, dataset=d
)
'''
if self.args.save_gt:
save_list.extend([lr, hr])
if self.args.save_results:
self.ckp.save_results(d, filename[0], save_list)
'''
self.ckp.log[-1, idx_data, idx_scale] /= len(d)
best = self.ckp.log.max(0)
self.ckp.write_log(
'[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
d.dataset.name,
scale,
self.ckp.log[-1, idx_data, idx_scale],
best[0][idx_data, idx_scale],
best[1][idx_data, idx_scale] + 1
)
)
'''
# if not self.args.test_only:
# writer = SummaryWriter(self.args.tensorboard)
# writer.add_scalar('Loss', self.loss.log[-1, 0], epoch)
# writer.add_scalar('PSNR', self.ckp.log[-1, idx_data, idx_scale], epoch)
# writer.close()
#self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
#self.ckp.write_log('Saving...')
if self.args.save_results:
self.ckp.end_background()
        if not self.args.test_only:
            # recompute best here since the PSNR-logging block above is commented out
            best = self.ckp.log.max(0)
            self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))
#self.ckp.write_log(
# 'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
#)
torch.set_grad_enabled(True)
def prepare(self, *args):
device = torch.device('cpu' if self.args.cpu else 'cuda')
def _prepare(tensor):
if self.args.precision == 'half': tensor = tensor.half()
return tensor.to(device)
return [_prepare(a) for a in args]
def terminate(self):
if self.args.test_only:
self.test()
return True
else:
epoch = self.optimizer.get_last_epoch() + 1
return epoch >= self.args.epochs
def quantize(img, rgb_range):
return img.mul(255 / rgb_range).clamp(0, 255).round()
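# A small numeric check of quantize (illustrative values): with rgb_range=255
# the scale factor is 1.0, so the call just clamps to [0, 255] and rounds,
# e.g. torch.tensor([-3.2, 17.6, 300.0]) -> tensor([0., 18., 255.]).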
| 33.171779
| 93
| 0.511929
|
4a15288480ffb9f4e0a1631adaba406655be8970
| 2,775
|
py
|
Python
|
tests/molecular/functional_groups/functional_group_factory/generic_functional_group_factory/test_get_functional_groups.py
|
stevenbennett96/stk
|
6e5af87625b83e0bfc7243bc42d8c7a860cbeb76
|
[
"MIT"
] | null | null | null |
tests/molecular/functional_groups/functional_group_factory/generic_functional_group_factory/test_get_functional_groups.py
|
stevenbennett96/stk
|
6e5af87625b83e0bfc7243bc42d8c7a860cbeb76
|
[
"MIT"
] | null | null | null |
tests/molecular/functional_groups/functional_group_factory/generic_functional_group_factory/test_get_functional_groups.py
|
stevenbennett96/stk
|
6e5af87625b83e0bfc7243bc42d8c7a860cbeb76
|
[
"MIT"
] | null | null | null |
import itertools as it
from ..utilities import (
are_clone_sequences,
are_same_id_sequences,
atom_id,
)
def test_get_functional_groups(case_data):
"""
Test :meth:`.GenericFunctionalGroupFactory.get_functional_groups`.
Parameters
----------
case_data : :class:`.CaseData`
The test case. Holds the factory, molecule and correct
functional groups.
Returns
-------
None : :class:`NoneType`
"""
_test_get_functional_groups(
factory=case_data.factory,
molecule=case_data.molecule,
functional_groups=case_data.functional_groups,
)
def _test_get_functional_groups(factory, molecule, functional_groups):
"""
Test :meth:`.GenericFunctionalGroupFactory.get_functional_groups`.
Parameters
----------
factory : :class:`.GenericFunctionalGroupFactory`
The factory to test.
molecule : :class:`.Molecule`
The molecule to test.
functional_groups : :class:`tuple`
The correct :class:`.GenericFunctionalGroup` instances.
Returns
-------
None : :class:`NoneType`
"""
for expected_fg, fg in it.zip_longest(
functional_groups,
factory.get_functional_groups(molecule),
):
are_clone_functional_groups(expected_fg, fg)
def are_clone_functional_groups(functional_group1, functional_group2):
"""
Test if the functional groups are clones of each other.
"""
assert functional_group1.__class__ is functional_group2.__class__
are_clone_sequences(
atoms1=sorted(functional_group1.get_atoms(), key=atom_id),
atoms2=sorted(functional_group2.get_atoms(), key=atom_id),
)
are_same_id_sequences(
ids1=sorted(functional_group1.get_atom_ids()),
ids2=sorted(functional_group2.get_atom_ids()),
)
are_same_id_sequences(
ids1=sorted(functional_group1.get_placer_ids()),
ids2=sorted(functional_group2.get_placer_ids()),
)
are_same_id_sequences(
ids1=sorted(functional_group1.get_core_atom_ids()),
ids2=sorted(functional_group2.get_core_atom_ids()),
)
are_clone_sequences(
atoms1=sorted(functional_group1.get_bonders(), key=atom_id),
atoms2=sorted(functional_group2.get_bonders(), key=atom_id),
)
are_same_id_sequences(
ids1=sorted(functional_group1.get_bonder_ids()),
ids2=sorted(functional_group2.get_bonder_ids()),
)
are_clone_sequences(
atoms1=sorted(functional_group1.get_deleters(), key=atom_id),
atoms2=sorted(functional_group2.get_deleters(), key=atom_id),
)
are_same_id_sequences(
ids1=sorted(functional_group1.get_deleter_ids()),
ids2=sorted(functional_group2.get_deleter_ids()),
)
| 26.428571
| 70
| 0.686126
|
4a152966f57e634c97f109402c3f7f9ec9fd602c
| 1,469
|
py
|
Python
|
tensorflow_datasets/audio/libritts_test.py
|
Ir1d/datasets
|
2a04ce5208e53bcf9b5acacf690bb7446285176e
|
[
"Apache-2.0"
] | 2
|
2020-06-09T10:44:36.000Z
|
2020-06-09T10:44:46.000Z
|
tensorflow_datasets/audio/libritts_test.py
|
Ir1d/datasets
|
2a04ce5208e53bcf9b5acacf690bb7446285176e
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_datasets/audio/libritts_test.py
|
Ir1d/datasets
|
2a04ce5208e53bcf9b5acacf690bb7446285176e
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for libritts dataset module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_datasets import testing
from tensorflow_datasets.audio import libritts
class LibriTTSTest(testing.DatasetBuilderTestCase):
DATASET_CLASS = libritts.Libritts
SPLITS = {
"train_clean100": 2,
"train_clean360": 2,
"train_other500": 2,
"test_clean": 2,
"test_other": 2,
"dev_clean": 2,
"dev_other": 2,
}
DL_EXTRACT_RESULT = {
"train_clean100": "train-clean-100",
"train_clean360": "train-clean-360",
"train_other500": "train-other-500",
"test_clean": "test-clean",
"test_other": "test-other",
"dev_clean": "dev-clean",
"dev_other": "dev-other",
}
if __name__ == "__main__":
testing.test_main()
| 29.38
| 74
| 0.710007
|
4a1529e7f1ea739b726dbf66ed9594a9605d3c14
| 2,756
|
py
|
Python
|
data_lake/test/test_actual_split.py
|
uktrade/fadmin2
|
0f774400fb816c9ca30e30b25ae542135966e185
|
[
"MIT"
] | 3
|
2020-01-05T16:46:42.000Z
|
2021-08-02T08:08:39.000Z
|
data_lake/test/test_actual_split.py
|
uktrade/fadmin2
|
0f774400fb816c9ca30e30b25ae542135966e185
|
[
"MIT"
] | 30
|
2019-11-28T15:16:35.000Z
|
2021-08-16T14:49:58.000Z
|
data_lake/test/test_actual_split.py
|
uktrade/fadmin2
|
0f774400fb816c9ca30e30b25ae542135966e185
|
[
"MIT"
] | null | null | null |
from data_lake.test.utils import DataLakeTesting
from chartofaccountDIT.test.factories import (
NaturalCodeFactory,
ProgrammeCodeFactory,
ProjectCodeFactory,
)
from core.models import FinancialYear
from core.utils.generic_helpers import get_current_financial_year
from costcentre.test.factories import (
CostCentreFactory,
DepartmentalGroupFactory,
DirectorateFactory,
)
from forecast.models import (
FinancialCode,
FinancialPeriod,
)
from upload_split_file.models import SplitPayActualFigure
class ActualSplitTests(DataLakeTesting):
def setUp(self):
group_name = "Test Group"
self.group_code = "TestGG"
directorate_name = "Test Directorate"
self.directorate_code = "TestDD"
self.cost_centre_code = 109076
group_obj = DepartmentalGroupFactory(
group_code=self.group_code, group_name=group_name,
)
directorate_obj = DirectorateFactory(
directorate_code=self.directorate_code,
directorate_name=directorate_name,
group=group_obj,
)
cost_centre_obj = CostCentreFactory(
directorate=directorate_obj, cost_centre_code=self.cost_centre_code,
)
current_year = get_current_financial_year()
programme_obj = ProgrammeCodeFactory()
self.programme_code = programme_obj.programme_code
nac_obj = NaturalCodeFactory(economic_budget_code="RESOURCE")
self.nac = nac_obj.natural_account_code
project_obj = ProjectCodeFactory()
self.project_code = project_obj.project_code
self.year_obj = FinancialYear.objects.get(financial_year=current_year)
self.financial_code_obj = FinancialCode.objects.create(
programme=programme_obj,
cost_centre=cost_centre_obj,
natural_account_code=nac_obj,
project_code=project_obj,
)
        self.financial_code_obj.save()
financial_period_queryset = \
FinancialPeriod.objects.filter(financial_period_code__lt=4)
amount = 0
for period_obj in financial_period_queryset:
SplitPayActualFigure.objects.create(
financial_period=period_obj,
financial_year_id=current_year,
financial_code=self.financial_code_obj,
amount=amount
)
amount += 10000
def test_actual_split_data_returned_in_response(self):
self.url_name = "data_lake_actual_split"
response = self.get_data()
assert response['Content-Type'] == 'text/csv'
rows = response.content.decode("utf-8").split("\n")
cols = rows[0].split(",")
assert len(cols) == 12
assert len(rows) == 5
| 33.609756
| 80
| 0.680697
|
4a152a214b9ff3ab55656863a73c39762d7ca648
| 1,997
|
py
|
Python
|
nonebot_plugin_rua/__init__.py
|
Zeta-qixi/nonebot_plugin_rua
|
cc2f47526f1fc6dcd5b6054b496b949eb1db38d3
|
[
"MIT"
] | null | null | null |
nonebot_plugin_rua/__init__.py
|
Zeta-qixi/nonebot_plugin_rua
|
cc2f47526f1fc6dcd5b6054b496b949eb1db38d3
|
[
"MIT"
] | null | null | null |
nonebot_plugin_rua/__init__.py
|
Zeta-qixi/nonebot_plugin_rua
|
cc2f47526f1fc6dcd5b6054b496b949eb1db38d3
|
[
"MIT"
] | null | null | null |
from io import BytesIO
from os import path
import requests
from nonebot import get_driver, on_command, on_notice
from nonebot.adapters.cqhttp.bot import Bot
from nonebot.adapters.cqhttp.event import Event, PokeNotifyEvent
from nonebot.adapters.cqhttp.message import Message, MessageSegment
from nonebot.typing import T_State
from PIL import Image
from .data_source import generate_gif
master = get_driver().config.master
data_dir = path.join(path.dirname(__file__), 'data')
plugin_dir = path.abspath(__file__).split('__')[0]  # directory of this __init__.py (keeps `path` bound to os.path)
img_src = plugin_dir + '/data/output.gif'
img = MessageSegment.image(f'file://{img_src}')
rua_me = on_notice()
'''
"Poke" (戳一戳) notify event handler
'''
@rua_me.handle()
async def _t3(bot: Bot, event: PokeNotifyEvent):
if event.target_id in master:
creep_id = event.sender_id
else: creep_id = event.target_id
try:
url = f'http://q1.qlogo.cn/g?b=qq&nk={creep_id}&s=160'
resp = requests.get(url)
resp_cont = resp.content
avatar = Image.open(BytesIO(resp_cont))
#<class 'PIL.JpegImagePlugin.JpegImageFile'>
generate_gif(data_dir, avatar)
await bot.send(event, message=img)
except:
pass
rua = on_command('rua')
@rua.handle()
async def rua_handle(bot: Bot, event: Event, state: T_State):
try:
msg = (str(event.raw_message).split('rua')[1].strip())
if ':image' in msg:
state['url'] = (msg.split('url=')[-1][:-2])
elif msg.isdigit():
id = int(msg)
state['url'] = f'http://q1.qlogo.cn/g?b=qq&nk={id}&s=160'
except:
pass
@rua.got("url", prompt="要rua点什么~")
async def rua_got(bot: Bot, event: Event, state: T_State):
msg = str(state['url'])
state['url'] = (msg.split('url=')[-1][:-2])
resp = requests.get(state['url'])
resp_cont = resp.content
try:
avatar = Image.open(BytesIO(resp_cont))
generate_gif(data_dir, avatar)
await rua.finish(img)
except:
pass
| 26.986486
| 69
| 0.635954
|
4a152bcc44a94a25d97a48503c9cd1817cc1cfe3
| 9,379
|
py
|
Python
|
venv/lib/python3.6/site-packages/chardet/chardistribution.py
|
aitoehigie/britecore_flask
|
eef1873dbe6b2cc21f770bc6dec783007ae4493b
|
[
"MIT"
] | null | null | null |
venv/lib/python3.6/site-packages/chardet/chardistribution.py
|
aitoehigie/britecore_flask
|
eef1873dbe6b2cc21f770bc6dec783007ae4493b
|
[
"MIT"
] | 1
|
2021-06-01T23:32:38.000Z
|
2021-06-01T23:32:38.000Z
|
venv/lib/python3.6/site-packages/pip/_vendor/chardet/chardistribution.py
|
aitoehigie/britecore_flask
|
eef1873dbe6b2cc21f770bc6dec783007ae4493b
|
[
"MIT"
] | null | null | null |
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (
EUCTW_CHAR_TO_FREQ_ORDER,
EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO,
)
from .euckrfreq import (
EUCKR_CHAR_TO_FREQ_ORDER,
EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO,
)
from .gb2312freq import (
GB2312_CHAR_TO_FREQ_ORDER,
GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO,
)
from .big5freq import (
BIG5_CHAR_TO_FREQ_ORDER,
BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO,
)
from .jisfreq import (
JIS_CHAR_TO_FREQ_ORDER,
JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO,
)
class CharDistributionAnalysis(object):
ENOUGH_DATA_THRESHOLD = 1024
SURE_YES = 0.99
SURE_NO = 0.01
MINIMUM_DATA_THRESHOLD = 3
def __init__(self):
# Mapping table to get frequency order from char order (get from
# GetOrder())
self._char_to_freq_order = None
self._table_size = None # Size of above table
# This is a constant value which varies from language to language,
# used in calculating confidence. See
# http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
# for further detail.
self.typical_distribution_ratio = None
self._done = None
self._total_chars = None
self._freq_chars = None
self.reset()
def reset(self):
"""reset analyser, clear any state"""
# If this flag is set to True, detection is done and conclusion has
# been made
self._done = False
self._total_chars = 0 # Total characters encountered
# The number of characters whose frequency order is less than 512
self._freq_chars = 0
def feed(self, char, char_len):
"""feed a character with known length"""
if char_len == 2:
# we only care about 2-bytes character in our distribution analysis
order = self.get_order(char)
else:
order = -1
if order >= 0:
self._total_chars += 1
# order is valid
if order < self._table_size:
if 512 > self._char_to_freq_order[order]:
self._freq_chars += 1
def get_confidence(self):
"""return confidence based on existing data"""
# if we didn't receive any character in our consideration range,
# return negative answer
if self._total_chars <= 0 or self._freq_chars <= self.MINIMUM_DATA_THRESHOLD:
return self.SURE_NO
if self._total_chars != self._freq_chars:
r = self._freq_chars / (
(self._total_chars - self._freq_chars) * self.typical_distribution_ratio
)
if r < self.SURE_YES:
return r
# normalize confidence (we don't want to be 100% sure)
return self.SURE_YES
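    # Worked example of the formula above (illustrative numbers only): with
    # _total_chars = 1000, _freq_chars = 600 and a typical_distribution_ratio
    # of 6.0, r = 600 / ((1000 - 600) * 6.0) = 0.25, and 0.25 is returned;
    # with _freq_chars = 990 the ratio exceeds SURE_YES, so the capped value
    # 0.99 is returned instead.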
def got_enough_data(self):
# It is not necessary to receive all data to draw conclusion.
# For charset detection, certain amount of data is enough
return self._total_chars > self.ENOUGH_DATA_THRESHOLD
def get_order(self, byte_str):
# We do not handle characters based on the original encoding string,
# but convert this encoding string to a number, here called order.
# This allows multiple encodings of a language to share one frequency
# table.
return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
super(EUCTWDistributionAnalysis, self).__init__()
self._char_to_freq_order = EUCTW_CHAR_TO_FREQ_ORDER
self._table_size = EUCTW_TABLE_SIZE
self.typical_distribution_ratio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, byte_str):
# for euc-TW encoding, we are interested
# first byte range: 0xc4 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = byte_str[0]
if first_char >= 0xC4:
return 94 * (first_char - 0xC4) + byte_str[1] - 0xA1
else:
return -1
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
super(EUCKRDistributionAnalysis, self).__init__()
self._char_to_freq_order = EUCKR_CHAR_TO_FREQ_ORDER
self._table_size = EUCKR_TABLE_SIZE
self.typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, byte_str):
# for euc-KR encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = byte_str[0]
if first_char >= 0xB0:
return 94 * (first_char - 0xB0) + byte_str[1] - 0xA1
else:
return -1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
super(GB2312DistributionAnalysis, self).__init__()
self._char_to_freq_order = GB2312_CHAR_TO_FREQ_ORDER
self._table_size = GB2312_TABLE_SIZE
self.typical_distribution_ratio = GB2312_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, byte_str):
# for GB2312 encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = byte_str[0], byte_str[1]
if (first_char >= 0xB0) and (second_char >= 0xA1):
return 94 * (first_char - 0xB0) + second_char - 0xA1
else:
return -1
class Big5DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
super(Big5DistributionAnalysis, self).__init__()
self._char_to_freq_order = BIG5_CHAR_TO_FREQ_ORDER
self._table_size = BIG5_TABLE_SIZE
self.typical_distribution_ratio = BIG5_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, byte_str):
# for big5 encoding, we are interested
# first byte range: 0xa4 -- 0xfe
# second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = byte_str[0], byte_str[1]
if first_char >= 0xA4:
if second_char >= 0xA1:
return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
else:
return 157 * (first_char - 0xA4) + second_char - 0x40
else:
return -1
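    # Illustration of the mapping above: the byte pair (0xA4, 0x40) maps to
    # 157*(0xA4-0xA4) + (0x40-0x40) = 0, and (0xA4, 0xA1) maps to
    # 157*0 + (0xA1-0xA1) + 63 = 63. The low (0x40--0x7e, 63 values) and the
    # high (0xa1--0xfe, 94 values) second-byte ranges are thus packed into one
    # contiguous row of 157 slots per lead byte.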
class SJISDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
super(SJISDistributionAnalysis, self).__init__()
self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
self._table_size = JIS_TABLE_SIZE
self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, byte_str):
# for sjis encoding, we are interested
# first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
        # second byte range: 0x40 -- 0x7e, 0x81 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = byte_str[0], byte_str[1]
if (first_char >= 0x81) and (first_char <= 0x9F):
order = 188 * (first_char - 0x81)
elif (first_char >= 0xE0) and (first_char <= 0xEF):
order = 188 * (first_char - 0xE0 + 31)
else:
return -1
order = order + second_char - 0x40
if second_char > 0x7F:
order = -1
return order
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
super(EUCJPDistributionAnalysis, self).__init__()
self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
self._table_size = JIS_TABLE_SIZE
self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, byte_str):
# for euc-JP encoding, we are interested
# first byte range: 0xa0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
char = byte_str[0]
if char >= 0xA0:
return 94 * (char - 0xA1) + byte_str[1] - 0xa1
else:
return -1
| 37.516
| 88
| 0.653588
|
4a152ef4a15a376f807afd03aa71888fd8e5abd3
| 1,471
|
py
|
Python
|
wifipumpkin3/plugins/captiveflask/DarkLogin.py
|
rafonsecad/wifipumpkin3
|
a59e950062a07d54dc32f124fbb658c573accb35
|
[
"Apache-2.0"
] | 2
|
2021-05-28T16:47:17.000Z
|
2022-03-14T02:02:33.000Z
|
wifipumpkin3/plugins/captiveflask/DarkLogin.py
|
4n6strider/wifipumpkin3
|
809baef0c8116410a26f6b263a457f0a1d7f98b9
|
[
"Apache-2.0"
] | 1
|
2021-02-10T16:12:08.000Z
|
2021-02-10T16:12:08.000Z
|
wifipumpkin3/plugins/captiveflask/DarkLogin.py
|
4n6strider/wifipumpkin3
|
809baef0c8116410a26f6b263a457f0a1d7f98b9
|
[
"Apache-2.0"
] | 1
|
2021-03-27T14:30:11.000Z
|
2021-03-27T14:30:11.000Z
|
import re
from ast import literal_eval
from wifipumpkin3.plugins.captiveflask.plugin import CaptiveTemplatePlugin
import wifipumpkin3.core.utility.constants as C
# This file is part of the wifipumpkin3 Open Source Project.
# wifipumpkin3 is licensed under the Apache 2.0.
# Copyright 2020 P0cL4bs Team - Marcos Bomfim (mh4x0f)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class DarkLogin(CaptiveTemplatePlugin):
meta = {
"Name": "DarkLogin",
"Version": "1.0",
"Description": "Example is a simple portal default page",
"Author": "Pumpkin-Dev",
"TemplatePath": C.TEMPLATES_FLASK + "templates/DarkLogin",
"StaticPath": C.TEMPLATES_FLASK + "templates/DarkLogin/static",
"Preview": "plugins/captivePortal/templates/DarkLogin/preview.png",
}
def __init__(self):
for key, value in self.meta.items():
self.__dict__[key] = value
self.dict_domain = {}
self.ConfigParser = False
| 36.775
| 75
| 0.717199
|
4a152f155212227dc270e96f702c5b3cf3cec0f9
| 21,431
|
py
|
Python
|
rpToolServe.py
|
Galaxy-SynBioCAD/rpReader_image
|
bf3c99cbda063001980fbc40463fd45bd2cb88f3
|
[
"MIT"
] | null | null | null |
rpToolServe.py
|
Galaxy-SynBioCAD/rpReader_image
|
bf3c99cbda063001980fbc40463fd45bd2cb88f3
|
[
"MIT"
] | null | null | null |
rpToolServe.py
|
Galaxy-SynBioCAD/rpReader_image
|
bf3c99cbda063001980fbc40463fd45bd2cb88f3
|
[
"MIT"
] | null | null | null |
import os
import copy
import sys
import io
import tarfile
import libsbml
import glob
import tempfile
import logging
import shutil
sys.path.insert(0, '/home/')
import rpTool as rpReader
import rpCache
# TODO: need to fix the input
def rp2Reader_mem(rpreader,
rp2_pathways,
rp2paths_pathways,
rp2paths_compounds,
upper_flux_bound,
lower_flux_bound,
maxRuleIds,
pathway_id,
compartment_id,
species_group_id,
sink_species_group_id,
pubchem_search,
outputTar):
"""The main function to parse the files without writing to HDD
:param rpreader: rpReader object
:param rp2_pathways: The RetroPath2.0 results scope file
:param rp2paths_pathways: The rp2paths result pathway (out_paths) file
:param rp2paths_compounds: The rp2paths result compounds file
:param upper_flux_bound: The default upper flux bound (Default: 999999)
:param lower_flux_bound: The default lower flux bound (Default: 0)
:param maxRuleIds: The maximal number of rules associated with each step (Default: 2)
:param pathway_id: The Groups heterologous pathway id (Default: rp_pathway)
:param compartment_id: The compartment SBML id (Default: MNXC3)
:param species_group_id: The Groups id of the central species (Default: central_species)
:param sink_species_group_id: The Groups id of the rp_sink_species (Default: rp_sink_species)
:param pubchem_search: Use the pubchem database to search for missing cross reference (Default: False)
:param outputTar: The output collection of rpSBML files
:type rpreader: rpReader
:type rp2_pathways: str
:type rp2paths_pathways: str
:type rp2paths_compounds: str
:type upper_flux_bound: int
:type lower_flux_bound: int
:type maxRuleIds: int
:type pathway_id: str
:type compartment_id: str
:type species_group_id: str
:type sink_species_group_id: str
:type pubchem_search: bool
:type outputTar: str
:rtype: bool
:return: Success or failure of the function
"""
rpsbml_paths = rpreader.rp2ToSBML(rp2_pathways,
rp2paths_pathways,
rp2paths_compounds,
None,
upper_flux_bound,
lower_flux_bound,
maxRuleIds,
pathway_id,
compartment_id,
species_group_id,
sink_species_group_id,
pubchem_search)
#pass the SBML results to a tar
if rpsbml_paths=={}:
logging.error('rpReader did not generate any results')
return False
#outputTar = io.BytesIO()
#with open(outputTar, 'w:xz') as tf:
with tarfile.open(fileobj=outputTar, mode='w:gz') as tf:
for rpsbml_name in rpsbml_paths:
data = libsbml.writeSBMLToString(rpsbml_paths[rpsbml_name].document).encode('utf-8')
fiOut = io.BytesIO(data)
info = tarfile.TarInfo(name=rpsbml_name)
info.size = len(data)
tf.addfile(tarinfo=info, fileobj=fiOut)
return True
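# The in-memory tar pattern used above, reduced to a self-contained sketch
# (standard library only; the member name and payload are placeholders):
#
#     import io, tarfile
#     buf = io.BytesIO()
#     with tarfile.open(fileobj=buf, mode='w:gz') as tf:
#         data = b'<sbml/>'
#         info = tarfile.TarInfo(name='pathway_1.sbml.xml')
#         info.size = len(data)
#         tf.addfile(tarinfo=info, fileobj=io.BytesIO(data))
#     buf.seek(0)  # rewind before handing the archive to the caller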
def rp2Reader_hdd(rpreader,
rp2_pathways,
rp2paths_pathways,
rp2paths_compounds,
upper_flux_bound,
lower_flux_bound,
maxRuleIds,
pathway_id,
compartment_id,
species_group_id,
sink_species_group_id,
pubchem_search,
outputTar):
"""The main function to parse the files by writing them to HDD
:param rpreader: rpReader object
:param rp2_pathways: The RetroPath2.0 results scope file
:param rp2paths_pathways: The rp2paths result pathway (out_paths) file
:param rp2paths_compounds: The rp2paths result compounds file
:param upper_flux_bound: The default upper flux bound (Default: 999999)
:param lower_flux_bound: The default lower flux bound (Default: 0)
:param maxRuleIds: The maximal number of rules associated with each step (Default: 2)
:param pathway_id: The Groups heterologous pathway id (Default: rp_pathway)
:param compartment_id: The compartment SBML id (Default: MNXC3)
:param species_group_id: The Groups id of the central species (Default: central_species)
:param sink_species_group_id: The Groups id of the rp_sink_species (Default: rp_sink_species)
:param pubchem_search: Use the pubchem database to search for missing cross reference (Default: False)
:param outputTar: The output collection of rpSBML files
:type rpreader: rpReader
:type rp2_pathways: str
:type rp2paths_pathways: str
:type rp2paths_compounds: str
:type upper_flux_bound: int
:type lower_flux_bound: int
:type maxRuleIds: int
:type pathway_id: str
:type compartment_id: str
:type species_group_id: str
:type sink_species_group_id: str
:type pubchem_search: bool
:type outputTar: str
:rtype: bool
:return: Success or failure of the function
"""
logging.debug(maxRuleIds)
# check that the files are not empty
if sum(1 for line in open(rp2paths_compounds))<=1:
logging.error('RP2paths compounds is empty')
return False
if sum(1 for line in open(rp2_pathways))<=1:
logging.error('RP2 pathways is empty')
return False
if sum(1 for line in open(rp2paths_pathways))<=1:
logging.error('RP2paths pathways is empty')
return False
with tempfile.TemporaryDirectory() as tmpOutputFolder:
#Note the return here is {} and thus we can ignore it
rpsbml_paths = rpreader.rp2ToSBML(rp2_pathways,
rp2paths_pathways,
rp2paths_compounds,
tmpOutputFolder,
upper_flux_bound,
lower_flux_bound,
maxRuleIds,
pathway_id,
compartment_id,
species_group_id,
sink_species_group_id,
pubchem_search)
if len(glob.glob(tmpOutputFolder+'/*'))==0:
logging.error('rpReader did not generate any results')
return False
with tarfile.open(outputTar, mode='w:gz') as ot:
for sbml_path in glob.glob(tmpOutputFolder+'/*'):
fileName = str(sbml_path.split('/')[-1].replace('.sbml', '').replace('.rpsbml', '').replace('.xml', ''))+'.sbml.xml'
info = tarfile.TarInfo(fileName)
info.size = os.path.getsize(sbml_path)
ot.addfile(tarinfo=info, fileobj=open(sbml_path, 'rb'))
return True
'''
# DEPRECATED: needs to be implemented
def main_string(outputTar,
upper_flux_bound=999999,
lower_flux_bound=0,
maxRuleIds=2,
compartment_id='MNXC3',
pathway_id='rp_pathway',
species_group_id='central_species'):
#pass the cache parameters to the rpReader
rpcache = rpCache.rpCache()
rpreader = rpReader.rpReader()
rpreader.deprecatedCID_cid = rpcache.getDeprecatedCID()
rpreader.deprecatedRID_rid = rpcache.getDeprecatedRID()
rpreader.cid_strc = rpcache.getCIDstrc()
rpreader.inchikey_cid = rpcache.getInchiKeyCID()
rpreader.rr_reactions = rpcache.getRRreactions()
rpreader.cid_xref = rpcache.getCIDxref()
rpreader.xref_comp, rpreader.comp_xref = rpcache.getCompXref()
rpreader.chebi_cid = rpcache.getChebiCID()
rpreader.cid_name = rpcache.getCIDname()
outputTar_bytes = io.BytesIO()
#### MEM #####
"""
if not rp2Reader_mem(rpreader,
rp2_pathways,
rp2paths_pathways,
rp2paths_compounds,
int(upper_flux_bound),
int(lower_flux_bound),
int(maxRuleIds),
pathway_id,
compartment_id,
species_group_id,
outputTar):
abort(204)
"""
#### HDD #####
isOK = rp2Reader_hdd(rpreader,
rp2_pathways,
rp2paths_pathways,
rp2paths_compounds,
int(upper_flux_bound),
int(lower_flux_bound),
int(maxRuleIds),
pathway_id,
compartment_id,
species_group_id,
sink_species_group_id,
pubchem_search,
outputTar_bytes)
if not isOK:
logging.error('Function returned an error')
########IMPORTANT######
outputTar_bytes.seek(0)
#######################
with open(outputTar, 'wb') as f:
shutil.copyfileobj(outputTar_bytes, f, length=131072)
'''
#TODO: change pathway_id to pathway_group_id
def main_rp2(outputTar,
rp2_pathways,
rp2paths_pathways,
rp2paths_compounds,
upper_flux_bound=999999,
lower_flux_bound=0,
maxRuleIds=2,
compartment_id='MNXC3',
pathway_id='rp_pathway',
species_group_id='central_species',
sink_species_group_id='rp_sink_species',
pubchem_search=False):
"""Function parse the results of RetroPath2.0 and rp2paths
:param outputTar: The output collection of rpSBML files
:param rp2_pathways: The RetroPath2.0 results scope file
:param rp2paths_pathways: The rp2paths result pathway (out_paths) file
:param rp2paths_compounds: The rp2paths result compounds file
:param upper_flux_bound: The default upper flux bound (Default: 999999)
:param lower_flux_bound: The default lower flux bound (Default: 0)
:param maxRuleIds: The maximal number of rules associated with each step (Default: 2)
:param compartment_id: The compartment SBML id (Default: MNXC3)
:param pathway_id: The Groups heterologous pathway id (Default: rp_pathway)
:param species_group_id: The Groups id of the central species (Default: central_species)
:param sink_species_group_id: The Groups id of the rp_sink_species (Default: rp_sink_species)
:param pubchem_search: Use the pubchem database to search for missing cross reference (Default: False)
:type outputTar: str
:type rp2_pathways: str
:type rp2paths_pathways: str
:type rp2paths_compounds: str
:type upper_flux_bound: int
:type lower_flux_bound: int
:type maxRuleIds: int
:type compartment_id: str
:type pathway_id: str
:type species_group_id: str
:type sink_species_group_id: str
:type pubchem_search: bool
:rtype: None
:return: None
"""
#pass the cache parameters to the rpReader
rpreader = rpReader.rpReader()
#rpcache = rpToolCache.rpToolCache()
rpcache = rpCache.rpCache()
rpreader.deprecatedCID_cid = rpcache.getDeprecatedCID()
rpreader.deprecatedRID_rid = rpcache.getDeprecatedRID()
rpreader.cid_strc = rpcache.getCIDstrc()
rpreader.inchikey_cid = rpcache.getInchiKeyCID()
rpreader.rr_reactions = rpcache.getRRreactions()
rpreader.cid_xref = rpcache.getCIDxref()
rpreader.xref_comp, rpreader.comp_xref = rpcache.getCompXref()
rpreader.chebi_cid = rpcache.getChebiCID()
rpreader.cid_name = rpcache.getCIDname()
#outputTar_bytes = io.BytesIO()
#### MEM #####
"""
if not rp2Reader_mem(rpreader,
rp2_pathways,
rp2paths_pathways,
rp2paths_compounds,
int(upper_flux_bound),
int(lower_flux_bound),
int(maxRuleIds),
pathway_id,
compartment_id,
species_group_id,
outputTar):
abort(204)
"""
#### HDD #####
isOK = rp2Reader_hdd(rpreader,
rp2_pathways,
rp2paths_pathways,
rp2paths_compounds,
int(upper_flux_bound),
int(lower_flux_bound),
int(maxRuleIds),
pathway_id,
compartment_id,
species_group_id,
sink_species_group_id,
pubchem_search,
outputTar)
if not isOK:
logging.error('Function returned an error')
"""
########IMPORTANT######
outputTar_bytes.seek(0)
#######################
with open(outputTar, 'wb') as f:
shutil.copyfileobj(outputTar_bytes, f, length=131072)
"""
#TODO: need to fix for the new input
#TODO: add the pubchem search
def main_tsv(outputTar,
tsvfile,
upper_flux_bound=999999,
lower_flux_bound=0,
compartment_id='MNXC3',
pathway_id='rp_pathway',
species_group_id='central_species',
sink_species_group_id='rp_sink_species'):
"""Function parse a defined TSV file to convert to rpSBML files
:param outputTar: The output collection of rpSBML files
:param tsvfile: The TSV of pathway to be parsed
:param upper_flux_bound: The default upper flux bound (Default: 999999)
:param lower_flux_bound: The default lower flux bound (Default: 0)
:param compartment_id: The compartment SBML id (Default: MNXC3)
:param pathway_id: The Groups heterologous pathway id (Default: rp_pathway)
:param species_group_id: The Groups id of the central species (Default: central_species)
:param sink_species_group_id: The Groups id of the rp_sink_species (Default: rp_sink_species)
:type outputTar: str
:type tsvfile: str
:type upper_flux_bound: int
:type lower_flux_bound: int
:type compartment_id: str
:type pathway_id: str
:type species_group_id: str
:type sink_species_group_id: str
:rtype: bool
:return: Success or failure of the function
"""
#pass the cache parameters to the rpReader
rpreader = rpReader.rpReader()
#rpcache = rpToolCache.rpToolCache()
rpcache = rpCache.rpCache()
rpreader.deprecatedCID_cid = rpcache.getDeprecatedCID()
rpreader.deprecatedRID_rid = rpcache.getDeprecatedRID()
rpreader.cid_strc = rpcache.getCIDstrc()
rpreader.inchikey_cid = rpcache.getInchiKeyCID()
rpreader.rr_reactions = rpcache.getRRreactions()
rpreader.cid_xref = rpcache.getCIDxref()
rpreader.xref_comp, rpreader.comp_xref = rpcache.getCompXref()
rpreader.chebi_cid = rpcache.getChebiCID()
rpreader.cid_name = rpcache.getCIDname()
with tempfile.TemporaryDirectory() as tmpOutputFolder:
rpreader.TSVtoSBML(tsvfile,
tmpOutputFolder,
upper_flux_bound,
lower_flux_bound,
compartment_id,
pathway_id,
species_group_id)
if len(glob.glob(tmpOutputFolder+'/*'))==0:
logging.error('rpReader did not generate any results')
return False
with tarfile.open(outputTar, mode='w:gz') as ot:
for sbml_path in glob.glob(tmpOutputFolder+'/*'):
fileName = str(sbml_path.split('/')[-1].replace('.sbml', '').replace('.rpsbml', '').replace('.xml', ''))+'.sbml.xml'
info = tarfile.TarInfo(fileName)
info.size = os.path.getsize(sbml_path)
ot.addfile(tarinfo=info, fileobj=open(sbml_path, 'rb'))
return True
#TODO: change pathway_id to pathway_group_id
def main_extrules(outputTar,
rp2_pathways,
rp2paths_pathways,
rp2paths_compounds,
rules_rall_tsv,
compounds_tsv,
upper_flux_bound=999999,
lower_flux_bound=0,
maxRuleIds=2,
compartment_id='MNXC3',
pathway_id='rp_pathway',
species_group_id='central_species',
sink_species_group_id='rp_sink_species',
pubchem_search=False):
"""Function parse the results of RetroPath2.0 and rp2paths including external rules file
:param outputTar: The output collection of rpSBML files
:param rp2_pathways: The RetroPath2.0 results scope file
:param rp2paths_pathways: The rp2paths result pathway (out_paths) file
:param rp2paths_compounds: The rp2paths result compounds file
:param rules_rall_tsv: The rules file
:param compounds_tsv: The compound file
:param upper_flux_bound: The default upper flux bound (Default: 999999)
:param lower_flux_bound: The default lower flux bound (Default: 0)
:param maxRuleIds: The maximal number of rules associated with each step (Default: 2)
:param compartment_id: The compartment SBML id (Default: MNXC3)
:param pathway_id: The Groups heterologous pathway id (Default: rp_pathway)
:param species_group_id: The Groups id of the central species (Default: central_species)
:param sink_species_group_id: The Groups id of the rp_sink_species (Default: rp_sink_species)
:param pubchem_search: Use the pubchem database to search for missing cross reference (Default: False)
:type outputTar: str
:type rp2_pathways: str
:type rp2paths_pathways: str
:type rp2paths_compounds: str
:type rules_rall_tsv: str
:type compounds_tsv: str
:type upper_flux_bound: int
:type lower_flux_bound: int
:type maxRuleIds: int
:type compartment_id: str
:type pathway_id: str
:type species_group_id: str
:type sink_species_group_id: str
:type pubchem_search: bool
:rtype: None
:return: None
"""
#pass the cache parameters to the rpReader
rpreader = rpReader.rpReader()
##### parse and merge the input files ####
rpcache = rpCache.rpCache()
#if you want to merge
'''
#compounds strc
rpcache.retroRulesStrc(compounds_tsv)
new_cid_strc = copy.deepcopy(rpcache.cid_strc)
rpcache.cid_strc = {**rpcache.getCIDstrc(), **new_cid_strc}
rpcache._inchikeyCID()
rpreader.cid_strc = rpcache.cid_strc
rpreader.inchikey_cid = rpcache.inchikey_cid
#reaction rules
rpcache.retroReactions(rules_rall_tsv)
new_rr_reactions = copy.deepcopy(rpcache.rr_reactions)
rpreader.rr_reactions = {**rpcache.getRRreactions(), **new_rr_reactions}
'''
#if you want to overwrite
#compounds strc
rpcache.retroRulesStrc(compounds_tsv)
new_cid_strc = copy.deepcopy(rpcache.cid_strc)
rpcache.cid_strc = {**rpcache.getCIDstrc(), **new_cid_strc}
rpcache._inchikeyCID()
rpreader.cid_strc = rpcache.cid_strc
rpreader.inchikey_cid = rpcache.inchikey_cid
#reaction rules
rpcache.retroReactions(rules_rall_tsv)
rpreader.rr_reactions = rpcache.rr_reactions
####
rpreader.deprecatedCID_cid = rpcache.getDeprecatedCID()
rpreader.deprecatedRID_rid = rpcache.getDeprecatedRID()
rpreader.inchikey_cid = rpcache.getInchiKeyCID()
rpreader.cid_xref = rpcache.getCIDxref()
rpreader.xref_comp, rpreader.comp_xref = rpcache.getCompXref()
rpreader.chebi_cid = rpcache.getChebiCID()
rpreader.cid_name = rpcache.getCIDname()
#outputTar_bytes = io.BytesIO()
#### MEM #####
"""
if not rp2Reader_mem(rpreader,
rp2_pathways,
rp2paths_pathways,
rp2paths_compounds,
int(upper_flux_bound),
int(lower_flux_bound),
int(maxRuleIds),
pathway_id,
compartment_id,
species_group_id,
outputTar):
abort(204)
"""
#### HDD #####
isOK = rp2Reader_hdd(rpreader,
rp2_pathways,
rp2paths_pathways,
rp2paths_compounds,
int(upper_flux_bound),
int(lower_flux_bound),
int(maxRuleIds),
pathway_id,
compartment_id,
species_group_id,
sink_species_group_id,
pubchem_search,
outputTar)
if not isOK:
logging.error('Function returned an error')
"""
########IMPORTANT######
outputTar_bytes.seek(0)
#######################
with open(outputTar, 'wb') as f:
shutil.copyfileobj(outputTar_bytes, f, length=131072) """
| 40.743346
| 132
| 0.606225
|
4a152f75fa1ef38a8c852c06b67f6cdb6e204faa
| 1,004
|
py
|
Python
|
fridge_web/my_fridge/migrations/0003_auto_20200810_1127.py
|
logiflo/snowplow-embeded-fridge
|
8d356f8c5f225de7a50c04ac9a88c4b3ae89d7cd
|
[
"BSD-2-Clause"
] | 1
|
2020-08-28T08:32:35.000Z
|
2020-08-28T08:32:35.000Z
|
my_fridge/migrations/0003_auto_20200810_1127.py
|
logiflo/fridge-django
|
07fe585d65698ac78a2499ec1674738859324d05
|
[
"BSD-2-Clause"
] | null | null | null |
my_fridge/migrations/0003_auto_20200810_1127.py
|
logiflo/fridge-django
|
07fe585d65698ac78a2499ec1674738859324d05
|
[
"BSD-2-Clause"
] | null | null | null |
# Generated by Django 3.1 on 2020-08-10 11:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('my_fridge', '0002_auto_20200810_1053'),
]
operations = [
migrations.AlterModelOptions(
name='food',
options={'verbose_name_plural': 'food'},
),
migrations.CreateModel(
name='Quantity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('units', models.CharField(choices=[('kilogram', 'Kg'), ('gram', 'g'), ('litre', 'L'), ('unit', 'unit')], max_length=15)),
('quant', models.FloatField()),
('food', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='my_fridge.food')),
],
options={
'verbose_name_plural': 'quantities',
},
),
]
| 32.387097
| 138
| 0.557769
|
4a153074107b94debb3f76f20b9ce040b32a4562
| 7,431
|
py
|
Python
|
bigml/tests/test_43_linear.py
|
bikramkharal/python-1
|
106c5cfbae94ef8e3a13fb8fa9899f7f03a5d192
|
[
"Apache-2.0"
] | 1
|
2021-07-08T07:45:23.000Z
|
2021-07-08T07:45:23.000Z
|
bigml/tests/test_43_linear.py
|
sxfmol/python-1
|
d3de9617c91909b9923e80a1401f12c77114d0aa
|
[
"Apache-2.0"
] | 6
|
2016-10-27T18:26:12.000Z
|
2017-10-03T22:54:20.000Z
|
bigml/tests/test_43_linear.py
|
sxfmol/python-1
|
d3de9617c91909b9923e80a1401f12c77114d0aa
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2019-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Creating Linear Regression
"""
from .world import world, setup_module, teardown_module
from . import create_source_steps as source_create
from . import create_dataset_steps as dataset_create
from . import create_linear_steps as linear_create
from . import create_prediction_steps as prediction_create
from . import create_batch_prediction_steps as batch_pred_create
class TestLinearRegression(object):
def setup(self):
"""
Debug information
"""
print("\n-------------------\nTests in: %s\n" % __name__)
def teardown(self):
"""
Debug information
"""
print("\nEnd of tests in: %s\n-------------------\n" % __name__)
def test_scenario1(self):
"""
Scenario: Successfully creating a linear regression from a dataset:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a linear regression from a dataset
And I wait until the linear regression is ready less than <time_3> secs
And I update the linear regression name to "<linear_name>"
When I wait until the linear regression is ready less than <time_4> secs
Then the linear regression name is "<linear_name>"
Examples:
| data | time_1 | time_2 | time_3 | time_4 | linear_name |
| ../data/iris.csv | 10 | 10 | 20 | 20 | my new linear regression name |
"""
print(self.test_scenario1.__doc__)
examples = [
['data/grades.csv', '100', '100', '200', '200', 'my new linear regression name']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
linear_create.i_create_a_linear_regression_from_dataset(self)
linear_create.the_linear_regression_is_finished_in_less_than(self, example[3])
linear_create.i_update_linear_regression_name(self, example[5])
linear_create.the_linear_regression_is_finished_in_less_than(self, example[4])
linear_create.i_check_linear_name(self, example[5])
print("\nEnd of tests in: %s\n-------------------\n" % __name__)
def test_scenario2(self):
"""
Scenario: Successfully creating a prediction from linear regression:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
                And I create a linear regression with objective and parameters
And I wait until the linear regression is ready less than <time_3> secs
When I create a prediction for "<data_input>"
Then the prediction is "<prediction>"
Examples:
| data | time_1 | time_2 | time_3 | data_input |objective | prediction |
"""
print(self.test_scenario2.__doc__)
examples = [
['data/grades.csv', '30', '30', '30', '{"000000": 0.5, "000001": 1, "000002": 1, "000003": 1}', "000005", '2.27312', '{}'],
['data/grades.csv', '30', '30', '30', '{"000000": 0.5, "000001": 1, "000002": 1, "000003": 1}', "000005", '8.19619', '{"bias": false}'],
['data/dates.csv', '30', '30', '30', '{"test-num1": 23, "test-num2" : 54, "test-date.day-of-month":2, "test-date.month":12, "test-date.day-of-week": 2, "test-date.year": 2012}', "000003", '48.27679', '{"bias": false}']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
linear_create.i_create_a_linear_regression_with_objective_and_params(self, example[5], example[7])
linear_create.the_linear_regression_is_finished_in_less_than(self, example[3])
prediction_create.i_create_a_linear_prediction(self, example[4])
prediction_create.the_prediction_is(self, example[5], example[6])
print("\nEnd of tests in: %s\n-------------------\n" % __name__)
def test_scenario3(self):
"""
Scenario: Successfully creating a batch prediction from a linear regression:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a linear regression
And I wait until the linear regression is ready less than <time_3> secs
When I create a batch prediction for the dataset with the linear regression
                And I wait until the batch prediction is ready less than <time_4> secs
And I download the created predictions file to "<local_file>"
Then the batch prediction file is like "<predictions_file>"
Examples:
| data | time_1 | time_2 | time_3 | time_4 | local_file | predictions_file |
"""
print(self.test_scenario3.__doc__)
examples = [
['data/grades.csv', '30', '30', '50', '50', 'tmp/batch_predictions.csv', 'data/batch_predictions_linear.csv']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
linear_create.i_create_a_linear_regression_from_dataset(self)
linear_create.the_linear_regression_is_finished_in_less_than(self, example[3])
batch_pred_create.i_create_a_linear_batch_prediction(self)
batch_pred_create.the_batch_prediction_is_finished_in_less_than(self, example[4])
batch_pred_create.i_download_predictions_file(self, example[5])
batch_pred_create.i_check_predictions(self, example[6])
| 51.248276
| 231
| 0.628852
|
4a153075f85a55a26927674417f1f9fdfc0903b4
| 6,483
|
py
|
Python
|
timedependent/exp2b.py
|
BarbaraV/gridlod-timedependent
|
7ac1f3ade1a6a47b02e1d7ea1c79ef18d8084cce
|
[
"BSD-2-Clause"
] | null | null | null |
timedependent/exp2b.py
|
BarbaraV/gridlod-timedependent
|
7ac1f3ade1a6a47b02e1d7ea1c79ef18d8084cce
|
[
"BSD-2-Clause"
] | null | null | null |
timedependent/exp2b.py
|
BarbaraV/gridlod-timedependent
|
7ac1f3ade1a6a47b02e1d7ea1c79ef18d8084cce
|
[
"BSD-2-Clause"
] | null | null | null |
import numpy as np
import scipy.io as sio
from gridlod.world import World
from gridlod import fem, util
import algorithms, timestepping
NFine = np.array([512,512])
NpFine = np.prod(NFine+1)
T= 1.
deltatref = 1./128.
NTimeref = int(T/deltatref)
dim = np.size(NFine)
boundaryConditions = np.array([[0, 0], [0, 0]])
xpFine = util.pCoordinates(NFine)
#inspired by Ammari, Hiltunen - discontinuous in space
b = 0.5
om = 9.
Nepsilon=np.array([128,128])
amult = lambda x: algorithms.build_inclusions_defect_2d(x,Nepsilon,1.,10.)
modelList = ['add', 'local']
def afunc_local(x,t):
dom = ((x[:, 0] > 0.25) & (x[:, 0] < 0.75) & (x[:, 1] > 0.25) & (x[:, 1] < 0.75)).astype(float)
return amult(x) * (1+b*np.cos(om*t)*dom)
afunc_add = lambda x,t: amult(x)+1+b*np.cos(om*t)
afuncList = {'local': afunc_local, 'add': afunc_add}
u0 = np.zeros_like(xpFine[:,0])
v0 = np.zeros_like(xpFine[:,0])
ffunc = lambda x, t: np.sin(np.pi*x[:,0])*np.sin(np.pi*x[:,1])*(5*t+50*t**2)
NList = [4,8,16,32]
kList = [1,2,2,3]
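# The integrators called below (timestepping.implicitMidpoint_*) are based on
# the implicit midpoint rule. A minimal sketch of that rule on the scalar test
# problem y' = lam*y (illustrative helper only, not used by the experiment):
def _implicit_midpoint_scalar(lam, y0, dt, nsteps):
    # y_{n+1} = y_n + dt*lam*(y_n + y_{n+1})/2
    #      <=>  y_{n+1} = y_n*(1 + dt*lam/2)/(1 - dt*lam/2)
    y = y0
    for _ in range(nsteps):
        y = y * (1.0 + 0.5 * dt * lam) / (1.0 - 0.5 * dt * lam)
    return y
# e.g. _implicit_midpoint_scalar(-1.0, 1.0, 1.0/128, 128) approximates exp(-1).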
# LOD solution - error for fixed space discretization and variable tol
NCoarse = np.array([32,32])
k = 3
tolList = [-0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.01]
deltat = 1./64.
NTime = int(T/deltat)
for model in modelList:
relL2u_tol = np.zeros(len(tolList))
relH1u_tol = np.zeros(len(tolList))
relL2v_tol = np.zeros(len(tolList))
relenergy_tol = np.zeros(len(tolList))
outdict = {'maxtol': [], 'avupdate': []}
afunc = afuncList[model]
NCoarseElement = NFine // NCoarse
world = World(NCoarse, NCoarseElement, boundaryConditions)
xpCoarse = util.pCoordinates(NCoarse)
for ii in range(len(tolList)):
tol = tolList[ii]
if ii == 0: #only compute refsol once
#reference solution
outRef = timestepping.implicitMidpoint_fem(world, ffunc, afunc, u0, v0, deltatref, NTimeref, coarse=False, storev=True)
uRef = outRef[0]
vRef = outRef[1]
MFull = fem.assemblePatchMatrix(world.NWorldFine, world.MLocFine)
AoneFull = fem.assemblePatchMatrix(world.NWorldFine, world.ALocFine) #could also include coeff a (e.g. at end time) here
uRef_l2norm = np.sqrt(np.dot(MFull*uRef, uRef))
uRef_h1norm = np.sqrt(np.dot(AoneFull*uRef, uRef))
vRef_l2norm = np.sqrt(np.dot(MFull*vRef, vRef))
vRef_h1norm = np.sqrt(np.dot(AoneFull*vRef, vRef))
print('reference solution computed')
outadap = timestepping.implicitMidpoint_lod(world, ffunc, afunc, u0, v0, deltat, NTime, k, tol, scale=True, storev=True,
outdict = outdict)
uLodadap = outadap[0]
vLodadap = outadap[1]
l2erru = np.sqrt(np.dot(MFull*(uRef-uLodadap), uRef-uLodadap))
print('LOD for Nc = {}, deltat = {}, tol = {}'.format(NCoarse[0], deltat, tol))
print('absolute L2 error in u: {}'.format(l2erru))
print('relative L2 error in u: {}'.format(l2erru/uRef_l2norm))
h1erru = np.sqrt(np.dot(AoneFull*(uRef-uLodadap), uRef-uLodadap))
print('absolute H1 error in u: {}'.format(h1erru))
print('relative H1 error in u: {}'.format(h1erru/uRef_h1norm))
l2errv = np.sqrt(np.dot(MFull*(vRef-vLodadap), vRef-vLodadap))
print('absolute L2 error in v: {}'.format(l2errv))
print('relative L2 error in v: {}'.format(l2errv/vRef_l2norm))
h1errv = np.sqrt(np.dot(AoneFull*(vRef-vLodadap), vRef-vLodadap))
print('absolute H1 error in v: {}'.format(h1errv))
print('relative H1 error in v: {}'.format(h1errv/vRef_h1norm))
        print('relative error in energy norm: {}'.format(np.sqrt(h1erru**2+l2errv**2)/np.sqrt(uRef_h1norm**2+vRef_l2norm**2)))
relL2u_tol[ii] = l2erru/uRef_l2norm
relH1u_tol[ii] = h1erru/uRef_h1norm
relL2v_tol[ii] = l2errv/vRef_l2norm
        relenergy_tol[ii] = np.sqrt(h1erru**2+l2errv**2)/np.sqrt(uRef_h1norm**2+vRef_l2norm**2)
    # LOD solution - error for fixed tol
deltat = 1./64.
NTime = int(T/deltat)
tol = 0.5
relL2u_space = np.zeros(len(NList))
relH1u_space = np.zeros(len(NList))
relL2v_space = np.zeros(len(NList))
relenergy_space = np.zeros(len(NList))
for ii in range(len(NList)):
NCoarse = np.array([NList[ii],NList[ii]])
k = kList[ii]
NCoarseElement = NFine // NCoarse
world = World(NCoarse, NCoarseElement, boundaryConditions)
xpCoarse = util.pCoordinates(NCoarse)
outadap = timestepping.implicitMidpoint_lod(world, ffunc, afunc, u0, v0, deltat, NTime, k, tol, scale=True, storev=True)
uLodadap = outadap[0]
vLodadap = outadap[1]
l2erru = np.sqrt(np.dot(MFull*(uRef-uLodadap), uRef-uLodadap))
print('LOD for Nc = {}, deltat = {}, tol = {}'.format(NCoarse[0], deltat, tol))
print('absolute L2 error in u: {}'.format(l2erru))
print('relative L2 error in u: {}'.format(l2erru/uRef_l2norm))
h1erru = np.sqrt(np.dot(AoneFull*(uRef-uLodadap), uRef-uLodadap))
print('absolute H1 error in u: {}'.format(h1erru))
print('relative H1 error in u: {}'.format(h1erru/uRef_h1norm))
l2errv = np.sqrt(np.dot(MFull*(vRef-vLodadap), vRef-vLodadap))
print('absolute L2 error in v: {}'.format(l2errv))
print('relative L2 error in v: {}'.format(l2errv/vRef_l2norm))
h1errv = np.sqrt(np.dot(AoneFull*(vRef-vLodadap), vRef-vLodadap))
print('absolute H1 error in v: {}'.format(h1errv))
print('relative H1 error in v: {}'.format(h1errv/vRef_h1norm))
        print('relative error in energy norm: {}'.format(np.sqrt(h1erru**2+l2errv**2)/np.sqrt(uRef_h1norm**2+vRef_l2norm**2)))
relL2u_space[ii] = l2erru/uRef_l2norm
relH1u_space[ii] = h1erru/uRef_h1norm
relL2v_space[ii] = l2errv/vRef_l2norm
        relenergy_space[ii] = np.sqrt(h1erru**2+l2errv**2)/np.sqrt(uRef_h1norm**2+vRef_l2norm**2)
sio.savemat('exp2b_'+model+'_errs_space.mat', {'NList': NList, 'L2u': relL2u_space, 'H1u': relH1u_space,
'L2vs': relL2v_space, 'energy': relenergy_space})
sio.savemat('exp2b_'+model+'_errs_tol.mat', {'tolList': tolList, 'L2u': relL2u_tol, 'H1u': relH1u_tol,
'L2vs': relL2v_tol, 'energy': relenergy_tol,
'maxtol': outdict['maxtol'], 'avupdate': outdict['avupdate']})
| 41.031646
| 132
| 0.621163
|
4a1530af608dadb0c3fd6d15ca34141ea5467dd9
| 9,344
|
py
|
Python
|
tb/pcie_us_axi_master_wr/test_pcie_us_axi_master_wr.py
|
totuwei/verilog-pcie
|
17d7353523b5c7f4dd1c08c6709829893b82f12c
|
[
"MIT"
] | null | null | null |
tb/pcie_us_axi_master_wr/test_pcie_us_axi_master_wr.py
|
totuwei/verilog-pcie
|
17d7353523b5c7f4dd1c08c6709829893b82f12c
|
[
"MIT"
] | null | null | null |
tb/pcie_us_axi_master_wr/test_pcie_us_axi_master_wr.py
|
totuwei/verilog-pcie
|
17d7353523b5c7f4dd1c08c6709829893b82f12c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Copyright (c) 2020 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import itertools
import logging
import os
import re
from contextlib import contextmanager
import cocotb_test.simulator
import pytest
import cocotb
from cocotb.triggers import RisingEdge, FallingEdge, Timer
from cocotb.regression import TestFactory
from cocotbext.axi import AxiStreamBus
from cocotbext.pcie.core import RootComplex
from cocotbext.pcie.xilinx.us import UltraScalePlusPcieDevice
from cocotbext.axi import AxiWriteBus, AxiRamWrite
@contextmanager
def assert_raises(exc_type, pattern=None):
try:
yield
except exc_type as e:
if pattern:
assert re.match(pattern, str(e)), \
"Correct exception type caught, but message did not match pattern"
pass
else:
raise AssertionError("{} was not raised".format(exc_type.__name__))
class TB(object):
def __init__(self, dut):
self.dut = dut
self.log = logging.getLogger("cocotb.tb")
self.log.setLevel(logging.DEBUG)
# PCIe
self.rc = RootComplex()
self.dev = UltraScalePlusPcieDevice(
# configuration options
pcie_generation=3,
# pcie_link_width=2,
# user_clk_frequency=250e6,
alignment="dword",
cq_cc_straddle=False,
rq_rc_straddle=False,
rc_4tlp_straddle=False,
enable_pf1=False,
enable_client_tag=True,
enable_extended_tag=False,
enable_parity=False,
enable_rx_msg_interface=False,
enable_sriov=False,
enable_extended_configuration=False,
enable_pf0_msi=True,
enable_pf1_msi=False,
# signals
user_clk=dut.clk,
user_reset=dut.rst,
cq_bus=AxiStreamBus.from_prefix(dut, "s_axis_cq")
)
self.dev.log.setLevel(logging.DEBUG)
self.dev.functions[0].configure_bar(0, 16*1024*1024)
self.dev.functions[0].configure_bar(1, 16*1024, io=True)
self.rc.make_port().connect(self.dev)
# AXI
self.axi_ram = AxiRamWrite(AxiWriteBus.from_prefix(dut, "m_axi"), dut.clk, dut.rst, size=2**16)
# monitor error outputs
self.status_error_uncor_asserted = False
cocotb.fork(self._run_monitor_status_error_uncor())
def set_idle_generator(self, generator=None):
if generator:
self.dev.cq_source.set_pause_generator(generator())
self.axi_ram.b_channel.set_pause_generator(generator())
def set_backpressure_generator(self, generator=None):
if generator:
self.axi_ram.aw_channel.set_pause_generator(generator())
self.axi_ram.w_channel.set_pause_generator(generator())
async def _run_monitor_status_error_uncor(self):
while True:
await RisingEdge(self.dut.status_error_uncor)
self.log.info("status_error_uncor (uncorrectable error) was asserted")
self.status_error_uncor_asserted = True
async def run_test_write(dut, idle_inserter=None, backpressure_inserter=None):
tb = TB(dut)
byte_lanes = tb.axi_ram.byte_lanes
tb.set_idle_generator(idle_inserter)
tb.set_backpressure_generator(backpressure_inserter)
await FallingEdge(dut.rst)
await Timer(100, 'ns')
await tb.rc.enumerate()
dev_bar0 = tb.rc.tree[0][0].bar_window[0]
for length in list(range(0, byte_lanes*2))+[1024]:
for pcie_offset in list(range(byte_lanes))+list(range(4096-byte_lanes, 4096)):
tb.log.info("length %d, pcie_offset %d", length, pcie_offset)
pcie_addr = pcie_offset+0x1000
test_data = bytearray([x % 256 for x in range(length)])
tb.axi_ram.write(pcie_addr-128, b'\x55'*(len(test_data)+256))
await dev_bar0.write(pcie_addr, test_data)
await Timer(length*4+150, 'ns')
tb.log.debug("%s", tb.axi_ram.hexdump_str((pcie_addr & ~0xf)-16, (((pcie_addr & 0xf)+length-1) & ~0xf)+48, prefix="AXI "))
assert tb.axi_ram.read(pcie_addr-1, len(test_data)+2) == b'\x55'+test_data+b'\x55'
assert not tb.status_error_uncor_asserted
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
async def run_test_bad_ops(dut, idle_inserter=None, backpressure_inserter=None):
tb = TB(dut)
tb.set_idle_generator(idle_inserter)
tb.set_backpressure_generator(backpressure_inserter)
await FallingEdge(dut.rst)
await Timer(100, 'ns')
await tb.rc.enumerate()
dev_bar0 = tb.rc.tree[0][0].bar_window[0]
dev_bar1 = tb.rc.tree[0][0].bar_window[1]
tb.log.info("Test read")
length = 4
pcie_addr = 0x1000
test_data = bytearray([x % 256 for x in range(length)])
tb.axi_ram.write(pcie_addr-128, b'\x55'*(len(test_data)+256))
tb.axi_ram.write(pcie_addr, test_data)
tb.log.debug("%s", tb.axi_ram.hexdump_str((pcie_addr & ~0xf)-16, (((pcie_addr & 0xf)+length-1) & ~0xf)+48, prefix="AXI "))
with assert_raises(Exception, "Timeout"):
val = await dev_bar0.read(pcie_addr, len(test_data), timeout=1000, timeout_unit='ns')
assert tb.status_error_uncor_asserted
tb.status_error_uncor_asserted = False
tb.log.info("Test IO write")
length = 4
pcie_addr = 0x1000
test_data = bytearray([x % 256 for x in range(length)])
tb.axi_ram.write(pcie_addr-128, b'\x55'*(len(test_data)+256))
with assert_raises(Exception, "Timeout"):
await dev_bar1.write(pcie_addr, test_data, timeout=1000, timeout_unit='ns')
await Timer(100, 'ns')
tb.log.debug("%s", tb.axi_ram.hexdump_str((pcie_addr & ~0xf)-16, (((pcie_addr & 0xf)+length-1) & ~0xf)+48, prefix="AXI "))
assert tb.axi_ram.read(pcie_addr-1, len(test_data)+2) == b'\x55'*(len(test_data)+2)
assert tb.status_error_uncor_asserted
tb.status_error_uncor_asserted = False
tb.log.info("Test IO read")
length = 4
pcie_addr = 0x1000
test_data = bytearray([x % 256 for x in range(length)])
tb.axi_ram.write(pcie_addr-128, b'\x55'*(len(test_data)+256))
tb.axi_ram.write(pcie_addr, test_data)
tb.log.debug("%s", tb.axi_ram.hexdump_str((pcie_addr & ~0xf)-16, (((pcie_addr & 0xf)+length-1) & ~0xf)+48, prefix="AXI "))
with assert_raises(Exception, "Timeout"):
val = await dev_bar1.read(pcie_addr, len(test_data), timeout=1000, timeout_unit='ns')
assert tb.status_error_uncor_asserted
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
def cycle_pause():
return itertools.cycle([1, 1, 1, 0])
if cocotb.SIM_NAME:
for test in [run_test_write, run_test_bad_ops]:
factory = TestFactory(test)
factory.add_option(("idle_inserter", "backpressure_inserter"), [(None, None), (cycle_pause, cycle_pause)])
factory.generate_tests()
# cocotb-test
tests_dir = os.path.dirname(__file__)
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
@pytest.mark.parametrize("axis_pcie_data_width", [64, 128, 256, 512])
def test_pcie_us_axi_master_wr(request, axis_pcie_data_width):
dut = "pcie_us_axi_master_wr"
module = os.path.splitext(os.path.basename(__file__))[0]
toplevel = dut
verilog_sources = [
os.path.join(rtl_dir, f"{dut}.v"),
]
parameters = {}
parameters['AXIS_PCIE_DATA_WIDTH'] = axis_pcie_data_width
parameters['AXIS_PCIE_KEEP_WIDTH'] = parameters['AXIS_PCIE_DATA_WIDTH'] // 32
parameters['AXIS_PCIE_CQ_USER_WIDTH'] = 88 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 183
parameters['AXI_DATA_WIDTH'] = parameters['AXIS_PCIE_DATA_WIDTH']
parameters['AXI_ADDR_WIDTH'] = 64
parameters['AXI_STRB_WIDTH'] = parameters['AXI_DATA_WIDTH'] // 8
parameters['AXI_ID_WIDTH'] = 8
parameters['AXI_MAX_BURST_LEN'] = 256
extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
extra_env['COCOTB_RESOLVE_X'] = 'RANDOM'
sim_build = os.path.join(tests_dir, "sim_build",
request.node.name.replace('[', '-').replace(']', ''))
cocotb_test.simulator.run(
python_search=[tests_dir],
verilog_sources=verilog_sources,
toplevel=toplevel,
module=module,
parameters=parameters,
sim_build=sim_build,
extra_env=extra_env,
)
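# Outside of cocotb's own regression flow, the parametrized cases above are
# typically launched through pytest, e.g. (assuming cocotb-test and a
# supported simulator are installed):
#
#     pytest -k test_pcie_us_axi_master_wr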
| 31.782313
| 134
| 0.6814
|
4a15312f6a9b13aca5c56e4b3393c20da3bfd9a0
| 1,856
|
py
|
Python
|
examples/aiohttp_example.py
|
gugu/aiozipkin
|
c5ba4891e48c48e108ce1190ef4d1d68c82c34fa
|
[
"Apache-2.0"
] | null | null | null |
examples/aiohttp_example.py
|
gugu/aiozipkin
|
c5ba4891e48c48e108ce1190ef4d1d68c82c34fa
|
[
"Apache-2.0"
] | null | null | null |
examples/aiohttp_example.py
|
gugu/aiozipkin
|
c5ba4891e48c48e108ce1190ef4d1d68c82c34fa
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import aiozipkin as az
from aiohttp import web
async def handle(request):
tracer = az.get_tracer(request.app)
span = az.request_span(request)
with tracer.new_child(span.context) as child_span:
child_span.name('mysql:select')
# call to external service like https://python.org
# or database query
await asyncio.sleep(0.01)
text = """
<html lang="en">
<head>
<title>aiohttp simple example</title>
</head>
<body>
<h3>This page was traced by aiozipkin</h3>
<p><a href="http://127.0.0.1:9001/status">Go to not traced page</a></p>
</body>
</html>
"""
return web.Response(text=text, content_type='text/html')
async def not_traced_handle(request):
text = """
<html lang="en">
<head>
<title>aiohttp simple example</title>
</head>
<body>
    <h3>This page was NOT traced by aiozipkin</h3>
<p><a href="http://127.0.0.1:9001">Go to traced page</a></p>
</body>
</html>
"""
return web.Response(text=text, content_type='text/html')
async def make_app(host, port):
app = web.Application()
app.router.add_get('/', handle)
    # here we acquire a reference to the route, so that later we can tell
    # aiozipkin not to trace it
skip_route = app.router.add_get('/status', not_traced_handle)
endpoint = az.create_endpoint(
'aiohttp_server', ipv4=host, port=port)
zipkin_address = 'http://127.0.0.1:9411'
tracer = await az.create(zipkin_address, endpoint, sample_rate=1.0)
az.setup(app, tracer, skip_routes=[skip_route])
return app
def run():
host = '127.0.0.1'
port = 9001
loop = asyncio.get_event_loop()
app = loop.run_until_complete(make_app(host, port))
web.run_app(app, host=host, port=port)
if __name__ == '__main__':
run()
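# Rough usage sketch (assumptions, not part of the original example): with a
# Zipkin collector reachable at the zipkin_address configured above
# (http://127.0.0.1:9411), for instance started via Docker with
#   docker run -d -p 9411:9411 openzipkin/zipkin
# running this script and opening http://127.0.0.1:9001/ should record a trace
# containing the 'mysql:select' child span, while http://127.0.0.1:9001/status
# stays untraced because it was registered through skip_routes.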
| 25.777778
| 79
| 0.62931
|
4a153251463f1495da41a1b90dbc0c26f79510b9
| 520
|
py
|
Python
|
kazuate.py
|
okutani-t/python-code
|
af923a0865f11f3ec3d0bbe16248bef0e7e211e2
|
[
"MIT"
] | null | null | null |
kazuate.py
|
okutani-t/python-code
|
af923a0865f11f3ec3d0bbe16248bef0e7e211e2
|
[
"MIT"
] | null | null | null |
kazuate.py
|
okutani-t/python-code
|
af923a0865f11f3ec3d0bbe16248bef0e7e211e2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Simple number-guessing game (Python 2): the player has five tries to guess
# a random number between 1 and 50.
import random
i = random.randint(1, 50)   # the answer
n = 0                       # the player's latest guess
count = 5                   # remaining tries
# Loop while the player's guess n differs from the answer i
while n != i:
    print "You have " + str(count) + " tries left"
    count -= 1
    # int(raw_input(...)) is safer than input() in Python 2, which eval()s the text
    n = int(raw_input("Please Input Number 1-50:"))
    if count == 0 and n != i:
        break
    if n > i:
        if n == i + 1:
            print "Close! Just a little smaller!"
        else:
            print "Go smaller!"
    elif n < i:
        if n == i - 1:
            print "Close! Just a little bigger!"
        else:
            print "Go bigger!"
    else:
        pass
if count == 0 and n != i:
    print "***** Too bad! The answer was " + str(i) + "! *****"
else:
    print "***** Correct! It was " + str(i) + "! *****"
| 16.774194
| 41
| 0.565385
|
4a15339993d21f7ec445b2fc2a516cff9eebcc81
| 3,241
|
py
|
Python
|
pymc3/glm/families.py
|
acolombi/pymc3
|
3cb45700156b63e786eb70909d3e1d6e1f21703a
|
[
"Apache-2.0"
] | 1
|
2018-06-11T03:13:00.000Z
|
2018-06-11T03:13:00.000Z
|
pymc3/glm/families.py
|
acolombi/pymc3
|
3cb45700156b63e786eb70909d3e1d6e1f21703a
|
[
"Apache-2.0"
] | 2
|
2017-03-02T05:56:13.000Z
|
2019-12-06T19:15:42.000Z
|
pymc3/glm/families.py
|
acolombi/pymc3
|
3cb45700156b63e786eb70909d3e1d6e1f21703a
|
[
"Apache-2.0"
] | 1
|
2018-10-08T10:27:35.000Z
|
2018-10-08T10:27:35.000Z
|
import numbers
import numpy as np
from copy import copy
import theano.tensor as tt
from ..model import modelcontext
from .. import distributions as pm_dists
__all__ = ['Normal', 'StudentT', 'Binomial', 'Poisson', 'NegativeBinomial']
# Define link functions
# Hack as assigning a function in the class definition automatically binds
# it as a method.
class Identity():
def __call__(self, x):
return x
identity = Identity()
logit = tt.nnet.sigmoid
inverse = tt.inv
exp = tt.exp
class Family(object):
"""Base class for Family of likelihood distribution and link functions.
"""
priors = {}
link = None
def __init__(self, **kwargs):
# Overwrite defaults
for key, val in kwargs.items():
if key == 'priors':
self.priors = copy(self.priors)
self.priors.update(val)
else:
setattr(self, key, val)
def _get_priors(self, model=None, name=''):
"""Return prior distributions of the likelihood.
Returns
-------
dict : mapping name -> pymc3 distribution
"""
if name:
name = '{}_'.format(name)
model = modelcontext(model)
priors = {}
for key, val in self.priors.items():
if isinstance(val, (numbers.Number, np.ndarray, np.generic)):
priors[key] = val
else:
priors[key] = model.Var('{}{}'.format(name, key), val)
return priors
def create_likelihood(self, name, y_est, y_data, model=None):
"""Create likelihood distribution of observed data.
Parameters
----------
y_est : theano.tensor
Estimate of dependent variable
y_data : array
Observed dependent variable
"""
priors = self._get_priors(model=model, name=name)
# Wrap y_est in link function
priors[self.parent] = self.link(y_est)
if name:
name = '{}_'.format(name)
return self.likelihood('{}y'.format(name), observed=y_data, **priors)
def __repr__(self):
return """Family {klass}:
Likelihood : {likelihood}({parent})
Priors : {priors}
Link function: {link}.""".format(klass=self.__class__, likelihood=self.likelihood.__name__, parent=self.parent, priors=self.priors, link=self.link)
class StudentT(Family):
link = identity
likelihood = pm_dists.StudentT
parent = 'mu'
priors = {'lam': pm_dists.HalfCauchy.dist(beta=10, testval=1.),
'nu': 1}
class Normal(Family):
link = identity
likelihood = pm_dists.Normal
parent = 'mu'
priors = {'sd': pm_dists.HalfCauchy.dist(beta=10, testval=1.)}
class Binomial(Family):
link = logit
likelihood = pm_dists.Binomial
parent = 'p'
priors = {'n': 1}
class Poisson(Family):
link = exp
likelihood = pm_dists.Poisson
parent = 'mu'
priors = {'mu': pm_dists.HalfCauchy.dist(beta=10, testval=1.)}
class NegativeBinomial(Family):
link = exp
likelihood = pm_dists.NegativeBinomial
parent = 'mu'
priors = {'mu': pm_dists.HalfCauchy.dist(beta=10, testval=1.),
'alpha': pm_dists.HalfCauchy.dist(beta=10, testval=1.)}
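# A minimal usage sketch (not part of the original module): Family.__init__
# copies the class-level priors dict and merges a user-supplied 'priors'
# keyword, while any other keyword simply overwrites the matching attribute
# (e.g. the link function). The beta=2.5 value below is an arbitrary choice
# for illustration.
#
#   custom_normal = Normal(priors={'sd': pm_dists.HalfCauchy.dist(beta=2.5, testval=1.)})
#   # custom_normal.priors now holds the overridden 'sd' prior; the class-level
#   # Normal.priors stays untouched because of the copy() in Family.__init__.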
| 26.785124
| 151
| 0.603517
|
4a15340c5bc3ecca94d19009ce0690e4221ee508
| 16,713
|
py
|
Python
|
library/azure_rm_trafficmanagerprofile.py
|
joaocc/azure_preview_modules
|
2413dafa6f979a2070843b073830901cc1b1d868
|
[
"MIT"
] | 46
|
2018-01-24T08:39:15.000Z
|
2021-08-20T04:41:16.000Z
|
library/azure_rm_trafficmanagerprofile.py
|
joaocc/azure_preview_modules
|
2413dafa6f979a2070843b073830901cc1b1d868
|
[
"MIT"
] | 226
|
2017-12-12T21:46:31.000Z
|
2022-02-18T05:17:03.000Z
|
library/azure_rm_trafficmanagerprofile.py
|
joaocc/azure_preview_modules
|
2413dafa6f979a2070843b073830901cc1b1d868
|
[
"MIT"
] | 60
|
2018-01-25T10:03:59.000Z
|
2022-03-08T10:19:54.000Z
|
#!/usr/bin/python
#
# Copyright (c) 2018 Hai Cao, <t-haicao@microsoft.com> Yunge Zhu <yungez@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_trafficmanagerprofile
version_added: "2.7"
short_description: Manage Azure Traffic Manager profile
description:
- Create, update and delete a Traffic Manager profile.
options:
resource_group:
description:
- Name of a resource group where the Traffic Manager profile exists or will be created.
required: true
name:
description:
- Name of the Traffic Manager profile.
required: true
state:
description:
- Assert the state of the Traffic Manager profile. Use C(present) to create or update a Traffic Manager profile and C(absent) to delete it.
default: present
choices:
- absent
- present
location:
description:
- Valid Azure location. Defaults to C(global) because in default public Azure cloud, Traffic Manager profile can only be deployed globally.
- Reference U(https://docs.microsoft.com/en-us/azure/traffic-manager/quickstart-create-traffic-manager-profile#create-a-traffic-manager-profile).
default: global
profile_status:
description:
- The status of the Traffic Manager profile.
default: enabled
choices:
- enabled
- disabled
routing_method:
description:
- The traffic routing method of the Traffic Manager profile.
default: performance
choices:
- performance
- priority
- weighted
- geographic
dns_config:
description:
- The DNS settings of the Traffic Manager profile.
suboptions:
relative_name:
description:
- The relative DNS name provided by this Traffic Manager profile.
- If not provided, name of the Traffic Manager will be used.
ttl:
description:
- The DNS Time-To-Live (TTL), in seconds.
type: int
default: 60
monitor_config:
description:
- The endpoint monitoring settings of the Traffic Manager profile.
suboptions:
protocol:
description:
- The protocol C(HTTP), C(HTTPS) or C(TCP) used to probe for endpoint health.
choices:
- HTTP
- HTTPS
- TCP
port:
description:
- The TCP port used to probe for endpoint health.
path:
description:
- The path relative to the endpoint domain name used to probe for endpoint health.
interval:
description:
- The monitor interval for endpoints in this profile in seconds.
type: int
timeout:
description:
- The monitor timeout for endpoints in this profile in seconds.
type: int
tolerated_failures:
description:
                    - The number of consecutive failed health checks that Traffic Manager tolerates before declaring an endpoint in this profile Degraded after the next failed health check.
default:
protocol: HTTP
port: 80
path: /
extends_documentation_fragment:
- azure
- azure_tags
author:
- Hai Cao (@caohai)
- Yunge Zhu (@yungezz)
'''
EXAMPLES = '''
- name: Create a Traffic Manager Profile
azure_rm_trafficmanagerprofile:
name: tmtest
resource_group: myResourceGroup
location: global
profile_status: enabled
routing_method: priority
dns_config:
relative_name: tmtest
ttl: 60
monitor_config:
protocol: HTTPS
port: 80
path: '/'
tags:
Environment: Test
- name: Delete a Traffic Manager Profile
azure_rm_trafficmanagerprofile:
state: absent
name: tmtest
resource_group: myResourceGroup
'''
RETURN = '''
id:
description:
- The ID of the traffic manager profile.
returned: when traffic manager profile exists
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/tmt/providers/Microsoft.Network/trafficManagerProfiles/tmtest"
endpoints:
description:
- List of endpoint IDs attached to the profile.
returned: when traffic manager endpoints exists
type: list
    sample: [
        "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/tmt/providers/Microsoft.Network/trafficManagerProfiles/tm049b1ae293/externalEndpoints/e2",
        "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/tmt/providers/Microsoft.Network/trafficManagerProfiles/tm049b1ae293/externalEndpoints/e1"
    ]
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, normalize_location_name
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.trafficmanager.models import (
Profile, Endpoint, DnsConfig, MonitorConfig
)
except ImportError:
# This is handled in azure_rm_common
pass
def shorten_traffic_manager_dict(tmd):
return dict(
id=tmd['id'],
endpoints=[endpoint['id'] for endpoint in tmd['endpoints']] if tmd['endpoints'] else []
)
def traffic_manager_profile_to_dict(tmp):
result = dict(
id=tmp.id,
name=tmp.name,
type=tmp.type,
tags=tmp.tags,
location=tmp.location,
profile_status=tmp.profile_status,
routing_method=tmp.traffic_routing_method,
dns_config=dict(),
monitor_config=dict(),
endpoints=[]
)
if tmp.dns_config:
result['dns_config']['relative_name'] = tmp.dns_config.relative_name
result['dns_config']['fqdn'] = tmp.dns_config.fqdn
result['dns_config']['ttl'] = tmp.dns_config.ttl
if tmp.monitor_config:
result['monitor_config']['profile_monitor_status'] = tmp.monitor_config.profile_monitor_status
result['monitor_config']['protocol'] = tmp.monitor_config.protocol
result['monitor_config']['port'] = tmp.monitor_config.port
result['monitor_config']['path'] = tmp.monitor_config.path
result['monitor_config']['interval'] = tmp.monitor_config.interval_in_seconds
result['monitor_config']['timeout'] = tmp.monitor_config.timeout_in_seconds
result['monitor_config']['tolerated_failures'] = tmp.monitor_config.tolerated_number_of_failures
if tmp.endpoints:
for endpoint in tmp.endpoints:
result['endpoints'].append(dict(
id=endpoint.id,
name=endpoint.name,
type=endpoint.type,
target_resource_id=endpoint.target_resource_id,
target=endpoint.target,
endpoint_status=endpoint.endpoint_status,
weight=endpoint.weight,
priority=endpoint.priority,
endpoint_location=endpoint.endpoint_location,
endpoint_monitor_status=endpoint.endpoint_monitor_status,
min_child_endpoints=endpoint.min_child_endpoints,
geo_mapping=endpoint.geo_mapping
))
return result
def create_dns_config_instance(dns_config):
return DnsConfig(
relative_name=dns_config['relative_name'],
ttl=dns_config['ttl']
)
def create_monitor_config_instance(monitor_config):
return MonitorConfig(
profile_monitor_status=monitor_config['profile_monitor_status'],
protocol=monitor_config['protocol'],
port=monitor_config['port'],
path=monitor_config['path'],
interval_in_seconds=monitor_config['interval'],
timeout_in_seconds=monitor_config['timeout'],
tolerated_number_of_failures=monitor_config['tolerated_failures']
)
dns_config_spec = dict(
relative_name=dict(type='str'),
ttl=dict(type='int')
)
monitor_config_spec = dict(
profile_monitor_status=dict(type='str'),
protocol=dict(type='str'),
port=dict(type='int'),
path=dict(type='str'),
interval=dict(type='int'),
timeout=dict(type='int'),
tolerated_failures=dict(type='int')
)
class AzureRMTrafficManagerProfile(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
),
location=dict(
type='str',
default='global'
),
profile_status=dict(
type='str',
default='enabled',
choices=['enabled', 'disabled']
),
routing_method=dict(
type='str',
default='performance',
choices=['performance', 'priority', 'weighted', 'geographic']
),
dns_config=dict(
type='dict',
options=dns_config_spec
),
monitor_config=dict(
type='dict',
default=dict(
protocol='HTTP',
port=80,
path='/'
),
options=monitor_config_spec
),
)
self.resource_group = None
self.name = None
self.state = None
self.tags = None
self.location = None
self.profile_status = None
self.routing_method = None
self.dns_config = None
self.monitor_config = None
self.endpoints_copy = None
self.results = dict(
changed=False
)
super(AzureRMTrafficManagerProfile, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
to_be_updated = False
if not self.dns_config:
self.dns_config = dict(
relative_name=self.name,
ttl=60
)
if not self.location:
self.location = 'global'
response = self.get_traffic_manager_profile()
if self.state == 'present':
if not response:
to_be_updated = True
else:
self.results = shorten_traffic_manager_dict(response)
self.log('Results : {0}'.format(response))
update_tags, response['tags'] = self.update_tags(response['tags'])
if update_tags:
to_be_updated = True
to_be_updated = to_be_updated or self.check_update(response)
if to_be_updated:
self.log("Need to Create / Update the Traffic Manager profile")
if not self.check_mode:
self.results = shorten_traffic_manager_dict(self.create_update_traffic_manager_profile())
self.log("Creation / Update done.")
self.results['changed'] = True
return self.results
elif self.state == 'absent' and response:
self.log("Need to delete the Traffic Manager profile")
self.results = shorten_traffic_manager_dict(response)
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_traffic_manager_profile()
self.log("Traffic Manager profile deleted")
return self.results
def get_traffic_manager_profile(self):
'''
Gets the properties of the specified Traffic Manager profile
:return: deserialized Traffic Manager profile dict
'''
self.log("Checking if Traffic Manager profile {0} is present".format(self.name))
try:
response = self.traffic_manager_management_client.profiles.get(self.resource_group, self.name)
self.log("Response : {0}".format(response))
self.log("Traffic Manager profile : {0} found".format(response.name))
self.endpoints_copy = response.endpoints if response and response.endpoints else None
return traffic_manager_profile_to_dict(response)
except CloudError:
self.log('Did not find the Traffic Manager profile.')
return False
def delete_traffic_manager_profile(self):
'''
Deletes the specified Traffic Manager profile in the specified subscription and resource group.
:return: True
'''
self.log("Deleting the Traffic Manager profile {0}".format(self.name))
try:
operation_result = self.traffic_manager_management_client.profiles.delete(self.resource_group, self.name)
return True
except CloudError as e:
self.log('Error attempting to delete the Traffic Manager profile.')
self.fail("Error deleting the Traffic Manager profile: {0}".format(e.message))
return False
def create_update_traffic_manager_profile(self):
'''
Creates or updates a Traffic Manager profile.
:return: deserialized Traffic Manager profile state dictionary
'''
self.log("Creating / Updating the Traffic Manager profile {0}".format(self.name))
parameters = Profile(
tags=self.tags,
location=self.location,
profile_status=self.profile_status,
traffic_routing_method=self.routing_method,
dns_config=create_dns_config_instance(self.dns_config) if self.dns_config else None,
monitor_config=create_monitor_config_instance(self.monitor_config) if self.monitor_config else None,
endpoints=self.endpoints_copy
)
try:
response = self.traffic_manager_management_client.profiles.create_or_update(self.resource_group, self.name, parameters)
return traffic_manager_profile_to_dict(response)
except CloudError as exc:
self.log('Error attempting to create the Traffic Manager.')
self.fail("Error creating the Traffic Manager: {0}".format(exc.message))
def check_update(self, response):
if self.location and normalize_location_name(response['location']) != normalize_location_name(self.location):
self.log("Location Diff - Origin {0} / Update {1}".format(response['location'], self.location))
return True
if self.profile_status and response['profile_status'].lower() != self.profile_status:
self.log("Profile Status Diff - Origin {0} / Update {1}".format(response['profile_status'], self.profile_status))
return True
if self.routing_method and response['routing_method'].lower() != self.routing_method:
self.log("Traffic Routing Method Diff - Origin {0} / Update {1}".format(response['routing_method'], self.routing_method))
return True
if self.dns_config and \
(response['dns_config']['relative_name'] != self.dns_config['relative_name'] or response['dns_config']['ttl'] != self.dns_config['ttl']):
self.log("DNS Config Diff - Origin {0} / Update {1}".format(response['dns_config'], self.dns_config))
return True
for k, v in self.monitor_config.items():
if v:
if str(v).lower() != str(response['monitor_config'][k]).lower():
self.log("Monitor Config Diff - Origin {0} / Update {1}".format(response['monitor_config'], self.monitor_config))
return True
return False
def main():
"""Main execution"""
AzureRMTrafficManagerProfile()
if __name__ == '__main__':
main()
| 35.941935
| 157
| 0.603841
|
4a1535c7fd167a40b61b6989ab2bf43e78c76f26
| 34,045
|
py
|
Python
|
scipy/fftpack/tests/test_basic.py
|
clementkng/scipy
|
8e40c067b64016dc344c537655f56454fd1788ba
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/fftpack/tests/test_basic.py
|
clementkng/scipy
|
8e40c067b64016dc344c537655f56454fd1788ba
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/fftpack/tests/test_basic.py
|
clementkng/scipy
|
8e40c067b64016dc344c537655f56454fd1788ba
|
[
"BSD-3-Clause"
] | null | null | null |
# Created by Pearu Peterson, September 2002
from __future__ import division, print_function, absolute_import
__usage__ = """
Build fftpack:
python setup_fftpack.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.fftpack.test()'
Run tests if fftpack is not installed:
python tests/test_basic.py
"""
from numpy.testing import (assert_, assert_equal, assert_array_almost_equal,
assert_array_almost_equal_nulp, assert_array_less)
import pytest
from pytest import raises as assert_raises
from scipy.fftpack import ifft, fft, fftn, ifftn, rfft, irfft, fft2
from scipy.fftpack import _fftpack as fftpack
from scipy.fftpack.basic import _is_safe_size
from numpy import (arange, add, array, asarray, zeros, dot, exp, pi,
swapaxes, double, cdouble)
import numpy as np
import numpy.fft
from numpy.random import rand
# "large" composite numbers supported by FFTPACK
LARGE_COMPOSITE_SIZES = [
2**13,
2**5 * 3**5,
2**3 * 3**3 * 5**2,
]
SMALL_COMPOSITE_SIZES = [
2,
2*3*5,
2*2*3*3,
]
# prime
LARGE_PRIME_SIZES = [
2011
]
SMALL_PRIME_SIZES = [
29
]
def _assert_close_in_norm(x, y, rtol, size, rdt):
# helper function for testing
err_msg = "size: %s rdt: %s" % (size, rdt)
assert_array_less(np.linalg.norm(x - y), rtol*np.linalg.norm(x), err_msg)
def random(size):
return rand(*size)
def get_mat(n):
data = arange(n)
data = add.outer(data, data)
return data
def direct_dft(x):
x = asarray(x)
n = len(x)
y = zeros(n, dtype=cdouble)
w = -arange(n)*(2j*pi/n)
for i in range(n):
y[i] = dot(exp(i*w), x)
return y
def direct_idft(x):
x = asarray(x)
n = len(x)
y = zeros(n, dtype=cdouble)
w = arange(n)*(2j*pi/n)
for i in range(n):
y[i] = dot(exp(i*w), x)/n
return y
def direct_dftn(x):
x = asarray(x)
for axis in range(len(x.shape)):
x = fft(x, axis=axis)
return x
def direct_idftn(x):
x = asarray(x)
for axis in range(len(x.shape)):
x = ifft(x, axis=axis)
return x
def direct_rdft(x):
x = asarray(x)
n = len(x)
w = -arange(n)*(2j*pi/n)
r = zeros(n, dtype=double)
for i in range(n//2+1):
y = dot(exp(i*w), x)
if i:
r[2*i-1] = y.real
if 2*i < n:
r[2*i] = y.imag
else:
r[0] = y.real
return r
def direct_irdft(x):
x = asarray(x)
n = len(x)
x1 = zeros(n, dtype=cdouble)
for i in range(n//2+1):
if i:
if 2*i < n:
x1[i] = x[2*i-1] + 1j*x[2*i]
x1[n-i] = x[2*i-1] - 1j*x[2*i]
else:
x1[i] = x[2*i-1]
else:
x1[0] = x[0]
return direct_idft(x1).real
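# Note on the packed layout produced by direct_rdft (and scipy.fftpack.rfft):
# the output stores [Re(X_0), Re(X_1), Im(X_1), Re(X_2), Im(X_2), ...], and for
# even n the Nyquist term X_{n/2} contributes only its real part. A small
# worked example, following the DFT definition used above:
#   direct_rdft([1, 2, 3, 4]) -> [10., -2., 2., -2.]
# since X_0 = 10, X_1 = -2 + 2j and X_2 = -2.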
class _TestFFTBase(object):
def setup_method(self):
self.cdt = None
self.rdt = None
np.random.seed(1234)
def test_definition(self):
x = np.array([1,2,3,4+1j,1,2,3,4+2j], dtype=self.cdt)
y = fft(x)
assert_equal(y.dtype, self.cdt)
y1 = direct_dft(x)
assert_array_almost_equal(y,y1)
x = np.array([1,2,3,4+0j,5], dtype=self.cdt)
assert_array_almost_equal(fft(x),direct_dft(x))
def test_n_argument_real(self):
x1 = np.array([1,2,3,4], dtype=self.rdt)
x2 = np.array([1,2,3,4], dtype=self.rdt)
y = fft([x1,x2],n=4)
assert_equal(y.dtype, self.cdt)
assert_equal(y.shape,(2,4))
assert_array_almost_equal(y[0],direct_dft(x1))
assert_array_almost_equal(y[1],direct_dft(x2))
def _test_n_argument_complex(self):
x1 = np.array([1,2,3,4+1j], dtype=self.cdt)
x2 = np.array([1,2,3,4+1j], dtype=self.cdt)
y = fft([x1,x2],n=4)
assert_equal(y.dtype, self.cdt)
assert_equal(y.shape,(2,4))
assert_array_almost_equal(y[0],direct_dft(x1))
assert_array_almost_equal(y[1],direct_dft(x2))
def test_djbfft(self):
for i in range(2,14):
n = 2**i
x = list(range(n))
y = fftpack.zfft(x)
y2 = numpy.fft.fft(x)
assert_array_almost_equal(y,y2)
y = fftpack.zrfft(x)
assert_array_almost_equal(y,y2)
def test_invalid_sizes(self):
assert_raises(ValueError, fft, [])
assert_raises(ValueError, fft, [[1,1],[2,2]], -5)
def test__is_safe_size(self):
vals = [(0, True), (1, True), (2, True), (3, True), (4, True), (5, True), (6, True), (7, False),
(15, True), (16, True), (17, False), (18, True), (21, False), (25, True), (50, True),
(120, True), (210, False)]
for n, is_safe in vals:
assert_equal(_is_safe_size(n), is_safe)
class TestDoubleFFT(_TestFFTBase):
def setup_method(self):
self.cdt = np.cdouble
self.rdt = np.double
class TestSingleFFT(_TestFFTBase):
def setup_method(self):
self.cdt = np.complex64
self.rdt = np.float32
@pytest.mark.xfail(run=False, reason="single-precision FFT implementation is partially disabled, until accuracy issues with large prime powers are resolved")
def test_notice(self):
pass
class TestFloat16FFT(object):
def test_1_argument_real(self):
x1 = np.array([1, 2, 3, 4], dtype=np.float16)
y = fft(x1, n=4)
assert_equal(y.dtype, np.complex64)
assert_equal(y.shape, (4, ))
assert_array_almost_equal(y, direct_dft(x1.astype(np.float32)))
def test_n_argument_real(self):
x1 = np.array([1, 2, 3, 4], dtype=np.float16)
x2 = np.array([1, 2, 3, 4], dtype=np.float16)
y = fft([x1, x2], n=4)
assert_equal(y.dtype, np.complex64)
assert_equal(y.shape, (2, 4))
assert_array_almost_equal(y[0], direct_dft(x1.astype(np.float32)))
assert_array_almost_equal(y[1], direct_dft(x2.astype(np.float32)))
class _TestIFFTBase(object):
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
x = np.array([1,2,3,4+1j,1,2,3,4+2j], self.cdt)
y = ifft(x)
y1 = direct_idft(x)
assert_equal(y.dtype, self.cdt)
assert_array_almost_equal(y,y1)
x = np.array([1,2,3,4+0j,5], self.cdt)
assert_array_almost_equal(ifft(x),direct_idft(x))
def test_definition_real(self):
x = np.array([1,2,3,4,1,2,3,4], self.rdt)
y = ifft(x)
assert_equal(y.dtype, self.cdt)
y1 = direct_idft(x)
assert_array_almost_equal(y,y1)
x = np.array([1,2,3,4,5], dtype=self.rdt)
assert_equal(y.dtype, self.cdt)
assert_array_almost_equal(ifft(x),direct_idft(x))
def test_djbfft(self):
for i in range(2,14):
n = 2**i
x = list(range(n))
y = fftpack.zfft(x,direction=-1)
y2 = numpy.fft.ifft(x)
assert_array_almost_equal(y,y2)
y = fftpack.zrfft(x,direction=-1)
assert_array_almost_equal(y,y2)
def test_random_complex(self):
for size in [1,51,111,100,200,64,128,256,1024]:
x = random([size]).astype(self.cdt)
x = random([size]).astype(self.cdt) + 1j*x
y1 = ifft(fft(x))
y2 = fft(ifft(x))
assert_equal(y1.dtype, self.cdt)
assert_equal(y2.dtype, self.cdt)
assert_array_almost_equal(y1, x)
assert_array_almost_equal(y2, x)
def test_random_real(self):
for size in [1,51,111,100,200,64,128,256,1024]:
x = random([size]).astype(self.rdt)
y1 = ifft(fft(x))
y2 = fft(ifft(x))
assert_equal(y1.dtype, self.cdt)
assert_equal(y2.dtype, self.cdt)
assert_array_almost_equal(y1, x)
assert_array_almost_equal(y2, x)
def test_size_accuracy(self):
# Sanity check for the accuracy for prime and non-prime sized inputs
if self.rdt == np.float32:
rtol = 1e-5
elif self.rdt == np.float64:
rtol = 1e-10
for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
np.random.seed(1234)
x = np.random.rand(size).astype(self.rdt)
y = ifft(fft(x))
_assert_close_in_norm(x, y, rtol, size, self.rdt)
y = fft(ifft(x))
_assert_close_in_norm(x, y, rtol, size, self.rdt)
x = (x + 1j*np.random.rand(size)).astype(self.cdt)
y = ifft(fft(x))
_assert_close_in_norm(x, y, rtol, size, self.rdt)
y = fft(ifft(x))
_assert_close_in_norm(x, y, rtol, size, self.rdt)
def test_invalid_sizes(self):
assert_raises(ValueError, ifft, [])
assert_raises(ValueError, ifft, [[1,1],[2,2]], -5)
class TestDoubleIFFT(_TestIFFTBase):
def setup_method(self):
self.cdt = np.cdouble
self.rdt = np.double
class TestSingleIFFT(_TestIFFTBase):
def setup_method(self):
self.cdt = np.complex64
self.rdt = np.float32
class _TestRFFTBase(object):
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
for t in [[1, 2, 3, 4, 1, 2, 3, 4], [1, 2, 3, 4, 1, 2, 3, 4, 5]]:
x = np.array(t, dtype=self.rdt)
y = rfft(x)
y1 = direct_rdft(x)
assert_array_almost_equal(y,y1)
assert_equal(y.dtype, self.rdt)
def test_djbfft(self):
from numpy.fft import fft as numpy_fft
for i in range(2,14):
n = 2**i
x = list(range(n))
y2 = numpy_fft(x)
y1 = zeros((n,),dtype=double)
y1[0] = y2[0].real
y1[-1] = y2[n//2].real
for k in range(1, n//2):
y1[2*k-1] = y2[k].real
y1[2*k] = y2[k].imag
y = fftpack.drfft(x)
assert_array_almost_equal(y,y1)
def test_invalid_sizes(self):
assert_raises(ValueError, rfft, [])
assert_raises(ValueError, rfft, [[1,1],[2,2]], -5)
# See gh-5790
class MockSeries(object):
def __init__(self, data):
self.data = np.asarray(data)
def __getattr__(self, item):
try:
return getattr(self.data, item)
except AttributeError:
raise AttributeError(("'MockSeries' object "
"has no attribute '{attr}'".
format(attr=item)))
def test_non_ndarray_with_dtype(self):
x = np.array([1., 2., 3., 4., 5.])
xs = _TestRFFTBase.MockSeries(x)
expected = [1, 2, 3, 4, 5]
out = rfft(xs)
# Data should not have been overwritten
assert_equal(x, expected)
assert_equal(xs.data, expected)
class TestRFFTDouble(_TestRFFTBase):
def setup_method(self):
self.cdt = np.cdouble
self.rdt = np.double
class TestRFFTSingle(_TestRFFTBase):
def setup_method(self):
self.cdt = np.complex64
self.rdt = np.float32
class _TestIRFFTBase(object):
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
x1 = [1,2,3,4,1,2,3,4]
x1_1 = [1,2+3j,4+1j,2+3j,4,2-3j,4-1j,2-3j]
x2 = [1,2,3,4,1,2,3,4,5]
x2_1 = [1,2+3j,4+1j,2+3j,4+5j,4-5j,2-3j,4-1j,2-3j]
def _test(x, xr):
y = irfft(np.array(x, dtype=self.rdt))
y1 = direct_irdft(x)
assert_equal(y.dtype, self.rdt)
assert_array_almost_equal(y,y1, decimal=self.ndec)
assert_array_almost_equal(y,ifft(xr), decimal=self.ndec)
_test(x1, x1_1)
_test(x2, x2_1)
def test_djbfft(self):
from numpy.fft import ifft as numpy_ifft
for i in range(2,14):
n = 2**i
x = list(range(n))
x1 = zeros((n,),dtype=cdouble)
x1[0] = x[0]
for k in range(1, n//2):
x1[k] = x[2*k-1]+1j*x[2*k]
x1[n-k] = x[2*k-1]-1j*x[2*k]
x1[n//2] = x[-1]
y1 = numpy_ifft(x1)
y = fftpack.drfft(x,direction=-1)
assert_array_almost_equal(y,y1)
def test_random_real(self):
for size in [1,51,111,100,200,64,128,256,1024]:
x = random([size]).astype(self.rdt)
y1 = irfft(rfft(x))
y2 = rfft(irfft(x))
assert_equal(y1.dtype, self.rdt)
assert_equal(y2.dtype, self.rdt)
assert_array_almost_equal(y1, x, decimal=self.ndec,
err_msg="size=%d" % size)
assert_array_almost_equal(y2, x, decimal=self.ndec,
err_msg="size=%d" % size)
def test_size_accuracy(self):
# Sanity check for the accuracy for prime and non-prime sized inputs
if self.rdt == np.float32:
rtol = 1e-5
elif self.rdt == np.float64:
rtol = 1e-10
for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
np.random.seed(1234)
x = np.random.rand(size).astype(self.rdt)
y = irfft(rfft(x))
_assert_close_in_norm(x, y, rtol, size, self.rdt)
y = rfft(irfft(x))
_assert_close_in_norm(x, y, rtol, size, self.rdt)
def test_invalid_sizes(self):
assert_raises(ValueError, irfft, [])
assert_raises(ValueError, irfft, [[1,1],[2,2]], -5)
# self.ndec is bogus; we should have an assert_array_approx_equal for number of
# significant digits
class TestIRFFTDouble(_TestIRFFTBase):
def setup_method(self):
self.cdt = np.cdouble
self.rdt = np.double
self.ndec = 14
class TestIRFFTSingle(_TestIRFFTBase):
def setup_method(self):
self.cdt = np.complex64
self.rdt = np.float32
self.ndec = 5
class Testfft2(object):
def setup_method(self):
np.random.seed(1234)
def test_regression_244(self):
"""FFT returns wrong result with axes parameter."""
# fftn (and hence fft2) used to break when both axes and shape were
# used
x = numpy.ones((4, 4, 2))
y = fft2(x, shape=(8, 8), axes=(-3, -2))
y_r = numpy.fft.fftn(x, s=(8, 8), axes=(-3, -2))
assert_array_almost_equal(y, y_r)
def test_invalid_sizes(self):
assert_raises(ValueError, fft2, [[]])
assert_raises(ValueError, fft2, [[1, 1], [2, 2]], (4, -3))
class TestFftnSingle(object):
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
x = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
y = fftn(np.array(x, np.float32))
assert_(y.dtype == np.complex64,
msg="double precision output with single precision")
y_r = np.array(fftn(x), np.complex64)
assert_array_almost_equal_nulp(y, y_r)
@pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)
def test_size_accuracy_small(self, size):
x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
y1 = fftn(x.real.astype(np.float32))
y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
assert_equal(y1.dtype, np.complex64)
assert_array_almost_equal_nulp(y1, y2, 2000)
@pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)
def test_size_accuracy_large(self, size):
x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
y1 = fftn(x.real.astype(np.float32))
y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
assert_equal(y1.dtype, np.complex64)
assert_array_almost_equal_nulp(y1, y2, 2000)
def test_definition_float16(self):
x = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
y = fftn(np.array(x, np.float16))
assert_equal(y.dtype, np.complex64)
y_r = np.array(fftn(x), np.complex64)
assert_array_almost_equal_nulp(y, y_r)
@pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)
def test_float16_input_small(self, size):
x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
y1 = fftn(x.real.astype(np.float16))
y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
assert_equal(y1.dtype, np.complex64)
assert_array_almost_equal_nulp(y1, y2, 5e5)
@pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)
def test_float16_input_large(self, size):
x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
y1 = fftn(x.real.astype(np.float16))
y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
assert_equal(y1.dtype, np.complex64)
assert_array_almost_equal_nulp(y1, y2, 2e6)
class TestFftn(object):
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
x = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
y = fftn(x)
assert_array_almost_equal(y, direct_dftn(x))
x = random((20, 26))
assert_array_almost_equal(fftn(x), direct_dftn(x))
x = random((5, 4, 3, 20))
assert_array_almost_equal(fftn(x), direct_dftn(x))
def test_axes_argument(self):
# plane == ji_plane, x== kji_space
plane1 = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
plane2 = [[10, 11, 12],
[13, 14, 15],
[16, 17, 18]]
plane3 = [[19, 20, 21],
[22, 23, 24],
[25, 26, 27]]
ki_plane1 = [[1, 2, 3],
[10, 11, 12],
[19, 20, 21]]
ki_plane2 = [[4, 5, 6],
[13, 14, 15],
[22, 23, 24]]
ki_plane3 = [[7, 8, 9],
[16, 17, 18],
[25, 26, 27]]
jk_plane1 = [[1, 10, 19],
[4, 13, 22],
[7, 16, 25]]
jk_plane2 = [[2, 11, 20],
[5, 14, 23],
[8, 17, 26]]
jk_plane3 = [[3, 12, 21],
[6, 15, 24],
[9, 18, 27]]
kj_plane1 = [[1, 4, 7],
[10, 13, 16], [19, 22, 25]]
kj_plane2 = [[2, 5, 8],
[11, 14, 17], [20, 23, 26]]
kj_plane3 = [[3, 6, 9],
[12, 15, 18], [21, 24, 27]]
ij_plane1 = [[1, 4, 7],
[2, 5, 8],
[3, 6, 9]]
ij_plane2 = [[10, 13, 16],
[11, 14, 17],
[12, 15, 18]]
ij_plane3 = [[19, 22, 25],
[20, 23, 26],
[21, 24, 27]]
ik_plane1 = [[1, 10, 19],
[2, 11, 20],
[3, 12, 21]]
ik_plane2 = [[4, 13, 22],
[5, 14, 23],
[6, 15, 24]]
ik_plane3 = [[7, 16, 25],
[8, 17, 26],
[9, 18, 27]]
ijk_space = [jk_plane1, jk_plane2, jk_plane3]
ikj_space = [kj_plane1, kj_plane2, kj_plane3]
jik_space = [ik_plane1, ik_plane2, ik_plane3]
jki_space = [ki_plane1, ki_plane2, ki_plane3]
kij_space = [ij_plane1, ij_plane2, ij_plane3]
x = array([plane1, plane2, plane3])
assert_array_almost_equal(fftn(x),
fftn(x, axes=(-3, -2, -1))) # kji_space
assert_array_almost_equal(fftn(x), fftn(x, axes=(0, 1, 2)))
assert_array_almost_equal(fftn(x, axes=(0, 2)), fftn(x, axes=(0, -1)))
y = fftn(x, axes=(2, 1, 0)) # ijk_space
assert_array_almost_equal(swapaxes(y, -1, -3), fftn(ijk_space))
y = fftn(x, axes=(2, 0, 1)) # ikj_space
assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -1, -2),
fftn(ikj_space))
y = fftn(x, axes=(1, 2, 0)) # jik_space
assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -3, -2),
fftn(jik_space))
y = fftn(x, axes=(1, 0, 2)) # jki_space
assert_array_almost_equal(swapaxes(y, -2, -3), fftn(jki_space))
y = fftn(x, axes=(0, 2, 1)) # kij_space
assert_array_almost_equal(swapaxes(y, -2, -1), fftn(kij_space))
y = fftn(x, axes=(-2, -1)) # ji_plane
assert_array_almost_equal(fftn(plane1), y[0])
assert_array_almost_equal(fftn(plane2), y[1])
assert_array_almost_equal(fftn(plane3), y[2])
y = fftn(x, axes=(1, 2)) # ji_plane
assert_array_almost_equal(fftn(plane1), y[0])
assert_array_almost_equal(fftn(plane2), y[1])
assert_array_almost_equal(fftn(plane3), y[2])
y = fftn(x, axes=(-3, -2)) # kj_plane
assert_array_almost_equal(fftn(x[:, :, 0]), y[:, :, 0])
assert_array_almost_equal(fftn(x[:, :, 1]), y[:, :, 1])
assert_array_almost_equal(fftn(x[:, :, 2]), y[:, :, 2])
y = fftn(x, axes=(-3, -1)) # ki_plane
assert_array_almost_equal(fftn(x[:, 0, :]), y[:, 0, :])
assert_array_almost_equal(fftn(x[:, 1, :]), y[:, 1, :])
assert_array_almost_equal(fftn(x[:, 2, :]), y[:, 2, :])
y = fftn(x, axes=(-1, -2)) # ij_plane
assert_array_almost_equal(fftn(ij_plane1), swapaxes(y[0], -2, -1))
assert_array_almost_equal(fftn(ij_plane2), swapaxes(y[1], -2, -1))
assert_array_almost_equal(fftn(ij_plane3), swapaxes(y[2], -2, -1))
y = fftn(x, axes=(-1, -3)) # ik_plane
assert_array_almost_equal(fftn(ik_plane1),
swapaxes(y[:, 0, :], -1, -2))
assert_array_almost_equal(fftn(ik_plane2),
swapaxes(y[:, 1, :], -1, -2))
assert_array_almost_equal(fftn(ik_plane3),
swapaxes(y[:, 2, :], -1, -2))
y = fftn(x, axes=(-2, -3)) # jk_plane
assert_array_almost_equal(fftn(jk_plane1),
swapaxes(y[:, :, 0], -1, -2))
assert_array_almost_equal(fftn(jk_plane2),
swapaxes(y[:, :, 1], -1, -2))
assert_array_almost_equal(fftn(jk_plane3),
swapaxes(y[:, :, 2], -1, -2))
y = fftn(x, axes=(-1,)) # i_line
for i in range(3):
for j in range(3):
assert_array_almost_equal(fft(x[i, j, :]), y[i, j, :])
y = fftn(x, axes=(-2,)) # j_line
for i in range(3):
for j in range(3):
assert_array_almost_equal(fft(x[i, :, j]), y[i, :, j])
y = fftn(x, axes=(0,)) # k_line
for i in range(3):
for j in range(3):
assert_array_almost_equal(fft(x[:, i, j]), y[:, i, j])
y = fftn(x, axes=()) # point
assert_array_almost_equal(y, x)
def test_shape_argument(self):
small_x = [[1, 2, 3],
[4, 5, 6]]
large_x1 = [[1, 2, 3, 0],
[4, 5, 6, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]
y = fftn(small_x, shape=(4, 4))
assert_array_almost_equal(y, fftn(large_x1))
y = fftn(small_x, shape=(3, 4))
assert_array_almost_equal(y, fftn(large_x1[:-1]))
def test_shape_axes_argument(self):
small_x = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
large_x1 = array([[1, 2, 3, 0],
[4, 5, 6, 0],
[7, 8, 9, 0],
[0, 0, 0, 0]])
y = fftn(small_x, shape=(4, 4), axes=(-2, -1))
assert_array_almost_equal(y, fftn(large_x1))
y = fftn(small_x, shape=(4, 4), axes=(-1, -2))
assert_array_almost_equal(y, swapaxes(
fftn(swapaxes(large_x1, -1, -2)), -1, -2))
def test_shape_axes_argument2(self):
# Change shape of the last axis
x = numpy.random.random((10, 5, 3, 7))
y = fftn(x, axes=(-1,), shape=(8,))
assert_array_almost_equal(y, fft(x, axis=-1, n=8))
# Change shape of an arbitrary axis which is not the last one
x = numpy.random.random((10, 5, 3, 7))
y = fftn(x, axes=(-2,), shape=(8,))
assert_array_almost_equal(y, fft(x, axis=-2, n=8))
# Change shape of axes: cf #244, where shape and axes were mixed up
x = numpy.random.random((4, 4, 2))
y = fftn(x, axes=(-3, -2), shape=(8, 8))
assert_array_almost_equal(y,
numpy.fft.fftn(x, axes=(-3, -2), s=(8, 8)))
def test_shape_argument_more(self):
x = zeros((4, 4, 2))
with assert_raises(ValueError,
match="when given, axes and shape arguments"
" have to be of the same length"):
fftn(x, shape=(8, 8, 2, 1))
def test_invalid_sizes(self):
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[1, 0\]\) specified"):
fftn([[]])
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[4, -3\]\) specified"):
fftn([[1, 1], [2, 2]], (4, -3))
class TestIfftn(object):
dtype = None
cdtype = None
def setup_method(self):
np.random.seed(1234)
@pytest.mark.parametrize('dtype,cdtype,maxnlp',
[(np.float64, np.complex128, 2000),
(np.float32, np.complex64, 3500)])
def test_definition(self, dtype, cdtype, maxnlp):
x = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], dtype=dtype)
y = ifftn(x)
assert_equal(y.dtype, cdtype)
assert_array_almost_equal_nulp(y, direct_idftn(x), maxnlp)
x = random((20, 26))
assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
x = random((5, 4, 3, 20))
assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
@pytest.mark.parametrize('maxnlp', [2000, 3500])
@pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92])
def test_random_complex(self, maxnlp, size):
x = random([size, size]) + 1j*random([size, size])
assert_array_almost_equal_nulp(ifftn(fftn(x)), x, maxnlp)
assert_array_almost_equal_nulp(fftn(ifftn(x)), x, maxnlp)
def test_invalid_sizes(self):
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[1, 0\]\) specified"):
ifftn([[]])
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[4, -3\]\) specified"):
ifftn([[1, 1], [2, 2]], (4, -3))
class TestLongDoubleFailure(object):
def setup_method(self):
np.random.seed(1234)
def test_complex(self):
if np.dtype(np.longcomplex).itemsize == np.dtype(complex).itemsize:
# longdouble == double; so fft is supported
return
x = np.random.randn(10).astype(np.longdouble) + \
1j * np.random.randn(10).astype(np.longdouble)
for f in [fft, ifft]:
try:
f(x)
raise AssertionError("Type {0} not supported but does not fail" %
np.longcomplex)
except ValueError:
pass
def test_real(self):
if np.dtype(np.longdouble).itemsize == np.dtype(np.double).itemsize:
# longdouble == double; so fft is supported
return
x = np.random.randn(10).astype(np.longcomplex)
for f in [fft, ifft]:
try:
f(x)
raise AssertionError("Type %r not supported but does not fail" %
np.longcomplex)
except ValueError:
pass
class FakeArray(object):
def __init__(self, data):
self._data = data
self.__array_interface__ = data.__array_interface__
class FakeArray2(object):
def __init__(self, data):
self._data = data
def __array__(self):
return self._data
class TestOverwrite(object):
"""Check input overwrite behavior of the FFT functions."""
real_dtypes = [np.float32, np.float64]
dtypes = real_dtypes + [np.complex64, np.complex128]
fftsizes = [8, 16, 32]
def _check(self, x, routine, fftsize, axis, overwrite_x, should_overwrite):
x2 = x.copy()
for fake in [lambda x: x, FakeArray, FakeArray2]:
routine(fake(x2), fftsize, axis, overwrite_x=overwrite_x)
sig = "%s(%s%r, %r, axis=%r, overwrite_x=%r)" % (
routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)
if not should_overwrite:
assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes,
fftsize, overwrite_x):
np.random.seed(1234)
if np.issubdtype(dtype, np.complexfloating):
data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
else:
data = np.random.randn(*shape)
data = data.astype(dtype)
should_overwrite = (overwrite_x
and dtype in overwritable_dtypes
and fftsize <= shape[axis]
and (len(shape) == 1 or
(axis % len(shape) == len(shape)-1
and fftsize == shape[axis])))
self._check(data, routine, fftsize, axis,
overwrite_x=overwrite_x,
should_overwrite=should_overwrite)
@pytest.mark.parametrize('dtype', dtypes)
@pytest.mark.parametrize('fftsize', fftsizes)
@pytest.mark.parametrize('overwrite_x', [True, False])
@pytest.mark.parametrize('shape,axes', [((16,), -1),
((16, 2), 0),
((2, 16), 1)])
def test_fft_ifft(self, dtype, fftsize, overwrite_x, shape, axes):
overwritable = (np.complex128, np.complex64)
self._check_1d(fft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
self._check_1d(ifft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
@pytest.mark.parametrize('dtype', real_dtypes)
@pytest.mark.parametrize('fftsize', fftsizes)
@pytest.mark.parametrize('overwrite_x', [True, False])
@pytest.mark.parametrize('shape,axes', [((16,), -1),
((16, 2), 0),
((2, 16), 1)])
def test_rfft_irfft(self, dtype, fftsize, overwrite_x, shape, axes):
overwritable = self.real_dtypes
self._check_1d(irfft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
self._check_1d(rfft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
def _check_nd_one(self, routine, dtype, shape, axes, overwritable_dtypes,
overwrite_x):
np.random.seed(1234)
if np.issubdtype(dtype, np.complexfloating):
data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
else:
data = np.random.randn(*shape)
data = data.astype(dtype)
def fftshape_iter(shp):
if len(shp) <= 0:
yield ()
else:
for j in (shp[0]//2, shp[0], shp[0]*2):
for rest in fftshape_iter(shp[1:]):
yield (j,) + rest
if axes is None:
part_shape = shape
else:
part_shape = tuple(np.take(shape, axes))
for fftshape in fftshape_iter(part_shape):
should_overwrite = (overwrite_x
and data.ndim == 1
and np.all([x < y for x, y in zip(fftshape,
part_shape)])
and dtype in overwritable_dtypes)
self._check(data, routine, fftshape, axes,
overwrite_x=overwrite_x,
should_overwrite=should_overwrite)
if data.ndim > 1:
# check fortran order: it never overwrites
self._check(data.T, routine, fftshape, axes,
overwrite_x=overwrite_x,
should_overwrite=False)
@pytest.mark.parametrize('dtype', dtypes)
@pytest.mark.parametrize('overwrite_x', [True, False])
@pytest.mark.parametrize('shape,axes', [((16,), None),
((16,), (0,)),
((16, 2), (0,)),
((2, 16), (1,)),
((8, 16), None),
((8, 16), (0, 1)),
((8, 16, 2), (0, 1)),
((8, 16, 2), (1, 2)),
((8, 16, 2), (0,)),
((8, 16, 2), (1,)),
((8, 16, 2), (2,)),
((8, 16, 2), None),
((8, 16, 2), (0, 1, 2))])
def test_fftn_ifftn(self, dtype, overwrite_x, shape, axes):
overwritable = (np.complex128, np.complex64)
self._check_nd_one(fftn, dtype, shape, axes, overwritable,
overwrite_x)
self._check_nd_one(ifftn, dtype, shape, axes, overwritable,
overwrite_x)
| 34.989723
| 161
| 0.521016
|
4a15364707983f92f19c391e9f5aaaacb181c5d2
| 4,978
|
py
|
Python
|
fixture/contact.py
|
18kotov/python_test_learning
|
08bc41c921b6d51fc1bab5d239e3f871f25439ba
|
[
"Apache-2.0"
] | null | null | null |
fixture/contact.py
|
18kotov/python_test_learning
|
08bc41c921b6d51fc1bab5d239e3f871f25439ba
|
[
"Apache-2.0"
] | null | null | null |
fixture/contact.py
|
18kotov/python_test_learning
|
08bc41c921b6d51fc1bab5d239e3f871f25439ba
|
[
"Apache-2.0"
] | null | null | null |
from model.contact import Contact
class ContactHelper():
def __init__(self, app):
self.app = app
def add_new(self, contact):
#fill_contact_info
wd = self.app.wd
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys(contact.firstname)
wd.find_element_by_name("middlename").click()
wd.find_element_by_name("middlename").clear()
wd.find_element_by_name("middlename").send_keys(contact.middlename)
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys(contact.lastname)
wd.find_element_by_name("nickname").click()
wd.find_element_by_name("nickname").clear()
wd.find_element_by_name("nickname").send_keys(contact.nickname)
wd.find_element_by_name("title").click()
wd.find_element_by_name("title").clear()
wd.find_element_by_name("title").send_keys(contact.title)
wd.find_element_by_name("company").click()
wd.find_element_by_name("company").clear()
wd.find_element_by_name("company").send_keys(contact.company)
wd.find_element_by_name("address").click()
wd.find_element_by_name("address").clear()
wd.find_element_by_name("address").send_keys(contact.address)
wd.find_element_by_name("home").click()
wd.find_element_by_name("home").clear()
wd.find_element_by_name("home").send_keys(contact.home)
wd.find_element_by_name("email").click()
wd.find_element_by_name("email").clear()
wd.find_element_by_name("email").send_keys(contact.email)
        # click the submit button
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
def open_add_new_page(self):
wd = self.app.wd
if not wd.current_url.endswith("/edit.php"):
wd.find_element_by_link_text("add new").click()
def go_home_page(self):
wd = self.app.wd
if not wd.current_url.endswith("/addressbook/"):
wd.find_element_by_link_text("home").click()
def edit_first_contact(self,contact):
wd = self.app.wd
wd.find_element_by_name("selected[]").click()
wd.find_element_by_xpath("//img[@src='icons/pencil.png']").click()
# fill_contact_info
wd = self.app.wd
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys(contact.firstname)
wd.find_element_by_name("middlename").click()
wd.find_element_by_name("middlename").clear()
wd.find_element_by_name("middlename").send_keys(contact.middlename)
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys(contact.lastname)
wd.find_element_by_name("nickname").click()
wd.find_element_by_name("nickname").clear()
wd.find_element_by_name("nickname").send_keys(contact.nickname)
wd.find_element_by_name("title").click()
wd.find_element_by_name("title").clear()
wd.find_element_by_name("title").send_keys(contact.title)
wd.find_element_by_name("company").click()
wd.find_element_by_name("company").clear()
wd.find_element_by_name("company").send_keys(contact.company)
wd.find_element_by_name("address").click()
wd.find_element_by_name("address").clear()
wd.find_element_by_name("address").send_keys(contact.address)
wd.find_element_by_name("home").click()
wd.find_element_by_name("home").clear()
wd.find_element_by_name("home").send_keys(contact.home)
wd.find_element_by_name("email").click()
wd.find_element_by_name("email").clear()
wd.find_element_by_name("email").send_keys(contact.email)
# submit
wd.find_element_by_name("update").click()
def delete_first_contact(self):
wd = self.app.wd
wd.find_element_by_name("selected[]").click()
wd.find_element_by_xpath("//input[@value='Delete']").click()
wd.switch_to_alert().accept()
# self.return_to_groups_page()
def count(self):
wd = self.app.wd
wd.find_element_by_link_text("home").click()
return len(wd.find_elements_by_name("selected[]"))
def get_contact_list(self):
wd = self.app.wd
# self.go_home_page()
contacts = []
for element in wd.find_elements_by_xpath("//tr[@name='entry']"):
cells = element.find_elements_by_tag_name("td")
firstname = cells[2].text
lastname = cells[1].text
id = element.find_element_by_name("selected[]").get_attribute("value")
contacts.append(Contact(firstname=firstname, lastname=lastname, id=id))
return contacts
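    # A possible refactoring sketch (not part of the original fixture): add_new
    # and edit_first_contact repeat the same field-filling sequence, which could
    # be expressed once as a helper driven by (field name, value) pairs.
    def fill_contact_form(self, contact):
        wd = self.app.wd
        fields = [("firstname", contact.firstname), ("middlename", contact.middlename),
                  ("lastname", contact.lastname), ("nickname", contact.nickname),
                  ("title", contact.title), ("company", contact.company),
                  ("address", contact.address), ("home", contact.home),
                  ("email", contact.email)]
        for name, value in fields:
            element = wd.find_element_by_name(name)
            element.click()
            element.clear()
            element.send_keys(value)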
| 43.286957
| 83
| 0.665729
|
4a153689fd04f6b9a4d0cebb847492f362ac8182
| 2,109
|
py
|
Python
|
pyravendb/tools/custom_decoder.py
|
w0pr/ravendb-python-client
|
b073cbefcb09b9fdeb8e72fedf3a7648ef5aac87
|
[
"MIT"
] | 1
|
2019-11-13T18:08:41.000Z
|
2019-11-13T18:08:41.000Z
|
pyravendb/tools/custom_decoder.py
|
w0pr/ravendb-python-client
|
b073cbefcb09b9fdeb8e72fedf3a7648ef5aac87
|
[
"MIT"
] | null | null | null |
pyravendb/tools/custom_decoder.py
|
w0pr/ravendb-python-client
|
b073cbefcb09b9fdeb8e72fedf3a7648ef5aac87
|
[
"MIT"
] | null | null | null |
from pyravendb.tools.utils import Utils
import json
class JsonDecoder(json.JSONDecoder):
"""
    This custom JsonDecoder can be passed to the json.loads function to work with pyravendb's mapper solution.
    Use it like this: json.loads(YOUR_OBJECT, cls=JsonDecoder, object_mapper=YOUR_MAPPER)
    Note that the object returned by the loads function will still be a dict.
    To get that dict as your custom object, build it from the returned dict or use the parse_json method.
"""
def __init__(self, **kwargs):
self.object_mapper = kwargs.pop("object_mapper", lambda key, value: None)
super(JsonDecoder, self).__init__(object_hook=self.object_hook)
self.step_down = None
def object_hook(self, dict_object):
if self.step_down is not None:
for key in dict_object:
result = self.object_mapper(key, dict_object[key])
if result is not None:
dict_object[key] = result
self.step_down = dict_object
return self.step_down
def parse_json(json_string, object_type, mappers):
"""
    This function uses the custom JsonDecoder and the conventions' mappers to recreate your custom object
    from a json string. Just call this method with the json_string, your complete object_type and your
    mappers dict.
    The mappers dict must use the object_type (e.g. User) as its key, and the value must be a method that
    takes (key, value), where key is the name of the object property we want to parse and value holds the
    properties of that object.
"""
obj = json.loads(json_string, cls=JsonDecoder, object_mapper=mappers.get(object_type, None))
if obj is not None:
try:
obj = object_type(**obj)
except TypeError:
initialize_dict, set_needed = Utils.make_initialize_dict(obj, object_type.__init__)
o = object_type(**initialize_dict)
if set_needed:
for key, value in obj.items():
setattr(o, key, value)
obj = o
return obj
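# A minimal, hypothetical usage sketch (the User, Address and user_mapper names
# below are illustrations, not part of pyravendb): parse_json lets the mapper
# rebuild nested objects before the outer type is instantiated.
if __name__ == "__main__":
    class Address(object):
        def __init__(self, city):
            self.city = city
    class User(object):
        def __init__(self, name, address):
            self.name = name
            self.address = address
    def user_mapper(key, value):
        # Called for every key of the outer dict; return None to keep the value as-is.
        if key == "address" and isinstance(value, dict):
            return Address(**value)
        return None
    raw = '{"name": "Idan", "address": {"city": "Hadera"}}'
    user = parse_json(raw, User, {User: user_mapper})
    print(user.name, user.address.city)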
| 42.18
| 117
| 0.678046
|
4a15378bad804573f39d84a9def438ec28b057ce
| 11,801
|
py
|
Python
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/interfaces/interface/authentication/key/state/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 64
|
2016-10-20T15:47:18.000Z
|
2021-11-11T11:57:32.000Z
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/interfaces/interface/authentication/key/state/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 126
|
2016-10-05T10:36:14.000Z
|
2019-05-15T08:43:23.000Z
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/interfaces/interface/authentication/key/state/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 63
|
2016-11-07T15:23:08.000Z
|
2021-09-22T14:41:16.000Z
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/interfaces/interface/authentication/key/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines ISIS authentication key state.
"""
__slots__ = ("_path_helper", "_extmethods", "__auth_password")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__auth_password = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="auth-password",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:routing-password",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"interfaces",
"interface",
"authentication",
"key",
"state",
]
def _get_auth_password(self):
"""
Getter method for auth_password, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/authentication/key/state/auth_password (oc-types:routing-password)
YANG Description: Authentication key string.
"""
return self.__auth_password
def _set_auth_password(self, v, load=False):
"""
Setter method for auth_password, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/authentication/key/state/auth_password (oc-types:routing-password)
If this variable is read-only (config: false) in the
source YANG file, then _set_auth_password is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_auth_password() directly.
YANG Description: Authentication key string.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="auth-password",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:routing-password",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """auth_password must be of a type compatible with oc-types:routing-password""",
"defined-type": "oc-types:routing-password",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="auth-password", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-types:routing-password', is_config=False)""",
}
)
self.__auth_password = t
if hasattr(self, "_set"):
self._set()
def _unset_auth_password(self):
self.__auth_password = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="auth-password",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:routing-password",
is_config=False,
)
auth_password = __builtin__.property(_get_auth_password)
_pyangbind_elements = OrderedDict([("auth_password", auth_password)])
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/interfaces/interface/authentication/key/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines ISIS authentication key state.
"""
__slots__ = ("_path_helper", "_extmethods", "__auth_password")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__auth_password = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="auth-password",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:routing-password",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"interfaces",
"interface",
"authentication",
"key",
"state",
]
def _get_auth_password(self):
"""
Getter method for auth_password, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/authentication/key/state/auth_password (oc-types:routing-password)
YANG Description: Authentication key string.
"""
return self.__auth_password
def _set_auth_password(self, v, load=False):
"""
Setter method for auth_password, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/authentication/key/state/auth_password (oc-types:routing-password)
If this variable is read-only (config: false) in the
source YANG file, then _set_auth_password is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_auth_password() directly.
YANG Description: Authentication key string.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="auth-password",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:routing-password",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """auth_password must be of a type compatible with oc-types:routing-password""",
"defined-type": "oc-types:routing-password",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="auth-password", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-types:routing-password', is_config=False)""",
}
)
self.__auth_password = t
if hasattr(self, "_set"):
self._set()
def _unset_auth_password(self):
self.__auth_password = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="auth-password",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:routing-password",
is_config=False,
)
auth_password = __builtin__.property(_get_auth_password)
_pyangbind_elements = OrderedDict([("auth_password", auth_password)])
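# Illustrative usage sketch (not part of the generated bindings): because
# auth-password is "config false", the public property above is read-only and
# a backend populating operational state would call the private setter
# directly, e.g.:
#
#     st = state()
#     st._set_auth_password("example-routing-password")  # hypothetical value
#     print(st.auth_password)
#     st._unset_auth_password()  # revert to the unset default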
| 38.947195
| 372
| 0.610118
|
4a153842f43ce6e52659ad908545792ab52541a3
| 1,052
|
py
|
Python
|
examples/py/rsi.py
|
Joukahainen/ccxt
|
82823a85b96cee336853f0deb353474df2122b88
|
[
"MIT"
] | 2
|
2022-03-10T15:21:49.000Z
|
2022-03-10T15:22:01.000Z
|
examples/py/rsi.py
|
Joukahainen/ccxt
|
82823a85b96cee336853f0deb353474df2122b88
|
[
"MIT"
] | null | null | null |
examples/py/rsi.py
|
Joukahainen/ccxt
|
82823a85b96cee336853f0deb353474df2122b88
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import pandas_ta as ta  # noqa: F401 -- imported for its side effect of registering the DataFrame ".ta" accessor
import pandas as pd
import ccxt
# -----------------------------------------------------------------------------
print('CCXT Version:', ccxt.__version__)
# -----------------------------------------------------------------------------
exchange = ccxt.binance()
symbol = 'BTC/USDT'
timeframe = '1m'
limit = 50
while True:
try:
        ohlcv = exchange.fetch_ohlcv(symbol, timeframe, limit=limit)
print('--------------------------------------------------------------')
if len(ohlcv):
df = pd.DataFrame(ohlcv, columns=['time', 'open', 'high', 'low', 'close', 'volume'])
df['time'] = pd.to_datetime(df['time'], unit='ms')
df = pd.concat([df, df.ta.rsi()], axis=1)
print(df[-20:])
            print(exchange.iso8601(exchange.milliseconds()))
except Exception as e:
print(type(e).__name__, str(e))
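# Note (not part of the original example): the loop above polls the exchange
# continuously. A gentler sketch, assuming the standard ccxt rate-limit
# attribute, would pause between iterations:
#
#     import time
#     time.sleep(exchange.rateLimit / 1000)  # rateLimit is in milliseconds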
| 29.222222
| 96
| 0.476236
|
4a153852ec5812bddf6c67fa6914f00ed50d5b4d
| 395
|
py
|
Python
|
Instagram/wsgi.py
|
NzauM/Instagram
|
6db2e3568cd4947d6fce1202ca04f2bdc3be16f5
|
[
"MIT"
] | null | null | null |
Instagram/wsgi.py
|
NzauM/Instagram
|
6db2e3568cd4947d6fce1202ca04f2bdc3be16f5
|
[
"MIT"
] | 5
|
2020-02-12T03:20:23.000Z
|
2021-09-08T01:33:41.000Z
|
Instagram/wsgi.py
|
NzauM/Instagram
|
6db2e3568cd4947d6fce1202ca04f2bdc3be16f5
|
[
"MIT"
] | null | null | null |
"""
WSGI config for Instagram project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Instagram.settings")
application = get_wsgi_application()
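# Illustrative deployment sketch (not part of this file): with the project on
# the Python path, a WSGI server such as gunicorn can serve the callable
# directly, e.g.:
#
#     gunicorn Instagram.wsgi:application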
| 23.235294
| 78
| 0.787342
|
4a153a3ba790625cbbdb1b99d784aaf2a8920628
| 239
|
py
|
Python
|
Delete_Vowels.py
|
NehaKohad/HacktoberFestContribute
|
4b07b4db6eb741eff347ca16ffbe1562b3d283e1
|
[
"MIT"
] | null | null | null |
Delete_Vowels.py
|
NehaKohad/HacktoberFestContribute
|
4b07b4db6eb741eff347ca16ffbe1562b3d283e1
|
[
"MIT"
] | null | null | null |
Delete_Vowels.py
|
NehaKohad/HacktoberFestContribute
|
4b07b4db6eb741eff347ca16ffbe1562b3d283e1
|
[
"MIT"
] | null | null | null |
v = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'U', 'O']
text = input("Enter String")  # renamed from "str" so the built-in is not shadowed
delstr = []
for i in text:
    print(i)
    if i not in v:
        delstr.append(i)
print("String After Deleting Vowels :", ''.join(delstr))
| 19.916667
| 56
| 0.493724
|
4a153b505ee36ecc24d616d10b9294fb104058b9
| 527
|
py
|
Python
|
frameworks/kafka/tests/test_soak.py
|
elezar/dcos-commons
|
b7b3aeec1d5b6dc9073ba07000d4e48784143846
|
[
"Apache-2.0"
] | 7
|
2017-11-02T05:26:40.000Z
|
2020-01-27T19:33:52.000Z
|
frameworks/kafka/tests/test_soak.py
|
elezar/dcos-commons
|
b7b3aeec1d5b6dc9073ba07000d4e48784143846
|
[
"Apache-2.0"
] | 14
|
2017-09-20T22:47:48.000Z
|
2020-09-11T19:54:25.000Z
|
frameworks/kafka/tests/test_soak.py
|
AlexRogalskiy/dcos-commons
|
85711f05bc94172aabb6837f9ff529721437d20c
|
[
"Apache-2.0"
] | 9
|
2017-11-14T19:43:07.000Z
|
2022-01-06T12:44:49.000Z
|
import json
import pytest
import sdk_upgrade
from tests import config
@pytest.mark.soak_upgrade
def test_soak_upgrade_downgrade():
""" Assumes that the install options file is placed in the repo root directory by the user.
"""
with open('kafka.json') as options_file:
install_options = json.load(options_file)
sdk_upgrade.soak_upgrade_downgrade(
config.PACKAGE_NAME,
install_options["service"]["name"],
config.DEFAULT_BROKER_COUNT,
additional_options=install_options)
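# Illustrative kafka.json sketch (assumed layout; the test only relies on the
# keys it reads above):
#
#     {
#         "service": {
#             "name": "kafka"
#         }
#     }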
| 27.736842
| 95
| 0.73055
|
4a153b7a365a19ee2ebb52a26654ce4fe9eef440
| 57,731
|
py
|
Python
|
nova/tests/unit/api/openstack/compute/test_hypervisors.py
|
hemanthnakkina/nova
|
3756f4ffa6ff670bfd6b491a12b833da0a36b017
|
[
"Apache-2.0"
] | 1
|
2021-06-10T17:08:15.000Z
|
2021-06-10T17:08:15.000Z
|
nova/tests/unit/api/openstack/compute/test_hypervisors.py
|
hemanthnakkina/nova
|
3756f4ffa6ff670bfd6b491a12b833da0a36b017
|
[
"Apache-2.0"
] | 2
|
2021-03-31T20:04:16.000Z
|
2021-12-13T20:45:03.000Z
|
nova/tests/unit/api/openstack/compute/test_hypervisors.py
|
hemanthnakkina/nova
|
3756f4ffa6ff670bfd6b491a12b833da0a36b017
|
[
"Apache-2.0"
] | 1
|
2020-07-24T02:31:45.000Z
|
2020-07-24T02:31:45.000Z
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import netaddr
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
import six
from webob import exc
from nova.api.openstack.compute import hypervisors \
as hypervisors_v21
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
CPU_INFO = """
{"arch": "x86_64",
"vendor": "fake",
"topology": {"cores": 1, "threads": 1, "sockets": 1},
"features": [],
"model": ""}"""
TEST_HYPERS = [
dict(id=1,
uuid=uuids.hyper1,
service_id=1,
host="compute1",
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper1",
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info=CPU_INFO,
disk_available_least=100,
host_ip=netaddr.IPAddress('1.1.1.1')),
dict(id=2,
uuid=uuids.hyper2,
service_id=2,
host="compute2",
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper2",
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info=CPU_INFO,
disk_available_least=100,
host_ip=netaddr.IPAddress('2.2.2.2'))]
TEST_SERVICES = [
objects.Service(id=1,
uuid=uuids.service1,
host="compute1",
binary="nova-compute",
topic="compute_topic",
report_count=5,
disabled=False,
disabled_reason=None,
availability_zone="nova"),
objects.Service(id=2,
uuid=uuids.service2,
host="compute2",
binary="nova-compute",
topic="compute_topic",
report_count=5,
disabled=False,
disabled_reason=None,
availability_zone="nova"),
]
TEST_HYPERS_OBJ = [objects.ComputeNode(**hyper_dct)
for hyper_dct in TEST_HYPERS]
TEST_HYPERS[0].update({'service': TEST_SERVICES[0]})
TEST_HYPERS[1].update({'service': TEST_SERVICES[1]})
TEST_SERVERS = [dict(name="inst1", uuid=uuids.instance_1, host="compute1"),
dict(name="inst2", uuid=uuids.instance_2, host="compute2"),
dict(name="inst3", uuid=uuids.instance_3, host="compute1"),
dict(name="inst4", uuid=uuids.instance_4, host="compute2")]
def fake_compute_node_get_all(context, limit=None, marker=None):
if marker in ['99999', uuids.invalid_marker]:
raise exception.MarkerNotFound(marker)
marker_found = True if marker is None else False
output = []
for hyper in TEST_HYPERS_OBJ:
# Starting with the 2.53 microversion, the marker is a uuid.
if not marker_found and marker in (str(hyper.id), hyper.uuid):
marker_found = True
elif marker_found:
if limit is None or len(output) < int(limit):
output.append(hyper)
return output
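# Illustrative expectation (not part of the original tests): with the two
# hypervisors defined above, fake_compute_node_get_all(ctx, limit=1, marker='1')
# skips hyper1 (the marker) and returns only hyper2, which is exactly how the
# limit/marker paging tests below exercise this fake.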
def fake_compute_node_search_by_hypervisor(context, hypervisor_re):
return TEST_HYPERS_OBJ
def fake_compute_node_get(context, compute_id):
for hyper in TEST_HYPERS_OBJ:
if hyper.uuid == compute_id or hyper.id == int(compute_id):
return hyper
raise exception.ComputeHostNotFound(host=compute_id)
def fake_service_get_by_compute_host(context, host):
for service in TEST_SERVICES:
if service.host == host:
return service
def fake_compute_node_statistics(context):
result = dict(
count=0,
vcpus=0,
memory_mb=0,
local_gb=0,
vcpus_used=0,
memory_mb_used=0,
local_gb_used=0,
free_ram_mb=0,
free_disk_gb=0,
current_workload=0,
running_vms=0,
disk_available_least=0,
)
for hyper in TEST_HYPERS_OBJ:
for key in result:
if key == 'count':
result[key] += 1
else:
result[key] += getattr(hyper, key)
return result
def fake_instance_get_all_by_host(context, host):
results = []
for inst in TEST_SERVERS:
if inst['host'] == host:
inst_obj = fake_instance.fake_instance_obj(context, **inst)
results.append(inst_obj)
return results
class HypervisorsTestV21(test.NoDBTestCase):
api_version = '2.1'
# Allow subclasses to override if the id value in the response is the
# compute node primary key integer id or the uuid.
expect_uuid_for_id = False
# TODO(stephenfin): These should just be defined here
TEST_HYPERS_OBJ = copy.deepcopy(TEST_HYPERS_OBJ)
TEST_SERVICES = copy.deepcopy(TEST_SERVICES)
TEST_SERVERS = copy.deepcopy(TEST_SERVERS)
DETAIL_HYPERS_DICTS = copy.deepcopy(TEST_HYPERS)
del DETAIL_HYPERS_DICTS[0]['service_id']
del DETAIL_HYPERS_DICTS[1]['service_id']
del DETAIL_HYPERS_DICTS[0]['host']
del DETAIL_HYPERS_DICTS[1]['host']
del DETAIL_HYPERS_DICTS[0]['uuid']
del DETAIL_HYPERS_DICTS[1]['uuid']
DETAIL_HYPERS_DICTS[0].update({'state': 'up',
'status': 'enabled',
'service': dict(id=1, host='compute1',
disabled_reason=None)})
DETAIL_HYPERS_DICTS[1].update({'state': 'up',
'status': 'enabled',
'service': dict(id=2, host='compute2',
disabled_reason=None)})
INDEX_HYPER_DICTS = [
dict(id=1, hypervisor_hostname="hyper1",
state='up', status='enabled'),
dict(id=2, hypervisor_hostname="hyper2",
state='up', status='enabled')]
DETAIL_NULL_CPUINFO_DICT = {'': '', None: None}
def _get_request(self, use_admin_context, url=''):
return fakes.HTTPRequest.blank(url,
use_admin_context=use_admin_context,
version=self.api_version)
def _set_up_controller(self):
self.controller = hypervisors_v21.HypervisorsController()
self.controller.servicegroup_api.service_is_up = mock.MagicMock(
return_value=True)
def _get_hyper_id(self):
"""Helper function to get the proper hypervisor id for a request
:returns: The first hypervisor's uuid for microversions that expect a
uuid for the id, otherwise the hypervisor's id primary key
"""
return (self.TEST_HYPERS_OBJ[0].uuid if self.expect_uuid_for_id
else self.TEST_HYPERS_OBJ[0].id)
def setUp(self):
super(HypervisorsTestV21, self).setUp()
self._set_up_controller()
host_api = self.controller.host_api
host_api.compute_node_get_all = mock.MagicMock(
side_effect=fake_compute_node_get_all)
host_api.service_get_by_compute_host = mock.MagicMock(
side_effect=fake_service_get_by_compute_host)
host_api.compute_node_search_by_hypervisor = mock.MagicMock(
side_effect=fake_compute_node_search_by_hypervisor)
host_api.compute_node_get = mock.MagicMock(
side_effect=fake_compute_node_get)
self.stub_out('nova.db.api.compute_node_statistics',
fake_compute_node_statistics)
def test_view_hypervisor_nodetail_noservers(self):
req = self._get_request(True)
result = self.controller._view_hypervisor(
self.TEST_HYPERS_OBJ[0], self.TEST_SERVICES[0], False, req)
self.assertEqual(self.INDEX_HYPER_DICTS[0], result)
def test_view_hypervisor_detail_noservers(self):
req = self._get_request(True)
result = self.controller._view_hypervisor(
self.TEST_HYPERS_OBJ[0], self.TEST_SERVICES[0], True, req)
self.assertEqual(self.DETAIL_HYPERS_DICTS[0], result)
def test_view_hypervisor_servers(self):
req = self._get_request(True)
result = self.controller._view_hypervisor(self.TEST_HYPERS_OBJ[0],
self.TEST_SERVICES[0],
False, req,
self.TEST_SERVERS)
expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS[0])
expected_dict.update({'servers': [
dict(name="inst1", uuid=uuids.instance_1),
dict(name="inst2", uuid=uuids.instance_2),
dict(name="inst3", uuid=uuids.instance_3),
dict(name="inst4", uuid=uuids.instance_4)]})
self.assertEqual(expected_dict, result)
def _test_view_hypervisor_detail_cpuinfo_null(self, cpu_info):
req = self._get_request(True)
test_hypervisor_obj = copy.deepcopy(self.TEST_HYPERS_OBJ[0])
test_hypervisor_obj.cpu_info = cpu_info
result = self.controller._view_hypervisor(test_hypervisor_obj,
self.TEST_SERVICES[0],
True, req)
expected_dict = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0])
expected_dict.update({'cpu_info':
self.DETAIL_NULL_CPUINFO_DICT[cpu_info]})
self.assertEqual(result, expected_dict)
def test_view_hypervisor_detail_cpuinfo_empty_string(self):
self._test_view_hypervisor_detail_cpuinfo_null('')
def test_view_hypervisor_detail_cpuinfo_none(self):
self._test_view_hypervisor_detail_cpuinfo_null(None)
def test_index(self):
req = self._get_request(True)
result = self.controller.index(req)
self.assertEqual(dict(hypervisors=self.INDEX_HYPER_DICTS), result)
def test_index_compute_host_not_found(self):
"""Tests that if a service is deleted but the compute node is not we
don't fail when listing hypervisors.
"""
# two computes, a matching service only exists for the first one
compute_nodes = objects.ComputeNodeList(objects=[
objects.ComputeNode(**TEST_HYPERS[0]),
objects.ComputeNode(**TEST_HYPERS[1])
])
def fake_service_get_by_compute_host(context, host):
if host == TEST_HYPERS[0]['host']:
return TEST_SERVICES[0]
raise exception.ComputeHostNotFound(host=host)
@mock.patch.object(self.controller.host_api, 'compute_node_get_all',
return_value=compute_nodes)
@mock.patch.object(self.controller.host_api,
'service_get_by_compute_host',
fake_service_get_by_compute_host)
def _test(self, compute_node_get_all):
req = self._get_request(True)
result = self.controller.index(req)
self.assertEqual(1, len(result['hypervisors']))
expected = {
'id': compute_nodes[0].uuid if self.expect_uuid_for_id
else compute_nodes[0].id,
'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
'state': 'up',
'status': 'enabled',
}
self.assertDictEqual(expected, result['hypervisors'][0])
_test(self)
def test_index_compute_host_not_mapped(self):
"""Tests that we don't fail index if a host is not mapped."""
# two computes, a matching service only exists for the first one
compute_nodes = objects.ComputeNodeList(objects=[
objects.ComputeNode(**TEST_HYPERS[0]),
objects.ComputeNode(**TEST_HYPERS[1])
])
def fake_service_get_by_compute_host(context, host):
if host == TEST_HYPERS[0]['host']:
return TEST_SERVICES[0]
raise exception.HostMappingNotFound(name=host)
@mock.patch.object(self.controller.host_api, 'compute_node_get_all',
return_value=compute_nodes)
@mock.patch.object(self.controller.host_api,
'service_get_by_compute_host',
fake_service_get_by_compute_host)
def _test(self, compute_node_get_all):
req = self._get_request(True)
result = self.controller.index(req)
self.assertEqual(1, len(result['hypervisors']))
expected = {
'id': compute_nodes[0].uuid if self.expect_uuid_for_id
else compute_nodes[0].id,
'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
'state': 'up',
'status': 'enabled',
}
self.assertDictEqual(expected, result['hypervisors'][0])
_test(self)
def test_detail(self):
req = self._get_request(True)
result = self.controller.detail(req)
self.assertEqual(dict(hypervisors=self.DETAIL_HYPERS_DICTS), result)
def test_detail_compute_host_not_found(self):
"""Tests that if a service is deleted but the compute node is not we
don't fail when listing hypervisors.
"""
# two computes, a matching service only exists for the first one
compute_nodes = objects.ComputeNodeList(objects=[
objects.ComputeNode(**TEST_HYPERS[0]),
objects.ComputeNode(**TEST_HYPERS[1])
])
def fake_service_get_by_compute_host(context, host):
if host == TEST_HYPERS[0]['host']:
return TEST_SERVICES[0]
raise exception.ComputeHostNotFound(host=host)
@mock.patch.object(self.controller.host_api, 'compute_node_get_all',
return_value=compute_nodes)
@mock.patch.object(self.controller.host_api,
'service_get_by_compute_host',
fake_service_get_by_compute_host)
def _test(self, compute_node_get_all):
req = self._get_request(True)
result = self.controller.detail(req)
self.assertEqual(1, len(result['hypervisors']))
expected = {
'id': compute_nodes[0].id,
'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
'state': 'up',
'status': 'enabled',
}
# we don't care about all of the details, just make sure we get
# the subset we care about and there are more keys than what index
# would return
hypervisor = result['hypervisors'][0]
self.assertTrue(
set(expected.keys()).issubset(set(hypervisor.keys())))
self.assertGreater(len(hypervisor.keys()), len(expected.keys()))
self.assertEqual(compute_nodes[0].hypervisor_hostname,
hypervisor['hypervisor_hostname'])
_test(self)
def test_detail_compute_host_not_mapped(self):
"""Tests that if a service is deleted but the compute node is not we
don't fail when listing hypervisors.
"""
# two computes, a matching service only exists for the first one
compute_nodes = objects.ComputeNodeList(objects=[
objects.ComputeNode(**TEST_HYPERS[0]),
objects.ComputeNode(**TEST_HYPERS[1])
])
def fake_service_get_by_compute_host(context, host):
if host == TEST_HYPERS[0]['host']:
return TEST_SERVICES[0]
raise exception.HostMappingNotFound(name=host)
@mock.patch.object(self.controller.host_api, 'compute_node_get_all',
return_value=compute_nodes)
@mock.patch.object(self.controller.host_api,
'service_get_by_compute_host',
fake_service_get_by_compute_host)
def _test(self, compute_node_get_all):
req = self._get_request(True)
result = self.controller.detail(req)
self.assertEqual(1, len(result['hypervisors']))
expected = {
'id': compute_nodes[0].id,
'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
'state': 'up',
'status': 'enabled',
}
# we don't care about all of the details, just make sure we get
# the subset we care about and there are more keys than what index
# would return
hypervisor = result['hypervisors'][0]
self.assertTrue(
set(expected.keys()).issubset(set(hypervisor.keys())))
self.assertGreater(len(hypervisor.keys()), len(expected.keys()))
self.assertEqual(compute_nodes[0].hypervisor_hostname,
hypervisor['hypervisor_hostname'])
_test(self)
def test_show_compute_host_not_mapped(self):
"""Tests that if a service is deleted but the compute node is not we
don't fail when listing hypervisors.
"""
@mock.patch.object(self.controller.host_api, 'compute_node_get',
return_value=self.TEST_HYPERS_OBJ[0])
@mock.patch.object(self.controller.host_api,
'service_get_by_compute_host')
def _test(self, mock_service, mock_compute_node_get):
req = self._get_request(True)
mock_service.side_effect = exception.HostMappingNotFound(
name='foo')
hyper_id = self._get_hyper_id()
self.assertRaises(exc.HTTPNotFound, self.controller.show,
req, hyper_id)
self.assertTrue(mock_service.called)
mock_compute_node_get.assert_called_once_with(mock.ANY, hyper_id)
_test(self)
def test_show_noid(self):
req = self._get_request(True)
hyperid = uuids.hyper3 if self.expect_uuid_for_id else '3'
self.assertRaises(exc.HTTPNotFound, self.controller.show, req, hyperid)
def test_show_non_integer_id(self):
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound, self.controller.show, req, 'abc')
def test_show_withid(self):
req = self._get_request(True)
hyper_id = self._get_hyper_id()
result = self.controller.show(req, hyper_id)
self.assertEqual(dict(hypervisor=self.DETAIL_HYPERS_DICTS[0]), result)
def test_uptime_noid(self):
req = self._get_request(True)
hyper_id = uuids.hyper3 if self.expect_uuid_for_id else '3'
self.assertRaises(exc.HTTPNotFound, self.controller.uptime, req,
hyper_id)
def test_uptime_notimplemented(self):
with mock.patch.object(self.controller.host_api, 'get_host_uptime',
side_effect=exc.HTTPNotImplemented()
) as mock_get_uptime:
req = self._get_request(True)
hyper_id = self._get_hyper_id()
self.assertRaises(exc.HTTPNotImplemented,
self.controller.uptime, req, hyper_id)
self.assertEqual(1, mock_get_uptime.call_count)
def test_uptime_implemented(self):
with mock.patch.object(self.controller.host_api, 'get_host_uptime',
return_value="fake uptime"
) as mock_get_uptime:
req = self._get_request(True)
hyper_id = self._get_hyper_id()
result = self.controller.uptime(req, hyper_id)
expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS[0])
expected_dict.update({'uptime': "fake uptime"})
self.assertEqual(dict(hypervisor=expected_dict), result)
self.assertEqual(1, mock_get_uptime.call_count)
def test_uptime_non_integer_id(self):
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound, self.controller.uptime, req, 'abc')
def test_uptime_hypervisor_down(self):
with mock.patch.object(self.controller.host_api, 'get_host_uptime',
side_effect=exception.ComputeServiceUnavailable(host='dummy')
) as mock_get_uptime:
req = self._get_request(True)
hyper_id = self._get_hyper_id()
self.assertRaises(exc.HTTPBadRequest,
self.controller.uptime, req, hyper_id)
mock_get_uptime.assert_called_once_with(
mock.ANY, self.TEST_HYPERS_OBJ[0].host)
def test_uptime_hypervisor_not_mapped_service_get(self):
@mock.patch.object(self.controller.host_api, 'compute_node_get')
@mock.patch.object(self.controller.host_api, 'get_host_uptime')
@mock.patch.object(self.controller.host_api,
'service_get_by_compute_host',
side_effect=exception.HostMappingNotFound(
name='dummy'))
def _test(mock_get, _, __):
req = self._get_request(True)
hyper_id = self._get_hyper_id()
self.assertRaises(exc.HTTPNotFound,
self.controller.uptime, req, hyper_id)
self.assertTrue(mock_get.called)
_test()
def test_uptime_hypervisor_not_mapped(self):
with mock.patch.object(self.controller.host_api, 'get_host_uptime',
side_effect=exception.HostMappingNotFound(name='dummy')
) as mock_get_uptime:
req = self._get_request(True)
hyper_id = self._get_hyper_id()
self.assertRaises(exc.HTTPNotFound,
self.controller.uptime, req, hyper_id)
mock_get_uptime.assert_called_once_with(
mock.ANY, self.TEST_HYPERS_OBJ[0].host)
def test_search(self):
req = self._get_request(True)
result = self.controller.search(req, 'hyper')
self.assertEqual(dict(hypervisors=self.INDEX_HYPER_DICTS), result)
def test_search_non_exist(self):
with mock.patch.object(self.controller.host_api,
'compute_node_search_by_hypervisor',
return_value=[]) as mock_node_search:
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound, self.controller.search,
req, 'a')
self.assertEqual(1, mock_node_search.call_count)
def test_search_unmapped(self):
@mock.patch.object(self.controller.host_api,
'compute_node_search_by_hypervisor')
@mock.patch.object(self.controller.host_api,
'service_get_by_compute_host')
def _test(mock_service, mock_search):
mock_search.return_value = [mock.MagicMock()]
mock_service.side_effect = exception.HostMappingNotFound(
name='foo')
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound, self.controller.search,
req, 'a')
self.assertTrue(mock_service.called)
_test()
@mock.patch.object(objects.InstanceList, 'get_by_host',
side_effect=fake_instance_get_all_by_host)
def test_servers(self, mock_get):
req = self._get_request(True)
result = self.controller.servers(req, 'hyper')
expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS)
expected_dict[0].update({'servers': [
dict(uuid=uuids.instance_1),
dict(uuid=uuids.instance_3)]})
expected_dict[1].update({'servers': [
dict(uuid=uuids.instance_2),
dict(uuid=uuids.instance_4)]})
for output in result['hypervisors']:
servers = output['servers']
for server in servers:
del server['name']
self.assertEqual(dict(hypervisors=expected_dict), result)
def test_servers_not_mapped(self):
req = self._get_request(True)
with mock.patch.object(self.controller.host_api,
'instance_get_all_by_host') as m:
m.side_effect = exception.HostMappingNotFound(name='something')
self.assertRaises(exc.HTTPNotFound,
self.controller.servers, req, 'hyper')
def test_servers_non_id(self):
with mock.patch.object(self.controller.host_api,
'compute_node_search_by_hypervisor',
return_value=[]) as mock_node_search:
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound,
self.controller.servers,
req, '115')
self.assertEqual(1, mock_node_search.call_count)
def test_servers_with_non_integer_hypervisor_id(self):
with mock.patch.object(self.controller.host_api,
'compute_node_search_by_hypervisor',
return_value=[]) as mock_node_search:
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound,
self.controller.servers, req, 'abc')
self.assertEqual(1, mock_node_search.call_count)
def test_servers_with_no_server(self):
with mock.patch.object(self.controller.host_api,
'instance_get_all_by_host',
return_value=[]) as mock_inst_get_all:
req = self._get_request(True)
result = self.controller.servers(req, self.TEST_HYPERS_OBJ[0].id)
self.assertEqual(dict(hypervisors=self.INDEX_HYPER_DICTS), result)
self.assertTrue(mock_inst_get_all.called)
def test_statistics(self):
req = self._get_request(True)
result = self.controller.statistics(req)
self.assertEqual(dict(hypervisor_statistics=dict(
count=2,
vcpus=8,
memory_mb=20 * 1024,
local_gb=500,
vcpus_used=4,
memory_mb_used=10 * 1024,
local_gb_used=250,
free_ram_mb=10 * 1024,
free_disk_gb=250,
current_workload=4,
running_vms=4,
disk_available_least=200)), result)
class HypervisorsTestV228(HypervisorsTestV21):
api_version = '2.28'
DETAIL_HYPERS_DICTS = copy.deepcopy(HypervisorsTestV21.DETAIL_HYPERS_DICTS)
DETAIL_HYPERS_DICTS[0]['cpu_info'] = jsonutils.loads(CPU_INFO)
DETAIL_HYPERS_DICTS[1]['cpu_info'] = jsonutils.loads(CPU_INFO)
DETAIL_NULL_CPUINFO_DICT = {'': {}, None: {}}
class HypervisorsTestV233(HypervisorsTestV228):
api_version = '2.33'
def test_index_pagination(self):
req = self._get_request(True,
'/v2/1234/os-hypervisors?limit=1&marker=1')
result = self.controller.index(req)
expected = {
'hypervisors': [
{'hypervisor_hostname': 'hyper2',
'id': 2,
'state': 'up',
'status': 'enabled'}
],
'hypervisors_links': [
{'href': 'http://localhost/v2/os-hypervisors?limit=1&marker=2',
'rel': 'next'}
]
}
self.assertEqual(expected, result)
def test_index_pagination_with_invalid_marker(self):
req = self._get_request(True,
'/v2/1234/os-hypervisors?marker=99999')
self.assertRaises(exc.HTTPBadRequest,
self.controller.index, req)
def test_index_pagination_with_invalid_non_int_limit(self):
req = self._get_request(True,
'/v2/1234/os-hypervisors?limit=-9')
self.assertRaises(exception.ValidationError,
self.controller.index, req)
def test_index_pagination_with_invalid_string_limit(self):
req = self._get_request(True,
'/v2/1234/os-hypervisors?limit=abc')
self.assertRaises(exception.ValidationError,
self.controller.index, req)
def test_index_duplicate_query_parameters_with_invalid_string_limit(self):
req = self._get_request(
True,
'/v2/1234/os-hypervisors/?limit=1&limit=abc')
self.assertRaises(exception.ValidationError,
self.controller.index, req)
def test_index_duplicate_query_parameters_validation(self):
expected = [{
'hypervisor_hostname': 'hyper2',
'id': 2,
'state': 'up',
'status': 'enabled'}
]
params = {
'limit': 1,
'marker': 1,
}
for param, value in params.items():
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors?marker=1&%s=%s&%s=%s' %
(param, value, param, value))
result = self.controller.index(req)
self.assertEqual(expected, result['hypervisors'])
def test_index_pagination_with_additional_filter(self):
expected = {
'hypervisors': [
{'hypervisor_hostname': 'hyper2',
'id': 2,
'state': 'up',
'status': 'enabled'}
],
'hypervisors_links': [
{'href': 'http://localhost/v2/os-hypervisors?limit=1&marker=2',
'rel': 'next'}
]
}
req = self._get_request(
True, '/v2/1234/os-hypervisors?limit=1&marker=1&additional=3')
result = self.controller.index(req)
self.assertEqual(expected, result)
def test_detail_pagination(self):
req = self._get_request(
True, '/v2/1234/os-hypervisors/detail?limit=1&marker=1')
result = self.controller.detail(req)
link = 'http://localhost/v2/os-hypervisors/detail?limit=1&marker=2'
expected = {
'hypervisors': [
{'cpu_info': {'arch': 'x86_64',
'features': [],
'model': '',
'topology': {'cores': 1,
'sockets': 1,
'threads': 1},
'vendor': 'fake'},
'current_workload': 2,
'disk_available_least': 100,
'free_disk_gb': 125,
'free_ram_mb': 5120,
'host_ip': netaddr.IPAddress('2.2.2.2'),
'hypervisor_hostname': 'hyper2',
'hypervisor_type': 'xen',
'hypervisor_version': 3,
'id': 2,
'local_gb': 250,
'local_gb_used': 125,
'memory_mb': 10240,
'memory_mb_used': 5120,
'running_vms': 2,
'service': {'disabled_reason': None,
'host': 'compute2',
'id': 2},
'state': 'up',
'status': 'enabled',
'vcpus': 4,
'vcpus_used': 2}
],
'hypervisors_links': [{'href': link, 'rel': 'next'}]
}
self.assertEqual(expected, result)
def test_detail_pagination_with_invalid_marker(self):
req = self._get_request(True,
'/v2/1234/os-hypervisors/detail?marker=99999')
self.assertRaises(exc.HTTPBadRequest,
self.controller.detail, req)
def test_detail_pagination_with_invalid_string_limit(self):
req = self._get_request(True,
'/v2/1234/os-hypervisors/detail?limit=abc')
self.assertRaises(exception.ValidationError,
self.controller.detail, req)
def test_detail_duplicate_query_parameters_with_invalid_string_limit(self):
req = self._get_request(
True,
'/v2/1234/os-hypervisors/detail?limit=1&limit=abc')
self.assertRaises(exception.ValidationError,
self.controller.detail, req)
def test_detail_duplicate_query_parameters_validation(self):
expected = [
{'cpu_info': {'arch': 'x86_64',
'features': [],
'model': '',
'topology': {'cores': 1,
'sockets': 1,
'threads': 1},
'vendor': 'fake'},
'current_workload': 2,
'disk_available_least': 100,
'free_disk_gb': 125,
'free_ram_mb': 5120,
'host_ip': netaddr.IPAddress('2.2.2.2'),
'hypervisor_hostname': 'hyper2',
'hypervisor_type': 'xen',
'hypervisor_version': 3,
'id': 2,
'local_gb': 250,
'local_gb_used': 125,
'memory_mb': 10240,
'memory_mb_used': 5120,
'running_vms': 2,
'service': {'disabled_reason': None,
'host': 'compute2',
'id': 2},
'state': 'up',
'status': 'enabled',
'vcpus': 4,
'vcpus_used': 2}
]
params = {
'limit': 1,
'marker': 1,
}
for param, value in params.items():
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors/detail?marker=1&%s=%s&%s=%s' %
(param, value, param, value))
result = self.controller.detail(req)
self.assertEqual(expected, result['hypervisors'])
def test_detail_pagination_with_additional_filter(self):
link = 'http://localhost/v2/os-hypervisors/detail?limit=1&marker=2'
expected = {
'hypervisors': [
{'cpu_info': {'arch': 'x86_64',
'features': [],
'model': '',
'topology': {'cores': 1,
'sockets': 1,
'threads': 1},
'vendor': 'fake'},
'current_workload': 2,
'disk_available_least': 100,
'free_disk_gb': 125,
'free_ram_mb': 5120,
'host_ip': netaddr.IPAddress('2.2.2.2'),
'hypervisor_hostname': 'hyper2',
'hypervisor_type': 'xen',
'hypervisor_version': 3,
'id': 2,
'local_gb': 250,
'local_gb_used': 125,
'memory_mb': 10240,
'memory_mb_used': 5120,
'running_vms': 2,
'service': {'disabled_reason': None,
'host': 'compute2',
'id': 2},
'state': 'up',
'status': 'enabled',
'vcpus': 4,
'vcpus_used': 2}
],
'hypervisors_links': [{
'href': link,
'rel': 'next'}]
}
req = self._get_request(
True, '/v2/1234/os-hypervisors/detail?limit=1&marker=1&unknown=2')
result = self.controller.detail(req)
self.assertEqual(expected, result)
class HypervisorsTestV252(HypervisorsTestV233):
"""This is a boundary test to make sure 2.52 works like 2.33."""
api_version = '2.52'
class HypervisorsTestV253(HypervisorsTestV252):
api_version = hypervisors_v21.UUID_FOR_ID_MIN_VERSION
expect_uuid_for_id = True
# This is an expected response for index().
INDEX_HYPER_DICTS = [
dict(id=uuids.hyper1, hypervisor_hostname="hyper1",
state='up', status='enabled'),
dict(id=uuids.hyper2, hypervisor_hostname="hyper2",
state='up', status='enabled')]
def setUp(self):
super(HypervisorsTestV253, self).setUp()
# This is an expected response for detail().
for index, detail_hyper_dict in enumerate(self.DETAIL_HYPERS_DICTS):
detail_hyper_dict['id'] = TEST_HYPERS[index]['uuid']
detail_hyper_dict['service']['id'] = TEST_SERVICES[index].uuid
def test_servers(self):
"""Asserts that calling the servers route after 2.52 fails."""
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.servers,
self._get_request(True), 'hyper')
def test_servers_with_no_server(self):
"""Tests GET /os-hypervisors?with_servers=1 when there are no
instances on the given host.
"""
with mock.patch.object(self.controller.host_api,
'instance_get_all_by_host',
return_value=[]) as mock_inst_get_all:
req = self._get_request(use_admin_context=True,
url='/os-hypervisors?with_servers=1')
result = self.controller.index(req)
self.assertEqual(dict(hypervisors=self.INDEX_HYPER_DICTS), result)
# instance_get_all_by_host is called for each hypervisor
self.assertEqual(2, mock_inst_get_all.call_count)
mock_inst_get_all.assert_has_calls((
mock.call(req.environ['nova.context'], TEST_HYPERS_OBJ[0].host),
mock.call(req.environ['nova.context'], TEST_HYPERS_OBJ[1].host)))
def test_servers_not_mapped(self):
"""Tests that instance_get_all_by_host fails with HostMappingNotFound.
"""
req = self._get_request(use_admin_context=True,
url='/os-hypervisors?with_servers=1')
with mock.patch.object(
self.controller.host_api, 'instance_get_all_by_host',
side_effect=exception.HostMappingNotFound(name='something')):
result = self.controller.index(req)
self.assertEqual(dict(hypervisors=[]), result)
def test_list_with_servers(self):
"""Tests GET /os-hypervisors?with_servers=True"""
instances = [
objects.InstanceList(objects=[objects.Instance(
id=1, uuid=uuids.hyper1_instance1)]),
objects.InstanceList(objects=[objects.Instance(
id=2, uuid=uuids.hyper2_instance1)])]
with mock.patch.object(self.controller.host_api,
'instance_get_all_by_host',
side_effect=instances) as mock_inst_get_all:
req = self._get_request(use_admin_context=True,
url='/os-hypervisors?with_servers=True')
result = self.controller.index(req)
index_with_servers = copy.deepcopy(self.INDEX_HYPER_DICTS)
index_with_servers[0]['servers'] = [
{'name': 'instance-00000001', 'uuid': uuids.hyper1_instance1}]
index_with_servers[1]['servers'] = [
{'name': 'instance-00000002', 'uuid': uuids.hyper2_instance1}]
self.assertEqual(dict(hypervisors=index_with_servers), result)
# instance_get_all_by_host is called for each hypervisor
self.assertEqual(2, mock_inst_get_all.call_count)
mock_inst_get_all.assert_has_calls((
mock.call(req.environ['nova.context'], TEST_HYPERS_OBJ[0].host),
mock.call(req.environ['nova.context'], TEST_HYPERS_OBJ[1].host)))
def test_list_with_servers_invalid_parameter(self):
"""Tests using an invalid with_servers query parameter."""
req = self._get_request(use_admin_context=True,
url='/os-hypervisors?with_servers=invalid')
self.assertRaises(
exception.ValidationError, self.controller.index, req)
def test_list_with_hostname_pattern_and_paging_parameters(self):
"""This is a negative test to validate that trying to list hypervisors
with a hostname pattern and paging parameters results in a 400 error.
"""
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors?hypervisor_hostname_pattern=foo&'
'limit=1&marker=%s' % uuids.marker)
ex = self.assertRaises(exc.HTTPBadRequest, self.controller.index, req)
self.assertIn('Paging over hypervisors with the '
'hypervisor_hostname_pattern query parameter is not '
'supported.', six.text_type(ex))
def test_servers_with_non_integer_hypervisor_id(self):
"""This is a poorly named test, it's really checking the 404 case where
there is no match for the hostname pattern.
"""
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors?with_servers=yes&'
'hypervisor_hostname_pattern=shenzhen')
with mock.patch.object(self.controller.host_api,
'compute_node_search_by_hypervisor',
return_value=objects.ComputeNodeList()) as s:
self.assertRaises(exc.HTTPNotFound, self.controller.index, req)
s.assert_called_once_with(req.environ['nova.context'], 'shenzhen')
def test_servers_non_id(self):
"""There is no reason to test this for 2.53 since the
/os-hypervisors/servers route is deprecated.
"""
pass
def test_search_old_route(self):
"""Asserts that calling the search route after 2.52 fails."""
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.search,
self._get_request(True), 'hyper')
def test_search(self):
"""Test listing hypervisors with details and using the
hypervisor_hostname_pattern query string.
"""
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors?hypervisor_hostname_pattern=shenzhen')
with mock.patch.object(self.controller.host_api,
'compute_node_search_by_hypervisor',
return_value=objects.ComputeNodeList(
objects=[TEST_HYPERS_OBJ[0]])) as s:
result = self.controller.detail(req)
s.assert_called_once_with(req.environ['nova.context'], 'shenzhen')
expected = {
'hypervisors': [
{'cpu_info': {'arch': 'x86_64',
'features': [],
'model': '',
'topology': {'cores': 1,
'sockets': 1,
'threads': 1},
'vendor': 'fake'},
'current_workload': 2,
'disk_available_least': 100,
'free_disk_gb': 125,
'free_ram_mb': 5120,
'host_ip': netaddr.IPAddress('1.1.1.1'),
'hypervisor_hostname': 'hyper1',
'hypervisor_type': 'xen',
'hypervisor_version': 3,
'id': TEST_HYPERS_OBJ[0].uuid,
'local_gb': 250,
'local_gb_used': 125,
'memory_mb': 10240,
'memory_mb_used': 5120,
'running_vms': 2,
'service': {'disabled_reason': None,
'host': 'compute1',
'id': TEST_SERVICES[0].uuid},
'state': 'up',
'status': 'enabled',
'vcpus': 4,
'vcpus_used': 2}
]
}
# There are no links when using the hypervisor_hostname_pattern
# query string since we can't page using a pattern matcher.
self.assertNotIn('hypervisors_links', result)
self.assertDictEqual(expected, result)
def test_search_invalid_hostname_pattern_parameter(self):
"""Tests passing an invalid hypervisor_hostname_pattern query
parameter.
"""
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors?hypervisor_hostname_pattern=invalid~host')
self.assertRaises(
exception.ValidationError, self.controller.detail, req)
def test_search_non_exist(self):
"""This is a duplicate of test_servers_with_non_integer_hypervisor_id.
"""
pass
def test_search_unmapped(self):
"""This is already tested with test_index_compute_host_not_mapped."""
pass
def test_show_non_integer_id(self):
"""There is no reason to test this for 2.53 since 2.53 requires a
non-integer id (requires a uuid).
"""
pass
def test_show_integer_id(self):
"""Tests that we get a 400 if passed a hypervisor integer id to show().
"""
req = self._get_request(True)
ex = self.assertRaises(exc.HTTPBadRequest,
self.controller.show, req, '1')
self.assertIn('Invalid uuid 1', six.text_type(ex))
def test_show_with_servers_invalid_parameter(self):
"""Tests passing an invalid value for the with_servers query parameter
to the show() method to make sure the query parameter is validated.
"""
hyper_id = self._get_hyper_id()
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors/%s?with_servers=invalid' % hyper_id)
ex = self.assertRaises(
exception.ValidationError, self.controller.show, req, hyper_id)
self.assertIn('with_servers', six.text_type(ex))
def test_show_with_servers_host_mapping_not_found(self):
"""Tests that a 404 is returned if instance_get_all_by_host raises
HostMappingNotFound.
"""
hyper_id = self._get_hyper_id()
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors/%s?with_servers=true' % hyper_id)
with mock.patch.object(
self.controller.host_api, 'instance_get_all_by_host',
side_effect=exception.HostMappingNotFound(name=hyper_id)):
self.assertRaises(exc.HTTPNotFound, self.controller.show,
req, hyper_id)
def test_show_with_servers(self):
"""Tests the show() result when servers are included in the output."""
instances = objects.InstanceList(objects=[objects.Instance(
id=1, uuid=uuids.hyper1_instance1)])
hyper_id = self._get_hyper_id()
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors/%s?with_servers=on' % hyper_id)
with mock.patch.object(self.controller.host_api,
'instance_get_all_by_host',
return_value=instances) as mock_inst_get_all:
result = self.controller.show(req, hyper_id)
show_with_servers = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0])
show_with_servers['servers'] = [
{'name': 'instance-00000001', 'uuid': uuids.hyper1_instance1}]
self.assertDictEqual(dict(hypervisor=show_with_servers), result)
# instance_get_all_by_host is called
mock_inst_get_all.assert_called_once_with(
req.environ['nova.context'], TEST_HYPERS_OBJ[0].host)
def test_uptime_non_integer_id(self):
"""There is no reason to test this for 2.53 since 2.53 requires a
non-integer id (requires a uuid).
"""
pass
def test_uptime_integer_id(self):
"""Tests that we get a 400 if passed a hypervisor integer id to
uptime().
"""
req = self._get_request(True)
ex = self.assertRaises(exc.HTTPBadRequest,
self.controller.uptime, req, '1')
self.assertIn('Invalid uuid 1', six.text_type(ex))
def test_detail_pagination(self):
"""Tests details paging with uuid markers."""
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors/detail?limit=1&marker=%s' %
TEST_HYPERS_OBJ[0].uuid)
result = self.controller.detail(req)
link = ('http://localhost/v2/os-hypervisors/detail?limit=1&marker=%s' %
TEST_HYPERS_OBJ[1].uuid)
expected = {
'hypervisors': [
{'cpu_info': {'arch': 'x86_64',
'features': [],
'model': '',
'topology': {'cores': 1,
'sockets': 1,
'threads': 1},
'vendor': 'fake'},
'current_workload': 2,
'disk_available_least': 100,
'free_disk_gb': 125,
'free_ram_mb': 5120,
'host_ip': netaddr.IPAddress('2.2.2.2'),
'hypervisor_hostname': 'hyper2',
'hypervisor_type': 'xen',
'hypervisor_version': 3,
'id': TEST_HYPERS_OBJ[1].uuid,
'local_gb': 250,
'local_gb_used': 125,
'memory_mb': 10240,
'memory_mb_used': 5120,
'running_vms': 2,
'service': {'disabled_reason': None,
'host': 'compute2',
'id': TEST_SERVICES[1].uuid},
'state': 'up',
'status': 'enabled',
'vcpus': 4,
'vcpus_used': 2}
],
'hypervisors_links': [{'href': link, 'rel': 'next'}]
}
self.assertEqual(expected, result)
def test_detail_pagination_with_invalid_marker(self):
"""Tests detail paging with an invalid marker (not found)."""
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors/detail?marker=%s' % uuids.invalid_marker)
self.assertRaises(exc.HTTPBadRequest,
self.controller.detail, req)
def test_detail_pagination_with_additional_filter(self):
req = self._get_request(
True, '/v2/1234/os-hypervisors/detail?limit=1&marker=9&unknown=2')
self.assertRaises(exception.ValidationError,
self.controller.detail, req)
def test_detail_duplicate_query_parameters_validation(self):
"""Tests that the list Detail query parameter schema enforces only a
single entry for any query parameter.
"""
params = {
'limit': 1,
'marker': uuids.marker,
'hypervisor_hostname_pattern': 'foo',
'with_servers': 'true'
}
for param, value in params.items():
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors/detail?%s=%s&%s=%s' %
(param, value, param, value))
self.assertRaises(exception.ValidationError,
self.controller.detail, req)
def test_index_pagination(self):
"""Tests index paging with uuid markers."""
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors?limit=1&marker=%s' %
TEST_HYPERS_OBJ[0].uuid)
result = self.controller.index(req)
link = ('http://localhost/v2/os-hypervisors?limit=1&marker=%s' %
TEST_HYPERS_OBJ[1].uuid)
expected = {
'hypervisors': [{
'hypervisor_hostname': 'hyper2',
'id': TEST_HYPERS_OBJ[1].uuid,
'state': 'up',
'status': 'enabled'
}],
'hypervisors_links': [{'href': link, 'rel': 'next'}]
}
self.assertEqual(expected, result)
def test_index_pagination_with_invalid_marker(self):
"""Tests index paging with an invalid marker (not found)."""
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors?marker=%s' % uuids.invalid_marker)
self.assertRaises(exc.HTTPBadRequest,
self.controller.index, req)
def test_index_pagination_with_additional_filter(self):
req = self._get_request(
True, '/v2/1234/os-hypervisors/?limit=1&marker=9&unknown=2')
self.assertRaises(exception.ValidationError,
self.controller.index, req)
def test_index_duplicate_query_parameters_validation(self):
"""Tests that the list query parameter schema enforces only a single
entry for any query parameter.
"""
params = {
'limit': 1,
'marker': uuids.marker,
'hypervisor_hostname_pattern': 'foo',
'with_servers': 'true'
}
for param, value in params.items():
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors?%s=%s&%s=%s' %
(param, value, param, value))
self.assertRaises(exception.ValidationError,
self.controller.index, req)
def test_show_duplicate_query_parameters_validation(self):
"""Tests that the show query parameter schema enforces only a single
entry for any query parameter.
"""
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors/%s?with_servers=1&with_servers=1' %
uuids.hyper1)
self.assertRaises(exception.ValidationError,
self.controller.show, req, uuids.hyper1)
class HypervisorsTestV275(HypervisorsTestV253):
api_version = '2.75'
def _test_servers_with_no_server(self, func, version=None, **kwargs):
"""Tests GET APIs return 'servers' field in response even
no servers on hypervisors.
"""
with mock.patch.object(self.controller.host_api,
'instance_get_all_by_host',
return_value=[]):
req = fakes.HTTPRequest.blank('/os-hypervisors?with_servers=1',
use_admin_context=True,
version=version or self.api_version)
result = func(req, **kwargs)
return result
def test_list_servers_with_no_server_old_version(self):
result = self._test_servers_with_no_server(self.controller.index,
version='2.74')
for hyper in result['hypervisors']:
self.assertNotIn('servers', hyper)
def test_list_detail_servers_with_no_server_old_version(self):
result = self._test_servers_with_no_server(self.controller.detail,
version='2.74')
for hyper in result['hypervisors']:
self.assertNotIn('servers', hyper)
def test_show_servers_with_no_server_old_version(self):
result = self._test_servers_with_no_server(self.controller.show,
version='2.74',
id=uuids.hyper1)
self.assertNotIn('servers', result['hypervisor'])
def test_servers_with_no_server(self):
result = self._test_servers_with_no_server(self.controller.index)
for hyper in result['hypervisors']:
self.assertEqual(0, len(hyper['servers']))
def test_list_detail_servers_with_empty_server_list(self):
result = self._test_servers_with_no_server(self.controller.detail)
for hyper in result['hypervisors']:
self.assertEqual(0, len(hyper['servers']))
def test_show_servers_with_empty_server_list(self):
result = self._test_servers_with_no_server(self.controller.show,
id=uuids.hyper1)
self.assertEqual(0, len(result['hypervisor']['servers']))
| 41.592939
| 79
| 0.570491
|
4a153ba8803041d563cef13b42c97244772b14e9
| 203
|
py
|
Python
|
gamesolver/Games/TierGame.py
|
Ant1ng2/gamesolver
|
fff76a1fad7700e885c133919b6743882f92e7e9
|
[
"MIT"
] | null | null | null |
gamesolver/Games/TierGame.py
|
Ant1ng2/gamesolver
|
fff76a1fad7700e885c133919b6743882f92e7e9
|
[
"MIT"
] | 3
|
2019-07-22T04:15:55.000Z
|
2019-07-22T04:26:31.000Z
|
gamesolver/Games/TierGame.py
|
Ant1ng2/Gamesolver
|
fff76a1fad7700e885c133919b6743882f92e7e9
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from . Game import Game
class TierGame(Game):
@abstractmethod
def getNumTiers(self):
pass
@abstractmethod
def getCurTier(self):
pass
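# Minimal illustrative subclass (hypothetical; a concrete game would also have
# to satisfy whatever the Game base class requires):
#
#     class TwoTierGame(TierGame):
#         def getNumTiers(self):
#             return 2
#         def getCurTier(self):
#             return 0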
| 16.916667
| 35
| 0.669951
|
4a153c6ad19dcccdbe79fb4f030f76bc6ea07768
| 2,515
|
py
|
Python
|
corgi/prototypes/loadbalance_test.py
|
Krissmedt/imprunko
|
94171d0d47171cc4b199cd52f5f29385cbff903e
|
[
"MIT"
] | null | null | null |
corgi/prototypes/loadbalance_test.py
|
Krissmedt/imprunko
|
94171d0d47171cc4b199cd52f5f29385cbff903e
|
[
"MIT"
] | null | null | null |
corgi/prototypes/loadbalance_test.py
|
Krissmedt/imprunko
|
94171d0d47171cc4b199cd52f5f29385cbff903e
|
[
"MIT"
] | null | null | null |
import numpy as np
import math
#import scipy
#from pylab import *
#import palettable as pal
#from matplotlib import cm
#cmap = cm.get_cmap('inferno_r')
#cmap = pal.wesanderson.Moonrise1_5.mpl_colormap
import loadbalance as corgi
# setup configuration
corgi.xmin = corgi.ymin = 0.0
corgi.xmax = corgi.ymax = 1.0
corgi.Nrank = 4
corgi.Nx = 4
corgi.Ny = 4
corgi.grid = np.zeros( (corgi.Nx, corgi.Ny) )
corgi.grid[:2, :2] = 0
corgi.grid[:2, 2:] = 1
corgi.grid[2:, :2] = 2
corgi.grid[2:, 2:] = 3
print(corgi.grid)
# load nodes
nodes = []
for rank in range(corgi.Nrank):
    # NOTE: the original called corgi.grid(rank), but corgi.grid is the numpy
    # array configured above, so that call cannot work; it presumably meant
    # the loadbalance module's node constructor for the given rank.
    n = corgi.grid(rank)
    for i in range(corgi.Nx):
        for j in range(corgi.Ny):
            if corgi.grid[i,j] == rank:
                c = corgi.cell(i,j,rank)
                c.data = corgi.Nrank*1000 + i*100 + j*10
                print("inserting cell {} at ({},{}) d={}".format(rank, i, j, c.data))
                #n.cells = np.append(n.cells, c)
                n.cells.append( c )
    nodes.append(n)
print("\n\n\n")
print("Unit cell test")
print("--------------------------------------------------")
cell1 = nodes[0].cells[0]
print("0 0 data", cell1.data)
cell1.data += 1
print("neighs")
print("0 0", cell1.neighs(0, 0))
print("1 0", cell1.neighs(1, 0))
print("0 1", cell1.neighs(0, 1))
print("1 1", cell1.neighs(1, 1))
print("-1 0", cell1.neighs(-1, 0))
print("0 -1", cell1.neighs(0, -1))
print("-1 -1", cell1.neighs(-1, -1))
print("full neighborhood")
print(cell1.full_neighborhood())
print("virtuals:")
print("--------------------------------------------------")
print(nodes[0].get_all_virtuals())
print("returning cell and pointer-like behavior")
print("--------------------------------------------------")
print("0 1 index:", nodes[0].get_neighbor_index(cell1, 0, 1))
cell2 = nodes[0].get_neighbor_cell(cell1, 0, 1)
print("data value:", cell2.data)
cell2.data += 1
cell2copy = nodes[0].get_neighbor_cell(cell1, 0, 1)
print("data value:", cell2copy.data)
cell2.data += 1
print("data value:", cell2copy.data)
print("virtual neighborhood:")
print("--------------------------------------------------")
corgi.communicate(nodes)
cell = nodes[0].cells[0]
print(cell.owner)
print("virtual neighborhood", nodes[0].virtual_neighborhood(cell))
print("communication")
print("--------------------------------------------------")
#nodes[0].pack_virtuals()
corgi.communicate(nodes)
print(nodes[0].send_queue_address)
print(nodes[0].virtuals[0].data)
print("--------------------------------------------------")
print("adoption")
corgi.adopt(nodes)
| 21.134454
| 84
| 0.577734
|
4a153c9ea40c6769281d624a151b7a8ed5398433
| 2,082
|
py
|
Python
|
aliyun-python-sdk-smartag/aliyunsdksmartag/request/v20180313/GetCloudConnectNetworkUseLimitRequest.py
|
jia-jerry/aliyun-openapi-python-sdk
|
e90f3683a250cfec5b681b5f1d73a68f0dc9970d
|
[
"Apache-2.0"
] | 1
|
2021-03-08T02:59:17.000Z
|
2021-03-08T02:59:17.000Z
|
aliyun-python-sdk-smartag/aliyunsdksmartag/request/v20180313/GetCloudConnectNetworkUseLimitRequest.py
|
jia-jerry/aliyun-openapi-python-sdk
|
e90f3683a250cfec5b681b5f1d73a68f0dc9970d
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-smartag/aliyunsdksmartag/request/v20180313/GetCloudConnectNetworkUseLimitRequest.py
|
jia-jerry/aliyun-openapi-python-sdk
|
e90f3683a250cfec5b681b5f1d73a68f0dc9970d
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksmartag.endpoint import endpoint_data
class GetCloudConnectNetworkUseLimitRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Smartag', '2018-03-13', 'GetCloudConnectNetworkUseLimit','smartag')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
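# A minimal usage sketch, assuming the standard aliyunsdkcore AcsClient API and
# placeholder credentials/region; it only illustrates how the setters above feed
# query parameters into a request before it is sent.
if __name__ == "__main__":
    from aliyunsdkcore.client import AcsClient
    client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")
    request = GetCloudConnectNetworkUseLimitRequest()
    request.set_ResourceOwnerId(1234567890)  # optional; mirrors the setter defined above
    print(client.do_action_with_exception(request))  # raw JSON bytes returned by the Smartag API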
| 37.178571
| 97
| 0.779059
|
4a153cab581f4af7bcd3af0dd9eac718bd272983
| 606
|
py
|
Python
|
complete_the_pattern_#14.py
|
Kunalpod/codewars
|
8dc1af2f3c70e209471045118fd88b3ea1e627e5
|
[
"MIT"
] | null | null | null |
complete_the_pattern_#14.py
|
Kunalpod/codewars
|
8dc1af2f3c70e209471045118fd88b3ea1e627e5
|
[
"MIT"
] | null | null | null |
complete_the_pattern_#14.py
|
Kunalpod/codewars
|
8dc1af2f3c70e209471045118fd88b3ea1e627e5
|
[
"MIT"
] | null | null | null |
#Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Complete The Pattern #14
#Problem level: 6 kyu
def pattern(*args):
n = args[0]
y = 1 if len(args)==1 else args[1]
s= ""
if n<1: return s
if y<=1: y = 1
for i in range(y):
x = 1 if i==0 else 2
for j in range(x, n):
s += ' '*(j-1) + str(j%10) + ' '*(2*(n-j)-1) + str(j%10) + ' '*(j-1) + '\n'
s += ' '*(n-1) + str(n%10) + ' '*(n-1) + '\n'
for j in reversed(list(range(1, n))):
s += ' '*(j-1) + str(j%10) + ' '*(2*(n-j)-1) + str(j%10) + ' '*(j-1) + '\n'
return s[:-1]
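# A short, hedged usage sketch: pattern(n) builds one diamond of the digits 1..n as a string,
# and pattern(n, y) chains y such diamonds; the exact spacing is easiest to see by running it.
if __name__ == "__main__":
    print(pattern(5))
    print(pattern(4, 2))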
| 30.3
| 87
| 0.425743
|
4a153e1f3118ecdc1315ceb6815215befcbe007a
| 1,665
|
py
|
Python
|
office365_admin/komand_office365_admin/actions/assign_license_to_user/action.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | 1
|
2020-03-18T09:14:55.000Z
|
2020-03-18T09:14:55.000Z
|
office365_admin/komand_office365_admin/actions/assign_license_to_user/action.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | 1
|
2021-02-23T23:57:37.000Z
|
2021-02-23T23:57:37.000Z
|
office365_admin/komand_office365_admin/actions/assign_license_to_user/action.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | null | null | null |
import komand
from .schema import AssignLicenseToUserInput, AssignLicenseToUserOutput, Input, Output, Component
# Custom imports below
import requests
from komand.exceptions import PluginException
class AssignLicenseToUser(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="assign_license_to_user",
description=Component.DESCRIPTION,
input=AssignLicenseToUserInput(),
output=AssignLicenseToUserOutput())
def run(self, params={}):
user_principal_name = params.get(Input.USER_PRINCIPAL_NAME)
sku_id = params.get(Input.SKU_ID)
token = self.connection.access_token
base_url = "https://graph.microsoft.com/v1.0/users/%s/assignLicense" % user_principal_name
headers = {"Authorization": "Bearer %s" % token, "Content-Type": "application/json",}
body = {
"addLicenses": [{
"disabledPlans": [],
"skuId": sku_id
}],
"removeLicenses": []
}
        try:
            response = requests.post(base_url, json=body, headers=headers)
        except requests.RequestException as error:
            # There is no response object if the request itself failed, so report the exception instead.
            raise PluginException(cause=f"There was an issue with the Assign License request. Double-check the user name: {user_principal_name}",
                                  data=str(error))
if response.status_code == 200:
return {Output.SUCCESS: True}
else:
raise PluginException(f"The response from Office365 indicated something went wrong: {response.status_code}",
data=response.text)
| 38.72093
| 145
| 0.621021
|
4a153e61848e86d786a6f10e9de3820c863bbbed
| 2,577
|
py
|
Python
|
docs/federated/docs/source_en/conf.py
|
bwcsswcx/docs
|
e54b179bb8ca020a9bf0c83926822048057e9536
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
docs/federated/docs/source_en/conf.py
|
bwcsswcx/docs
|
e54b179bb8ca020a9bf0c83926822048057e9536
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
docs/federated/docs/source_en/conf.py
|
bwcsswcx/docs
|
e54b179bb8ca020a9bf0c83926822048057e9536
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import IPython
import re
import nbsphinx as nbs
# -- Project information -----------------------------------------------------
project = 'MindSpore'
copyright = '2021, MindSpore'
author = 'MindSpore'
# The full version, including alpha/beta/rc tags
release = 'master'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx_markdown_tables',
'myst_parser',
'nbsphinx',
'sphinx.ext.mathjax',
'IPython.sphinxext.ipython_console_highlighting'
]
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
highlight_language = 'none'
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_search_language = 'en'
# Remove extra outputs for nbsphinx extension.
nbsphinx_source_re = re.compile(r"(app\.connect\('html-collect-pages', html_collect_pages\))")
nbsphinx_math_re = re.compile(r"(\S.*$)")
mod_path = os.path.abspath(nbs.__file__)
with open(mod_path, "r+", encoding="utf8") as f:
contents = f.readlines()
for num, line in enumerate(contents):
_content_re = nbsphinx_source_re.search(line)
if _content_re and "#" not in line:
contents[num] = nbsphinx_source_re.sub(r"# \g<1>", line)
if "mathjax_config = app.config" in line and "#" not in line:
contents[num:num+10] = [nbsphinx_math_re.sub(r"# \g<1>", i) for i in contents[num:num+10]]
break
exec("".join(contents), nbs.__dict__)
| 32.620253
| 102
| 0.654249
|
4a153eb36681d2f1cf7aed09b5f72af4b7f019db
| 2,220
|
py
|
Python
|
python/dqTools/Handler.py
|
WhiteTshirtXI/subgrid
|
724dfd543c19563bd7874cf952c21f25dd7d1352
|
[
"MIT"
] | null | null | null |
python/dqTools/Handler.py
|
WhiteTshirtXI/subgrid
|
724dfd543c19563bd7874cf952c21f25dd7d1352
|
[
"MIT"
] | 3
|
2018-10-22T08:35:17.000Z
|
2018-11-07T16:14:36.000Z
|
python/dqTools/Handler.py
|
otbrown/subgrid
|
724dfd543c19563bd7874cf952c21f25dd7d1352
|
[
"MIT"
] | 1
|
2020-05-07T07:48:02.000Z
|
2020-05-07T07:48:02.000Z
|
"""Adds the ability to Controllers to checkpoint upon receipt of a
signal, by default SIGTERM. There can be only one instance of Handler.
Your class must subclass from both Handler.Handler and
Controller.Controller, furthermore Handler must be the leftmost of the
superclasses. E.g.,
class MyController(Handler.Handler, Controller.Controller):
pass
"""
import sys
import signal
import weakref
class Handler(object):
# The commented out bits were from an attempt to relax the
# singleton requirement.
instance = None#weakref.WeakValueDictionary()
#i = 0
def __new__(cls, *args, **kwargs):
if Handler.instance is None or Handler.instance() is None:
            self = object.__new__(cls)  # object.__new__ takes no extra arguments (TypeError on Python 3)
self._signalled = False
Handler.instance = weakref.ref(self)
else:
raise RuntimeError('This is a singleton class that inherits from Handler!')
#cls.instances[cls.i] = self
#cls.i += 1
return self
@classmethod
def handle(cls, signum, frame):
#for con in cls.instances.values():
if cls.instance is not None:
con = cls.instance()
if con is not None:
# easiest would be to call con.checkpoint() but we can't
# just stop part way through a timestep, need to let this
# one finish, THEN checkpoint (& die). Instead, set
# con._signalled to True and alter isCPStep to deal.
con._signalled = True
con.log('Received signal to checkpoint and die')
return
def isCheckpointStep(self):
if self._signalled:
return True
return super(Handler, self).isCheckpointStep()
def checkpoint(self):
signalled = self._signalled
# We really don't want to be saving the state with this flag set:
# as soon as it runs on resume it will checkpoint & die!
self._signalled = False
super(Handler, self).checkpoint()
if signalled:
self.log('Terminating due to signal... bye!')
sys.exit(0)
pass
return
pass
signal.signal(signal.SIGTERM, Handler.handle)
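# A hedged usage sketch with a stub in place of the real Controller.Controller (assumed to
# provide isCheckpointStep(), checkpoint() and log()); Handler must stay the leftmost base
# so its overriding hooks come first in the MRO.
if __name__ == "__main__":
    class StubController(object):
        def isCheckpointStep(self):
            return False
        def checkpoint(self):
            self.log("checkpointing")
        def log(self, msg):
            print(msg)
    class MyController(Handler, StubController):
        pass
    con = MyController()
    con.checkpoint()                        # ordinary checkpoint, does not exit
    Handler.handle(signal.SIGTERM, None)    # simulate receiving SIGTERM
    print(con.isCheckpointStep())           # True: the next checkpoint() would exit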
| 30.833333
| 87
| 0.625225
|
4a153ecca2202f1beaa7a797764d186665196ca6
| 291
|
py
|
Python
|
tests/implementations/memory.py
|
timwford/fastapi-crudrouter
|
683d0ac979d5547a19de928870b2f377fca80b94
|
[
"MIT"
] | 2
|
2021-02-18T21:24:55.000Z
|
2022-01-28T14:40:23.000Z
|
tests/implementations/memory.py
|
jorgerpo/fastapi-crudrouter
|
bcae0dc780b12831d181b645aaa4dc1fde278870
|
[
"MIT"
] | null | null | null |
tests/implementations/memory.py
|
jorgerpo/fastapi-crudrouter
|
bcae0dc780b12831d181b645aaa4dc1fde278870
|
[
"MIT"
] | null | null | null |
from fastapi import FastAPI
from fastapi_crudrouter import MemoryCRUDRouter as CRUDRouter
from tests import Potato, Carrot
def memory_implementation():
app = FastAPI()
app.include_router(CRUDRouter(schema=Potato))
app.include_router(CRUDRouter(schema=Carrot))
return app
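# A hedged usage sketch: FastAPI's TestClient can exercise the generated CRUD routes.
# The "/potato" prefix assumes crudrouter's default of lowercasing the schema name.
if __name__ == "__main__":
    from fastapi.testclient import TestClient
    client = TestClient(memory_implementation())
    print(client.get("/potato").status_code)  # expected 200 with an empty list body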
| 22.384615
| 61
| 0.783505
|
4a153f30958b638494b4223d628a5383c69670da
| 24,972
|
py
|
Python
|
addons/cats-blender-plugin-0-19-0/extern_tools/mmd_tools_local/core/morph.py
|
trisadmeslek/V-Sekai-Blender-tools
|
0d8747387c58584b50c69c61ba50a881319114f8
|
[
"MIT"
] | 725
|
2020-06-30T00:12:35.000Z
|
2021-11-07T03:10:44.000Z
|
addons/cats-blender-plugin-0-19-0/extern_tools/mmd_tools_local/core/morph.py
|
trisadmeslek/V-Sekai-Blender-tools
|
0d8747387c58584b50c69c61ba50a881319114f8
|
[
"MIT"
] | 144
|
2017-12-27T20:57:07.000Z
|
2020-06-27T13:58:40.000Z
|
addons/cats-blender-plugin-0-19-0/extern_tools/mmd_tools_local/core/morph.py
|
trisadmeslek/V-Sekai-Blender-tools
|
0d8747387c58584b50c69c61ba50a881319114f8
|
[
"MIT"
] | 106
|
2020-07-04T02:56:27.000Z
|
2021-11-07T08:54:11.000Z
|
# -*- coding: utf-8 -*-
import re
import bpy
from mmd_tools_local import bpyutils
from mmd_tools_local.bpyutils import SceneOp
from mmd_tools_local.bpyutils import ObjectOp
from mmd_tools_local.bpyutils import TransformConstraintOp
class FnMorph(object):
def __init__(self, morph, model):
self.__morph = morph
self.__rig = model
@classmethod
def storeShapeKeyOrder(cls, obj, shape_key_names):
if len(shape_key_names) < 1:
return
assert(SceneOp(bpy.context).active_object == obj)
if obj.data.shape_keys is None:
bpy.ops.object.shape_key_add()
if bpy.app.version < (2, 73, 0):
def __move_to_bottom(key_blocks, name):
obj.active_shape_key_index = key_blocks.find(name)
for move in range(len(key_blocks)-1-obj.active_shape_key_index):
bpy.ops.object.shape_key_move(type='DOWN')
else:
def __move_to_bottom(key_blocks, name):
obj.active_shape_key_index = key_blocks.find(name)
bpy.ops.object.shape_key_move(type='BOTTOM')
key_blocks = obj.data.shape_keys.key_blocks
for name in shape_key_names:
if name not in key_blocks:
obj.shape_key_add(name=name)
elif len(key_blocks) > 1:
__move_to_bottom(key_blocks, name)
@classmethod
def fixShapeKeyOrder(cls, obj, shape_key_names):
if len(shape_key_names) < 1:
return
assert(SceneOp(bpy.context).active_object == obj)
key_blocks = getattr(obj.data.shape_keys, 'key_blocks', None)
if key_blocks is None:
return
if bpy.app.version < (2, 73, 0):
len_key_blocks = len(key_blocks)
for ii, name in enumerate(x for x in reversed(shape_key_names) if x in key_blocks):
obj.active_shape_key_index = idx = key_blocks.find(name)
offset = (len_key_blocks - 1 - idx) - ii
move_type = 'UP' if offset < 0 else 'DOWN'
for move in range(abs(offset)):
bpy.ops.object.shape_key_move(type=move_type)
else:
for name in shape_key_names:
idx = key_blocks.find(name)
if idx < 0:
continue
obj.active_shape_key_index = idx
bpy.ops.object.shape_key_move(type='BOTTOM')
@staticmethod
def get_morph_slider(rig):
return _MorphSlider(rig)
@staticmethod
def category_guess(morph):
name_lower = morph.name.lower()
if 'mouth' in name_lower:
morph.category = 'MOUTH'
elif 'eye' in name_lower:
if 'brow' in name_lower:
morph.category = 'EYEBROW'
else:
morph.category = 'EYE'
@classmethod
def load_morphs(cls, rig):
mmd_root = rig.rootObject().mmd_root
vertex_morphs = mmd_root.vertex_morphs
for obj in rig.meshes():
for kb in getattr(obj.data.shape_keys, 'key_blocks', ())[1:]:
if not kb.name.startswith('mmd_') and kb.name not in vertex_morphs:
item = vertex_morphs.add()
item.name = kb.name
item.name_e = kb.name
cls.category_guess(item)
@staticmethod
def remove_shape_key(obj, key_name):
key_blocks = getattr(obj.data.shape_keys, 'key_blocks', None)
if key_blocks and key_name in key_blocks:
ObjectOp(obj).shape_key_remove(key_blocks[key_name])
@staticmethod
def copy_shape_key(obj, src_name, dest_name):
key_blocks = getattr(obj.data.shape_keys, 'key_blocks', None)
if key_blocks and src_name in key_blocks:
if dest_name in key_blocks:
ObjectOp(obj).shape_key_remove(key_blocks[dest_name])
obj.active_shape_key_index = key_blocks.find(src_name)
obj.show_only_shape_key, last = True, obj.show_only_shape_key
obj.shape_key_add(name=dest_name, from_mix=True)
obj.show_only_shape_key = last
obj.active_shape_key_index = key_blocks.find(dest_name)
@staticmethod
def get_uv_morph_vertex_groups(obj, morph_name=None, offset_axes='XYZW'):
pattern = 'UV_%s[+-][%s]$'%(morph_name or '.{1,}', offset_axes or 'XYZW')
# yield (vertex_group, morph_name, axis),...
return ((g, g.name[3:-2], g.name[-2:]) for g in obj.vertex_groups if re.match(pattern, g.name))
@staticmethod
def copy_uv_morph_vertex_groups(obj, src_name, dest_name):
for vg, n, x in FnMorph.get_uv_morph_vertex_groups(obj, dest_name):
obj.vertex_groups.remove(vg)
for vg_name in tuple(i[0].name for i in FnMorph.get_uv_morph_vertex_groups(obj, src_name)):
obj.vertex_groups.active = obj.vertex_groups[vg_name]
override = {'object':obj, 'window':bpy.context.window, 'region':bpy.context.region}
bpy.ops.object.vertex_group_copy(override)
obj.vertex_groups.active.name = vg_name.replace(src_name, dest_name)
@staticmethod
def clean_uv_morph_vertex_groups(obj):
# remove empty vertex groups of uv morphs
vg_indices = {g.index for g, n, x in FnMorph.get_uv_morph_vertex_groups(obj)}
vertex_groups = obj.vertex_groups
for v in obj.data.vertices:
for x in v.groups:
if x.group in vg_indices and x.weight > 0:
vg_indices.remove(x.group)
for i in sorted(vg_indices, reverse=True):
vg = vertex_groups[i]
m = obj.modifiers.get('mmd_bind%s'%hash(vg.name), None)
if m:
obj.modifiers.remove(m)
vertex_groups.remove(vg)
@staticmethod
def get_uv_morph_offset_map(obj, morph):
offset_map = {} # offset_map[vertex_index] = offset_xyzw
if morph.data_type == 'VERTEX_GROUP':
scale = morph.vertex_group_scale
axis_map = {g.index:x for g, n, x in FnMorph.get_uv_morph_vertex_groups(obj, morph.name)}
for v in obj.data.vertices:
i = v.index
for x in v.groups:
if x.group in axis_map and x.weight > 0:
axis, weight = axis_map[x.group], x.weight
d = offset_map.setdefault(i, [0, 0, 0, 0])
d['XYZW'.index(axis[1])] += -weight*scale if axis[0] == '-' else weight*scale
else:
for val in morph.data:
i = val.index
if i in offset_map:
offset_map[i] = [a+b for a, b in zip(offset_map[i], val.offset)]
else:
offset_map[i] = val.offset
return offset_map
@staticmethod
def store_uv_morph_data(obj, morph, offsets=None, offset_axes='XYZW'):
vertex_groups = obj.vertex_groups
morph_name = getattr(morph, 'name', None)
if offset_axes:
for vg, n, x in FnMorph.get_uv_morph_vertex_groups(obj, morph_name, offset_axes):
vertex_groups.remove(vg)
if not morph_name or not offsets:
return
axis_indices = tuple('XYZW'.index(x) for x in offset_axes) or tuple(range(4))
offset_map = FnMorph.get_uv_morph_offset_map(obj, morph) if offset_axes else {}
for data in offsets:
idx, offset = data.index, data.offset
for i in axis_indices:
offset_map.setdefault(idx, [0, 0, 0, 0])[i] += round(offset[i], 5)
max_value = max(max(abs(x) for x in v) for v in offset_map.values() or ([0],))
scale = morph.vertex_group_scale = max(abs(morph.vertex_group_scale), max_value)
for idx, offset in offset_map.items():
for val, axis in zip(offset, 'XYZW'):
if abs(val) > 1e-4:
vg_name = 'UV_{0}{1}{2}'.format(morph_name, '-' if val < 0 else '+', axis)
vg = vertex_groups.get(vg_name, None) or vertex_groups.new(name=vg_name)
vg.add(index=[idx], weight=abs(val)/scale, type='REPLACE')
def update_mat_related_mesh(self, new_mesh=None):
for offset in self.__morph.data:
# Use the new_mesh if provided
meshObj = new_mesh
if new_mesh is None:
# Try to find the mesh by material name
meshObj = self.__rig.findMesh(offset.material)
if meshObj is None:
# Given this point we need to loop through all the meshes
for mesh in self.__rig.meshes():
if mesh.data.materials.find(offset.material) >= 0:
meshObj = mesh
break
# Finally update the reference
if meshObj is not None:
offset.related_mesh = meshObj.data.name
class _MorphSlider:
def __init__(self, model):
self.__rig = model
def placeholder(self, create=False, binded=False):
rig = self.__rig
root = rig.rootObject()
obj = next((x for x in root.children if x.mmd_type == 'PLACEHOLDER' and x.type == 'MESH'), None)
if create and obj is None:
obj = bpy.data.objects.new(name='.placeholder', object_data=bpy.data.meshes.new('.placeholder'))
obj.mmd_type = 'PLACEHOLDER'
obj.parent = root
SceneOp(bpy.context).link_object(obj)
if obj and obj.data.shape_keys is None:
key = obj.shape_key_add(name='--- morph sliders ---')
key.mute = True
if binded and obj and obj.data.shape_keys.key_blocks[0].mute:
return None
return obj
@property
def dummy_armature(self):
obj = self.placeholder()
return self.__dummy_armature(obj) if obj else None
def __dummy_armature(self, obj, create=False):
arm = next((x for x in obj.children if x.mmd_type == 'PLACEHOLDER' and x.type == 'ARMATURE'), None)
if create and arm is None:
arm = bpy.data.objects.new(name='.dummy_armature', object_data=bpy.data.armatures.new(name='.dummy_armature'))
arm.mmd_type = 'PLACEHOLDER'
arm.parent = obj
SceneOp(bpy.context).link_object(arm)
arm.data.draw_type = 'STICK'
return arm
def get(self, morph_name):
obj = self.placeholder()
if obj is None:
return None
key_blocks = obj.data.shape_keys.key_blocks
if key_blocks[0].mute:
return None
return key_blocks.get(morph_name, None)
def create(self):
self.__rig.loadMorphs()
obj = self.placeholder(create=True)
self.__load(obj, self.__rig.rootObject().mmd_root)
return obj
def __load(self, obj, mmd_root):
attr_list = ('group', 'vertex', 'bone', 'uv', 'material')
morph_key_blocks = obj.data.shape_keys.key_blocks
for m in (x for attr in attr_list for x in getattr(mmd_root, attr+'_morphs', ())):
name = m.name
#if name[-1] == '\\': # fix driver's bug???
# m.name = name = name + ' '
if name and name not in morph_key_blocks:
obj.shape_key_add(name=name)
@staticmethod
def __driver_variables(id_data, path, index=-1):
d = id_data.driver_add(path, index)
variables = d.driver.variables
for x in variables:
variables.remove(x)
return d.driver, variables
@staticmethod
def __add_single_prop(variables, id_obj, data_path, prefix):
var = variables.new()
var.name = prefix + str(len(variables))
var.type = 'SINGLE_PROP'
target = var.targets[0]
target.id_type = 'OBJECT'
target.id = id_obj
target.data_path = data_path
return var
def __cleanup(self, names_in_use=None):
names_in_use = names_in_use or {}
rig = self.__rig
for mesh in rig.meshes():
for kb in getattr(mesh.data.shape_keys, 'key_blocks', ()):
if kb.name.startswith('mmd_bind') and kb.name not in names_in_use:
kb.driver_remove('value')
kb.relative_key.mute = False
ObjectOp(mesh).shape_key_remove(kb)
for m in mesh.modifiers: # uv morph
if m.name.startswith('mmd_bind') and m.name not in names_in_use:
mesh.modifiers.remove(m)
from mmd_tools_local.core.shader import _MaterialMorph
for m in rig.materials():
if m and m.node_tree:
for n in sorted((x for x in m.node_tree.nodes if x.name.startswith('mmd_bind')), key=lambda x: -x.location[0]):
_MaterialMorph.reset_morph_links(n)
m.node_tree.nodes.remove(n)
attributes = set(TransformConstraintOp.min_max_attributes('LOCATION', 'to'))
attributes |= set(TransformConstraintOp.min_max_attributes('ROTATION', 'to'))
for b in rig.armature().pose.bones:
for c in b.constraints:
if c.name.startswith('mmd_bind') and c.name[:-4] not in names_in_use:
for attr in attributes:
c.driver_remove(attr)
b.constraints.remove(c)
def unbind(self):
mmd_root = self.__rig.rootObject().mmd_root
for m in mmd_root.bone_morphs:
for d in m.data:
d.name = ''
for m in mmd_root.material_morphs:
for d in m.data:
d.name = ''
obj = self.placeholder()
if obj:
obj.data.shape_keys.key_blocks[0].mute = True
arm = self.__dummy_armature(obj)
if arm:
for b in arm.pose.bones:
if b.name.startswith('mmd_bind'):
b.driver_remove('location')
b.driver_remove('rotation_quaternion')
self.__cleanup()
def bind(self):
rig = self.__rig
root = rig.rootObject()
armObj = rig.armature()
mmd_root = root.mmd_root
obj = self.create()
arm = self.__dummy_armature(obj, create=True)
morph_key_blocks = obj.data.shape_keys.key_blocks
# data gathering
group_map = {}
shape_key_map = {}
uv_morph_map = {}
for mesh in rig.meshes():
mesh.show_only_shape_key = False
key_blocks = getattr(mesh.data.shape_keys, 'key_blocks', ())
for kb in key_blocks:
kb_name = kb.name
if kb_name not in morph_key_blocks:
continue
name_bind = 'mmd_bind%s'%hash(morph_key_blocks[kb_name])
if name_bind not in key_blocks:
mesh.shape_key_add(name=name_bind)
kb_bind = key_blocks[name_bind]
kb_bind.relative_key = kb
kb_bind.slider_min = -10
kb_bind.slider_max = 10
data_path = 'data.shape_keys.key_blocks["%s"].value'%kb_name.replace('"', '\\"')
groups = []
shape_key_map.setdefault(name_bind, []).append((kb_bind, data_path, groups))
group_map.setdefault(('vertex_morphs', kb_name), []).append(groups)
uv_layers = [l.name for l in mesh.data.uv_layers if not l.name.startswith('_')]
uv_layers += ['']*(5-len(uv_layers))
for vg, morph_name, axis in FnMorph.get_uv_morph_vertex_groups(mesh):
morph = mmd_root.uv_morphs.get(morph_name, None)
if morph is None or morph.data_type != 'VERTEX_GROUP':
continue
uv_layer = '_'+uv_layers[morph.uv_index] if axis[1] in 'ZW' else uv_layers[morph.uv_index]
if uv_layer not in mesh.data.uv_layers:
continue
name_bind = 'mmd_bind%s'%hash(vg.name)
uv_morph_map.setdefault(name_bind, ())
mod = mesh.modifiers.get(name_bind, None) or mesh.modifiers.new(name=name_bind, type='UV_WARP')
mod.show_expanded = False
mod.vertex_group = vg.name
mod.axis_u, mod.axis_v = ('Y', 'X') if axis[1] in 'YW' else ('X', 'Y')
mod.uv_layer = uv_layer
name_bind = 'mmd_bind%s'%hash(morph_name)
mod.object_from = mod.object_to = arm
if axis[0] == '-':
mod.bone_from, mod.bone_to = 'mmd_bind_ctrl_base', name_bind
else:
mod.bone_from, mod.bone_to = name_bind, 'mmd_bind_ctrl_base'
bone_offset_map = {}
with bpyutils.edit_object(arm) as data:
edit_bones = data.edit_bones
def __get_bone(name, layer, parent):
b = edit_bones.get(name, None) or edit_bones.new(name=name)
b.layers = [x == layer for x in range(len(b.layers))]
b.head = (0, 0, 0)
b.tail = (0, 0, 1)
b.use_deform = False
b.parent = parent
return b
for m in mmd_root.bone_morphs:
data_path = 'data.shape_keys.key_blocks["%s"].value'%m.name.replace('"', '\\"')
for d in m.data:
if not d.bone:
d.name = ''
continue
d.name = name_bind = 'mmd_bind%s'%hash(d)
b = __get_bone(name_bind, 10, None)
groups = []
bone_offset_map[name_bind] = (m.name, d, b.name, data_path, groups)
group_map.setdefault(('bone_morphs', m.name), []).append(groups)
ctrl_base = __get_bone('mmd_bind_ctrl_base', 11, None)
for m in mmd_root.uv_morphs:
morph_name = m.name.replace('"', '\\"')
data_path = 'data.shape_keys.key_blocks["%s"].value'%morph_name
scale_path = 'mmd_root.uv_morphs["%s"].vertex_group_scale'%morph_name
name_bind = 'mmd_bind%s'%hash(m.name)
b = __get_bone(name_bind, 11, ctrl_base)
groups = []
uv_morph_map.setdefault(name_bind, []).append((b.name, data_path, scale_path, groups))
group_map.setdefault(('uv_morphs', m.name), []).append(groups)
used_bone_names = bone_offset_map.keys()|uv_morph_map.keys()
used_bone_names.add(ctrl_base.name)
for b in edit_bones: # cleanup
if b.name.startswith('mmd_bind') and b.name not in used_bone_names:
edit_bones.remove(b)
material_offset_map = {}
for m in mmd_root.material_morphs:
morph_name = m.name.replace('"', '\\"')
data_path = 'data.shape_keys.key_blocks["%s"].value'%morph_name
groups = []
group_map.setdefault(('material_morphs', m.name), []).append(groups)
material_offset_map.setdefault('group_dict', {})[m.name] = (data_path, groups)
for d in m.data:
d.name = name_bind = 'mmd_bind%s'%hash(d)
table = material_offset_map.setdefault(d.material_id, ([], []))
table[1 if d.offset_type == 'ADD' else 0].append((m.name, d, name_bind))
for m in mmd_root.group_morphs:
if len(m.data) != len(set(m.data.keys())):
print(' * Found duplicated morph data in Group Morph "%s"'%m.name)
morph_name = m.name.replace('"', '\\"')
morph_path = 'data.shape_keys.key_blocks["%s"].value'%morph_name
for d in m.data:
param = (morph_name, d.name.replace('"', '\\"'))
factor_path = 'mmd_root.group_morphs["%s"].data["%s"].factor'%param
for groups in group_map.get((d.morph_type, d.name), ()):
groups.append((m.name, morph_path, factor_path))
self.__cleanup(shape_key_map.keys()|bone_offset_map.keys()|uv_morph_map.keys())
def __config_groups(variables, expression, groups):
for g_name, morph_path, factor_path in groups:
var = self.__add_single_prop(variables, obj, morph_path, 'g')
fvar = self.__add_single_prop(variables, root, factor_path, 'w')
expression = '%s+%s*%s'%(expression, var.name, fvar.name)
return expression
# vertex morphs
for kb_bind, morph_data_path, groups in (i for l in shape_key_map.values() for i in l):
driver, variables = self.__driver_variables(kb_bind, 'value')
var = self.__add_single_prop(variables, obj, morph_data_path, 'v')
driver.expression = '-(%s)'%__config_groups(variables, var.name, groups)
kb_bind.relative_key.mute = True
kb_bind.mute = False
# bone morphs
def __config_bone_morph(constraints, map_type, attributes, val, val_str):
c_name = 'mmd_bind%s.%s'%(hash(data), map_type[:3])
c = TransformConstraintOp.create(constraints, c_name, map_type)
TransformConstraintOp.update_min_max(c, val, None)
c.show_expanded = False
c.target = arm
c.subtarget = bname
for attr in attributes:
driver, variables = self.__driver_variables(armObj, c.path_from_id(attr))
var = self.__add_single_prop(variables, obj, morph_data_path, 'b')
expression = __config_groups(variables, var.name, groups)
sign = '-' if attr.startswith('to_min') else ''
driver.expression = '%s%s*(%s)'%(sign, val_str, expression)
from math import pi
attributes_rot = TransformConstraintOp.min_max_attributes('ROTATION', 'to')
attributes_loc = TransformConstraintOp.min_max_attributes('LOCATION', 'to')
for morph_name, data, bname, morph_data_path, groups in bone_offset_map.values():
b = arm.pose.bones[bname]
b.location = data.location
b.rotation_quaternion = data.rotation.__class__(*data.rotation.to_axis_angle()) # Fix for consistency
b.is_mmd_shadow_bone = True
b.mmd_shadow_bone_type = 'BIND'
pb = armObj.pose.bones[data.bone]
__config_bone_morph(pb.constraints, 'ROTATION', attributes_rot, pi, 'pi')
__config_bone_morph(pb.constraints, 'LOCATION', attributes_loc, 100, '100')
# uv morphs
if bpy.app.version >= (2, 80, 0): # workaround for Blender 2.80+, data_path can't be properly detected (Save & Reopen file also works)
root.parent, root.parent, root.matrix_parent_inverse = arm, root.parent, root.matrix_parent_inverse.copy()
b = arm.pose.bones['mmd_bind_ctrl_base']
b.is_mmd_shadow_bone = True
b.mmd_shadow_bone_type = 'BIND'
for bname, data_path, scale_path, groups in (i for l in uv_morph_map.values() for i in l):
b = arm.pose.bones[bname]
b.is_mmd_shadow_bone = True
b.mmd_shadow_bone_type = 'BIND'
driver, variables = self.__driver_variables(b, 'location', index=0)
var = self.__add_single_prop(variables, obj, data_path, 'u')
fvar = self.__add_single_prop(variables, root, scale_path, 's')
driver.expression = '(%s)*%s'%(__config_groups(variables, var.name, groups), fvar.name)
# material morphs
from mmd_tools_local.core.shader import _MaterialMorph
group_dict = material_offset_map.get('group_dict', {})
def __config_material_morph(mat, morph_list):
nodes = _MaterialMorph.setup_morph_nodes(mat, tuple(x[1] for x in morph_list))
for (morph_name, data, name_bind), node in zip(morph_list, nodes):
node.label, node.name = morph_name, name_bind
data_path, groups = group_dict[morph_name]
driver, variables = self.__driver_variables(mat.node_tree, node.inputs[0].path_from_id('default_value'))
var = self.__add_single_prop(variables, obj, data_path, 'm')
driver.expression = '%s'%__config_groups(variables, var.name, groups)
for mat in (m for m in rig.materials() if m and m.use_nodes and not m.name.startswith('mmd_')):
mat_id = mat.mmd_material.material_id
mul_all, add_all = material_offset_map.get(-1, ([], []))
mul_list, add_list = material_offset_map.get('' if mat_id < 0 else mat_id, ([], []))
morph_list = tuple(mul_all+mul_list+add_all+add_list)
__config_material_morph(mat, morph_list)
mat_edge = bpy.data.materials.get('mmd_edge.'+mat.name, None)
if mat_edge:
__config_material_morph(mat_edge, morph_list)
morph_key_blocks[0].mute = False
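# A hedged usage sketch (Blender only; "model" is assumed to be the mmd_tools Model wrapper
# exposing rootObject(), meshes() and armature() as used throughout this file):
#
# FnMorph.load_morphs(model)                 # register vertex morphs found on the meshes
# slider = FnMorph.get_morph_slider(model)   # returns the _MorphSlider helper
# slider.bind()                              # build the placeholder shape keys + drivers
# slider.unbind()                            # mute the sliders and strip the mmd_bind* helpers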
| 45.23913
| 142
| 0.583213
|
4a15412ef6bd5d536bfb5dbfe5d1a7d5c93c050e
| 2,436
|
py
|
Python
|
tests/test_homepage.py
|
MisterRios/epcon
|
346bdac13f0c68b39b47eece8d2376bc12e3adc3
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_homepage.py
|
MisterRios/epcon
|
346bdac13f0c68b39b47eece8d2376bc12e3adc3
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_homepage.py
|
MisterRios/epcon
|
346bdac13f0c68b39b47eece8d2376bc12e3adc3
|
[
"BSD-2-Clause"
] | null | null | null |
from django.conf import settings
from tests.common_tools import template_used
from tests.factories import SponsorFactory
from tests.common_tools import create_homepage_in_cms
def test_get_homepage(db, client):
create_homepage_in_cms()
url = "/"
response = client.get(url)
assert response.status_code == 200
assert template_used(response, "conference/homepage/home_template.html")
assert template_used(response, "conference/homepage/_venue.html")
assert template_used(response, "conference/homepage/_sponsors.html")
assert template_used(response, "conference/homepage/_schedule_overview.html")
assert template_used(response, "conference/header/_with_jumbotron.html")
assert b"EuroPython 2020" in response.content
def test_homepage_doesnt_contain_sponsor_if_no_income(db, client):
create_homepage_in_cms()
sponsor = SponsorFactory(
alt_text="Sponsor Alt Text", title_text="Sponsor Title Text"
)
url = "/"
response = client.get(url)
assert sponsor.alt_text not in response.content.decode()
assert sponsor.title_text not in response.content.decode()
def test_homepage_doesnt_contain_sponsor_if_income_for_different_conference(
db, client
):
create_homepage_in_cms()
sponsor = SponsorFactory(
alt_text="Sponsor Alt Text", title_text="Sponsor Title Text"
)
sponsor.sponsorincome_set.create(income=123, conference="whatever2020")
url = "/"
response = client.get(url)
assert sponsor.alt_text not in response.content.decode()
assert sponsor.title_text not in response.content.decode()
def test_homepage_contains_sponsors_if_income_for_current_conference(
db, client
):
create_homepage_in_cms()
sponsor = SponsorFactory(
alt_text="Sponsor Alt Text", title_text="Sponsor Title Text"
)
sponsor.sponsorincome_set.create(
income=123, conference=settings.CONFERENCE_CONFERENCE
)
url = "/"
response = client.get(url)
assert sponsor.alt_text in response.content.decode()
assert sponsor.title_text in response.content.decode()
def test_homepage_contains_googleanalytics(db, client):
create_homepage_in_cms()
url = "/"
response = client.get(url)
assert response.status_code == 200
EPCON_GA_ID = "UA-60323107"
    # NOTE(artcz) this should probably go into a variable, but good enough for now
assert EPCON_GA_ID in response.content.decode()
| 31.230769
| 81
| 0.746716
|
4a154214bf5fa67a8b7bea53b0abd594fbdbe779
| 1,734
|
py
|
Python
|
backup.py
|
Programmier-AG/BeeLogger
|
2f85bc27f4af9442bca4691c52f4d5a4b520caa7
|
[
"MIT"
] | 1
|
2022-03-04T08:42:54.000Z
|
2022-03-04T08:42:54.000Z
|
backup.py
|
Programmier-AG/BeeLogger
|
2f85bc27f4af9442bca4691c52f4d5a4b520caa7
|
[
"MIT"
] | 29
|
2020-09-23T17:17:05.000Z
|
2022-03-29T08:03:46.000Z
|
backup.py
|
Programmier-AG/BeeLogger
|
2f85bc27f4af9442bca4691c52f4d5a4b520caa7
|
[
"MIT"
] | 8
|
2021-03-21T21:13:23.000Z
|
2021-10-20T20:08:16.000Z
|
import os
import time
from shutil import copyfile, copytree, make_archive
from config import FileBackup, MySql
if not os.path.isfile("backup.py"):
print("You need to start this script from the directory it's contained in. Please cd into that folder.")
exit()
print("checking backup directory")
if not os.path.exists("backup/"):
print("create backup directory")
os.mkdir("backup/")
print("parsing backup name")
dir_name = time.asctime()
dest = "backup/" + dir_name + "/"
dest = dest.replace(" ", "-")
if os.path.exists(dest):
os.removedirs(dest)
os.mkdir(dest)
print("downloading MySql database")
os.popen("mysqldump -h %s -u %s -p%s %s > %sdb_backup.sql" % (MySql.host, MySql.user, MySql.password, MySql.db, dest)).readlines()
try:
print("copying files")
copyfile("logs/insert.log", dest + "insert.log")
# copytree("stats", dest + "stats/")
except FileNotFoundError:
print("no insert.log file, ignoring")
print("packing files")
make_archive(dest, "zip", dest)
print("cleaning up")
os.popen("rm -r " + dest).readlines()
print("saving on remote")
if FileBackup.key != "":
cmd = f"scp -o StrictHostKeyChecking=no -i 'secrets/{FileBackup.key}' -P {FileBackup.port} '{dest[:-1]}.zip' '{FileBackup.user}@{FileBackup.host}:{FileBackup.directory}'"
else:
cmd = f"sshpass -p {FileBackup.password} scp -o StrictHostKeyChecking=no -P {FileBackup.port} '{dest[:-1]}.zip' '{FileBackup.user}@{FileBackup.host}:{FileBackup.directory}'"
# cmd = "sshpass -p '%s' scp -P %s '%s.zip' '%s@%s:%s'" % (FileBackup.password, FileBackup.port, dest[:-1], FileBackup.user, FileBackup.host, FileBackup.directory)
print(cmd)
print(os.popen(cmd).read())
| 32.716981
| 178
| 0.66609
|
4a15422b9604b796290879693768ec5bb38ee254
| 10,258
|
py
|
Python
|
examples/pybullet/gym/pybullet_envs/minitaur/robots/hybrid_motor_model.py
|
felipeek/bullet3
|
6a59241074720e9df119f2f86bc01765917feb1e
|
[
"Zlib"
] | 9,136
|
2015-01-02T00:41:45.000Z
|
2022-03-31T15:30:02.000Z
|
examples/pybullet/gym/pybullet_envs/minitaur/robots/hybrid_motor_model.py
|
felipeek/bullet3
|
6a59241074720e9df119f2f86bc01765917feb1e
|
[
"Zlib"
] | 2,424
|
2015-01-05T08:55:58.000Z
|
2022-03-30T19:34:55.000Z
|
examples/pybullet/gym/pybullet_envs/minitaur/robots/hybrid_motor_model.py
|
felipeek/bullet3
|
6a59241074720e9df119f2f86bc01765917feb1e
|
[
"Zlib"
] | 2,921
|
2015-01-02T10:19:30.000Z
|
2022-03-31T02:48:42.000Z
|
# Lint as: python3
"""A generic PD motor model."""
from typing import Tuple, Union
import gin
import numpy as np
from pybullet_envs.minitaur.robots import robot_config
from pybullet_envs.minitaur.robots import time_ordered_buffer
_DEFAULT_BUFFER_SIZE = 200
_HYBRID_ACTION_LEN = len(robot_config.HybridActionIndex)
_HYBRID_POS_INDEX = robot_config.HybridActionIndex.POSITION.value
_HYBRID_KP_INDEX = robot_config.HybridActionIndex.POSITION_GAIN.value
_HYBRID_VEL_INDEX = robot_config.HybridActionIndex.VELOCITY.value
_HYBRID_KD_INDEX = robot_config.HybridActionIndex.VELOCITY_GAIN.value
_HYBRID_TORQUE_INDEX = robot_config.HybridActionIndex.TORQUE.value
def _convert_to_np_array(inputs: Union[float, Tuple[float], np.ndarray], dim):
"""Converts the inputs to a numpy array.
Args:
inputs: The input scalar or array.
dim: The dimension of the converted numpy array.
Returns:
The converted numpy array.
Raises:
    ValueError: If the input is an array whose dimension does not match the
      provided dimension.
"""
outputs = None
if isinstance(inputs, (tuple, np.ndarray)):
outputs = np.array(inputs)
else:
outputs = np.full(dim, inputs)
if len(outputs) != dim:
raise ValueError("The inputs array has a different dimension {}"
" than provided, which is {}.".format(len(outputs), dim))
return outputs
@gin.configurable
class HybridMotorModel(object):
"""A simple motor model that supports proportional and derivative control.
When in POSITION mode, the torque is calculated according to the difference
between current and desired joint angle, as well as the joint velocity
differences. For more information about PD control, please refer to:
https://en.wikipedia.org/wiki/PID_controller.
The model supports a HYBRID mode in which each motor command can be a tuple
(desired_motor_angle, position_gain, desired_motor_velocity, velocity_gain,
torque).
"""
def __init__(
self,
num_motors: int,
pd_latency: float = 0,
motor_control_mode=robot_config.MotorControlMode.POSITION,
kp: Union[float, Tuple[float], np.ndarray] = 60,
kd: Union[float, Tuple[float], np.ndarray] = 1,
strength_ratios: Union[float, Tuple[float], np.ndarray] = 1,
torque_lower_limits: Union[float, Tuple[float], np.ndarray] = None,
torque_upper_limits: Union[float, Tuple[float], np.ndarray] = None,
):
"""Initializes the class.
Args:
num_motors: The number of motors for parallel computation.
pd_latency: Simulates the motor controller's latency in reading motor
angles and velocities.
motor_control_mode: Can be POSITION, TORQUE, or HYBRID. In POSITION
control mode, the PD formula is used to track a desired position and a
zero desired velocity. In TORQUE control mode, we assume a pass through
of the provided torques. In HYBRID control mode, the users need to
      provide (desired_position, position_gain, desired_velocity,
      velocity_gain, feedforward_torque) for each motor.
kp: The default position gains for motors.
kd: The default velocity gains for motors.
strength_ratios: The scaling ratio for motor torque outputs. This can be
useful for quick debugging when sim-to-real gap is observed in the
actuator behavior.
torque_lower_limits: The lower bounds for torque outputs.
torque_upper_limits: The upper bounds for torque outputs. The output
torques will be clipped by the lower and upper bounds.
Raises:
ValueError: If the number of motors provided is negative or zero.
"""
if num_motors <= 0:
raise ValueError(
"Number of motors must be positive, not {}".format(num_motors))
self._num_motors = num_motors
self._zero_array = np.full(num_motors, 0)
self._pd_latency = pd_latency
self._hybrid_command_dim = _HYBRID_ACTION_LEN * self._num_motors
self.set_motor_gains(kp, kd)
self.set_strength_ratios(strength_ratios)
self._torque_lower_limits = None
if torque_lower_limits:
self._torque_lower_limits = _convert_to_np_array(torque_lower_limits,
self._num_motors)
self._torque_upper_limits = None
if torque_upper_limits:
self._torque_upper_limits = _convert_to_np_array(torque_upper_limits,
self._num_motors)
self._motor_control_mode = motor_control_mode
# The history buffer is used to simulate the pd latency effect.
# TODO(b/157786642): remove hacks on duplicate timestep once the sim clock
# is fixed.
self._observation_buffer = time_ordered_buffer.TimeOrderedBuffer(
max_buffer_timespan=pd_latency,
error_on_duplicate_timestamp=False,
replace_value_on_duplicate_timestamp=True)
def set_strength_ratios(
self,
strength_ratios: Union[float, Tuple[float], np.ndarray],
):
"""Sets the strength of each motor relative to the default value.
Args:
strength_ratios: The relative strength of motor output, ranging from [0,
1] inclusive.
"""
self._strength_ratios = np.clip(
_convert_to_np_array(strength_ratios, self._num_motors), 0, 1)
def set_motor_gains(
self,
kp: Union[float, Tuple[float], np.ndarray],
kd: Union[float, Tuple[float], np.ndarray],
):
"""Sets the gains of all motors.
These gains are PD gains for motor positional control. kp is the
proportional gain and kd is the derivative gain.
Args:
kp: Proportional gain of the motors.
kd: Derivative gain of the motors.
"""
self._kp = _convert_to_np_array(kp, self._num_motors)
self._kd = _convert_to_np_array(kd, self._num_motors)
def get_motor_gains(self):
"""Get the PD gains of all motors.
Returns:
Proportional and derivative gain of the motors.
"""
return self._kp, self._kd
def reset(self):
self._observation_buffer.reset()
def update(self, timestamp, true_motor_positions: np.ndarray,
true_motor_velocities: np.ndarray):
# Push these to the buffer
self._observation_buffer.add(timestamp,
(true_motor_positions, true_motor_velocities))
def get_motor_torques(
self,
motor_commands: np.ndarray,
motor_control_mode=None) -> Tuple[np.ndarray, np.ndarray]:
"""Computes the motor torques.
Args:
motor_commands: The desired motor angle if the motor is in position
control mode. The pwm signal if the motor is in torque control mode.
motor_control_mode: A MotorControlMode enum.
Returns:
observed_torque: The torque observed. This emulates the limitations in
torque measurement, which is generally obtained from current estimations.
actual_torque: The torque that needs to be applied to the motor.
Raises:
NotImplementedError if the motor_control_mode is not supported.
"""
if not motor_control_mode:
motor_control_mode = self._motor_control_mode
motor_torques = None
if motor_control_mode is robot_config.MotorControlMode.TORQUE:
motor_torques = motor_commands
if motor_control_mode is robot_config.MotorControlMode.POSITION:
motor_torques = self._compute_pd_torques(
desired_motor_angles=motor_commands,
kp=self._kp,
desired_motor_velocities=self._zero_array,
kd=self._kd)
if motor_control_mode is robot_config.MotorControlMode.HYBRID:
motor_torques = self._compute_hybrid_action_torques(motor_commands)
if motor_torques is None:
raise ValueError(
"{} is not a supported motor control mode".format(motor_control_mode))
# Rescale and clip the motor torques as needed.
motor_torques = self._strength_ratios * motor_torques
if (self._torque_lower_limits is not None or
self._torque_upper_limits is not None):
motor_torques = np.clip(motor_torques, self._torque_lower_limits,
self._torque_upper_limits)
return motor_torques, motor_torques
def get_motor_states(self, latency=None):
"""Computes observation of motor angle and velocity under latency."""
if latency is None:
latency = self._pd_latency
buffer = self._observation_buffer.get_delayed_value(latency)
angle_vel_t0 = buffer.value_0
angle_vel_t1 = buffer.value_1
coeff = buffer.coeff
pos_idx = 0
motor_angles = angle_vel_t0[pos_idx] * (
1 - coeff) + coeff * angle_vel_t1[pos_idx]
vel_idx = 1
motor_velocities = angle_vel_t0[vel_idx] * (
1 - coeff) + coeff * angle_vel_t1[vel_idx]
return motor_angles, motor_velocities
def _compute_pd_torques(
self,
desired_motor_angles: np.ndarray,
kp: np.ndarray,
desired_motor_velocities,
kd: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
"""Computes the pd torques.
Args:
desired_motor_angles: The motor angles to track.
kp: The position gains.
desired_motor_velocities: The motor velocities to track.
kd: The velocity gains.
Returns:
The computed motor torques.
"""
motor_angles, motor_velocities = self.get_motor_states()
motor_torques = -kp * (motor_angles - desired_motor_angles) - kd * (
motor_velocities - desired_motor_velocities)
return motor_torques
def _compute_hybrid_action_torques(
self, motor_commands: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Computes the pd torques in the HYBRID mode."""
assert len(motor_commands) == self._hybrid_command_dim
kp = motor_commands[_HYBRID_KP_INDEX::_HYBRID_ACTION_LEN]
kd = motor_commands[_HYBRID_KD_INDEX::_HYBRID_ACTION_LEN]
desired_motor_angles = motor_commands[_HYBRID_POS_INDEX::_HYBRID_ACTION_LEN]
desired_motor_velocities = motor_commands[
_HYBRID_VEL_INDEX::_HYBRID_ACTION_LEN]
additional_torques = motor_commands[
_HYBRID_TORQUE_INDEX::_HYBRID_ACTION_LEN]
return self._compute_pd_torques(desired_motor_angles, kp,
desired_motor_velocities,
kd) + additional_torques
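# A minimal, hedged usage sketch in the default POSITION mode (all numbers are illustrative):
# update() must be called with true motor states before get_motor_torques(), since the PD law
#   tau = -kp * (q - q_des) - kd * (dq - dq_des)
# reads angles and velocities back from the (possibly delayed) observation buffer.
if __name__ == "__main__":
  motor_model = HybridMotorModel(num_motors=2, kp=60, kd=1)
  motor_model.update(0.00, np.array([0.1, -0.2]), np.zeros(2))
  motor_model.update(0.01, np.array([0.1, -0.2]), np.zeros(2))
  torques, _ = motor_model.get_motor_torques(np.zeros(2))
  print(torques)  # roughly [-6., 12.] here: -kp * (angle - 0) with zero velocity error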
| 36.767025
| 80
| 0.707545
|
4a15425ed45dfac53812c0bf482d4a8c353743ef
| 4,937
|
py
|
Python
|
model.py
|
Yixin-Liu-323/SUBLIME
|
f8ac29a124b60e80557a9e405e9c3d784e52279a
|
[
"MIT"
] | 19
|
2022-01-14T12:07:13.000Z
|
2022-03-30T03:18:43.000Z
|
model.py
|
Yixin-Liu-323/SUBLIME
|
f8ac29a124b60e80557a9e405e9c3d784e52279a
|
[
"MIT"
] | null | null | null |
model.py
|
Yixin-Liu-323/SUBLIME
|
f8ac29a124b60e80557a9e405e9c3d784e52279a
|
[
"MIT"
] | 3
|
2022-01-14T08:29:16.000Z
|
2022-03-24T02:25:34.000Z
|
import copy
import math
import torch
from graph_learners import *
from layers import GCNConv_dense, GCNConv_dgl, SparseDropout
from torch.nn import Sequential, Linear, ReLU
# GCN for evaluation.
class GCN(nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, num_layers, dropout, dropout_adj, Adj, sparse):
super(GCN, self).__init__()
self.layers = nn.ModuleList()
if sparse:
self.layers.append(GCNConv_dgl(in_channels, hidden_channels))
for _ in range(num_layers - 2):
self.layers.append(GCNConv_dgl(hidden_channels, hidden_channels))
self.layers.append(GCNConv_dgl(hidden_channels, out_channels))
else:
self.layers.append(GCNConv_dense(in_channels, hidden_channels))
for i in range(num_layers - 2):
self.layers.append(GCNConv_dense(hidden_channels, hidden_channels))
self.layers.append(GCNConv_dense(hidden_channels, out_channels))
self.dropout = dropout
self.dropout_adj_p = dropout_adj
self.Adj = Adj
self.Adj.requires_grad = False
self.sparse = sparse
if self.sparse:
self.dropout_adj = SparseDropout(dprob=dropout_adj)
else:
self.dropout_adj = nn.Dropout(p=dropout_adj)
def forward(self, x):
if self.sparse:
Adj = copy.deepcopy(self.Adj)
Adj.edata['w'] = F.dropout(Adj.edata['w'], p=self.dropout_adj_p, training=self.training)
else:
Adj = self.dropout_adj(self.Adj)
for i, conv in enumerate(self.layers[:-1]):
x = conv(x, Adj)
x = F.relu(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.layers[-1](x, Adj)
return x
class GraphEncoder(nn.Module):
def __init__(self, nlayers, in_dim, hidden_dim, emb_dim, proj_dim, dropout, dropout_adj, sparse):
super(GraphEncoder, self).__init__()
self.dropout = dropout
self.dropout_adj_p = dropout_adj
self.sparse = sparse
self.gnn_encoder_layers = nn.ModuleList()
if sparse:
self.gnn_encoder_layers.append(GCNConv_dgl(in_dim, hidden_dim))
for _ in range(nlayers - 2):
self.gnn_encoder_layers.append(GCNConv_dgl(hidden_dim, hidden_dim))
self.gnn_encoder_layers.append(GCNConv_dgl(hidden_dim, emb_dim))
else:
self.gnn_encoder_layers.append(GCNConv_dense(in_dim, hidden_dim))
for _ in range(nlayers - 2):
self.gnn_encoder_layers.append(GCNConv_dense(hidden_dim, hidden_dim))
self.gnn_encoder_layers.append(GCNConv_dense(hidden_dim, emb_dim))
if self.sparse:
self.dropout_adj = SparseDropout(dprob=dropout_adj)
else:
self.dropout_adj = nn.Dropout(p=dropout_adj)
self.proj_head = Sequential(Linear(emb_dim, proj_dim), ReLU(inplace=True),
Linear(proj_dim, proj_dim))
def forward(self,x, Adj_, branch=None):
if self.sparse:
if branch == 'anchor':
Adj = copy.deepcopy(Adj_)
else:
Adj = Adj_
Adj.edata['w'] = F.dropout(Adj.edata['w'], p=self.dropout_adj_p, training=self.training)
else:
Adj = self.dropout_adj(Adj_)
for conv in self.gnn_encoder_layers[:-1]:
x = conv(x, Adj)
x = F.relu(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.gnn_encoder_layers[-1](x, Adj)
z = self.proj_head(x)
return z, x
class GCL(nn.Module):
def __init__(self, nlayers, in_dim, hidden_dim, emb_dim, proj_dim, dropout, dropout_adj, sparse):
super(GCL, self).__init__()
self.encoder = GraphEncoder(nlayers, in_dim, hidden_dim, emb_dim, proj_dim, dropout, dropout_adj, sparse)
def forward(self, x, Adj_, branch=None):
z, embedding = self.encoder(x, Adj_, branch)
return z, embedding
@staticmethod
def calc_loss(x, x_aug, temperature=0.2, sym=True):
batch_size, _ = x.size()
x_abs = x.norm(dim=1)
x_aug_abs = x_aug.norm(dim=1)
sim_matrix = torch.einsum('ik,jk->ij', x, x_aug) / torch.einsum('i,j->ij', x_abs, x_aug_abs)
sim_matrix = torch.exp(sim_matrix / temperature)
pos_sim = sim_matrix[range(batch_size), range(batch_size)]
if sym:
loss_0 = pos_sim / (sim_matrix.sum(dim=0) - pos_sim)
loss_1 = pos_sim / (sim_matrix.sum(dim=1) - pos_sim)
loss_0 = - torch.log(loss_0).mean()
loss_1 = - torch.log(loss_1).mean()
loss = (loss_0 + loss_1) / 2.0
return loss
else:
loss_1 = pos_sim / (sim_matrix.sum(dim=1) - pos_sim)
loss_1 = - torch.log(loss_1).mean()
return loss_1
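# A small, hedged sanity check of the contrastive loss on its own (no graphs involved):
# calc_loss is an NT-Xent-style objective, so two identical embedding batches should score
# noticeably lower than two unrelated random batches.
if __name__ == "__main__":
    z = torch.randn(8, 16)
    print(GCL.calc_loss(z, z).item())                   # identical views -> small loss
    print(GCL.calc_loss(z, torch.randn(8, 16)).item())  # unrelated views -> larger loss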
| 37.976923
| 114
| 0.611302
|
4a1544014326ff4707daf9e4bc3b8eb434105165
| 1,590
|
py
|
Python
|
u24_lymphocyte/third_party/treeano/visualization.py
|
ALSM-PhD/quip_classification
|
7347bfaa5cf11ae2d7a528fbcc43322a12c795d3
|
[
"BSD-3-Clause"
] | 45
|
2015-04-26T04:45:51.000Z
|
2022-01-24T15:03:55.000Z
|
u24_lymphocyte/third_party/treeano/visualization.py
|
ALSM-PhD/quip_classification
|
7347bfaa5cf11ae2d7a528fbcc43322a12c795d3
|
[
"BSD-3-Clause"
] | 8
|
2018-07-20T20:54:51.000Z
|
2020-06-12T05:36:04.000Z
|
u24_lymphocyte/third_party/treeano/visualization.py
|
ALSM-PhD/quip_classification
|
7347bfaa5cf11ae2d7a528fbcc43322a12c795d3
|
[
"BSD-3-Clause"
] | 22
|
2018-05-21T23:57:20.000Z
|
2022-02-21T00:48:32.000Z
|
import pylab
import networkx as nx
import theano
import theano.tensor as T
def _plot_graph(graph, filename=None, node_size=500):
nx.draw_networkx(
graph,
nx.graphviz_layout(graph),
node_size=node_size)
if filename is None:
pylab.show()
else:
pylab.savefig(filename)
def plot_architectural_tree(network, *args, **kwargs):
return _plot_graph(network.graph.architectural_tree, *args, **kwargs)
def plot_computation_graph(network, *args, **kwargs):
return _plot_graph(network.graph.computation_graph, *args, **kwargs)
def pydotprint_network(network,
outfile=None,
variables=None,
include_updates=True,
*args,
**kwargs):
network.build()
if variables is None:
vws = network.relative_network(
network.root_node
).find_vws_in_subtree()
variables = [vw.variable for vw in vws]
if include_updates:
variables += [v for _, v in network.update_deltas.to_updates()]
else:
# TODO search through update deltas for which ones apply to the
# given variables
assert not include_updates, ("include_updates is currently only "
"for showing all variables")
variables = [network.network_variable(v) for v in variables]
theano.printing.pydotprint(fct=variables,
outfile=outfile,
*args,
**kwargs)
| 31.176471
| 75
| 0.584277
|
4a1545ff55e783296a5994a9e26c071a65e1efa0
| 10,239
|
py
|
Python
|
TADV/attacks/gmsa_attack.py
|
jfc43/eval-transductive-robustness
|
91aea64cc69be1e3f4d14f94de9ff976c8c307df
|
[
"Apache-2.0"
] | null | null | null |
TADV/attacks/gmsa_attack.py
|
jfc43/eval-transductive-robustness
|
91aea64cc69be1e3f4d14f94de9ff976c8c307df
|
[
"Apache-2.0"
] | null | null | null |
TADV/attacks/gmsa_attack.py
|
jfc43/eval-transductive-robustness
|
91aea64cc69be1e3f4d14f94de9ff976c8c307df
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import torch
import torch.nn as nn
class GMSAMINLinfPGDAttack:
"""
GMSA-MIN Attack with order=Linf
:param eps: maximum distortion.
:param nb_iter: number of iterations.
:param eps_iter: attack step size.
:param rand_init: (optional bool) random initialization.
    :param clip_min: minimum value per input dimension.
:param clip_max: maximum value per input dimension.
:param targeted: if the attack is targeted.
"""
def __init__(
self, models, eps=0.1, nb_iter=100,
eps_iter=0.01, rand_init=True, clip_min=0., clip_max=1.,
targeted=False, num_classes=10, elementwise_best=False):
self.eps = eps
self.nb_iter = nb_iter
self.eps_iter = eps_iter
self.rand_init = rand_init
self.targeted = targeted
self.elementwise_best = elementwise_best
self.models = models
self.num_classes = num_classes
self.loss_func = nn.CrossEntropyLoss(reduction='none')
self.clip_min = clip_min
self.clip_max = clip_max
def get_loss(self, x, y):
min_loss = None
for model in self.models:
outputs = model(x)
curr_loss = self.loss_func(outputs, y)
if min_loss is None:
min_loss = curr_loss
else:
cond = curr_loss.data < min_loss.data
min_loss[cond] = curr_loss[cond]
return min_loss
def perturb(self, x, y):
"""
Given examples (x, y), returns their adversarial counterparts with
an attack length of eps.
:param x: input tensor.
:param y: label tensor.
:return: tensor containing perturbed inputs.
"""
for model in self.models:
model.eval()
x = x.detach().clone()
y = y.detach().clone()
y = y.cuda()
delta = torch.zeros_like(x)
delta = nn.Parameter(delta)
delta.requires_grad_()
if self.elementwise_best:
with torch.no_grad():
loss = self.get_loss(x, y)
worst_loss = loss.data.clone()
worst_perb = delta.data.clone()
if self.rand_init:
delta.data.uniform_(-self.eps, self.eps)
delta.data = (torch.clamp(x.data + delta.data, min=self.clip_min, max=self.clip_max) - x.data)
for ii in range(self.nb_iter*len(self.models)):
adv_x = x + delta
loss = self.get_loss(adv_x, y)
if self.elementwise_best:
cond = loss.data > worst_loss
worst_loss[cond] = loss.data[cond]
worst_perb[cond] = delta.data[cond]
loss.mean().backward()
grad_sign = delta.grad.data.sign()
delta.data = delta.data + grad_sign * self.eps_iter
delta.data = torch.clamp(delta.data, min=-self.eps, max=self.eps)
delta.data = torch.clamp(x.data + delta.data, min=self.clip_min, max=self.clip_max) - x.data
delta.grad.data.zero_()
if self.elementwise_best:
adv_x = x + delta
with torch.no_grad():
loss = self.get_loss(adv_x, y)
cond = loss.data > worst_loss
worst_loss[cond] = loss.data[cond]
worst_perb[cond] = delta.data[cond]
adv_x = x + worst_perb
else:
adv_x = x + delta.data
return adv_x
class GMSAAVGLinfPGDAttack:
"""
GMSA-AVG Attack with order=Linf
:param eps: maximum distortion.
:param nb_iter: number of iterations.
:param eps_iter: attack step size.
:param rand_init: (optional bool) random initialization.
    :param clip_min: minimum value per input dimension.
:param clip_max: maximum value per input dimension.
:param targeted: if the attack is targeted.
"""
def __init__(
self, models, eps=0.1, nb_iter=100,
eps_iter=0.01, rand_init=True, clip_min=0., clip_max=1.,
targeted=False, num_classes=10, elementwise_best=False):
self.eps = eps
self.nb_iter = nb_iter
self.eps_iter = eps_iter
self.rand_init = rand_init
self.targeted = targeted
self.elementwise_best = elementwise_best
self.models = models
self.num_classes = num_classes
self.loss_func = nn.CrossEntropyLoss(reduction='none')
self.clip_min = clip_min
self.clip_max = clip_max
def get_loss(self, x, y, update=False):
loss = 0.0
for model in self.models:
outputs = model(x)
if self.targeted:
target = ((y + torch.randint(1, self.num_classes, y.shape).cuda()) % self.num_classes).long()
curr_loss = -self.loss_func(outputs, target)
else:
curr_loss = self.loss_func(outputs, y)
if update:
curr_loss.mean().backward()
loss += curr_loss.data
return loss
def perturb(self, x, y):
"""
Given examples (x, y), returns their adversarial counterparts with
an attack length of eps.
:param x: input tensor.
:param y: label tensor.
:return: tensor containing perturbed inputs.
"""
for model in self.models:
model.eval()
x = x.detach().clone()
y = y.detach().clone()
y = y.cuda()
delta = torch.zeros_like(x)
delta = nn.Parameter(delta)
delta.requires_grad_()
if self.elementwise_best:
with torch.no_grad():
loss = self.get_loss(x, y, update=False)
worst_loss = loss.data.clone()
worst_perb = delta.data.clone()
if self.rand_init:
delta.data.uniform_(-self.eps, self.eps)
delta.data = (torch.clamp(x.data + delta.data, min=self.clip_min, max=self.clip_max) - x.data)
for ii in range(self.nb_iter):
adv_x = x + delta
loss = self.get_loss(adv_x, y, update=True)
if self.elementwise_best:
cond = loss.data > worst_loss
worst_loss[cond] = loss.data[cond]
worst_perb[cond] = delta.data[cond]
grad_sign = delta.grad.data.sign()
delta.data = delta.data + grad_sign * self.eps_iter
delta.data = torch.clamp(delta.data, min=-self.eps, max=self.eps)
delta.data = torch.clamp(x.data + delta.data, min=self.clip_min, max=self.clip_max) - x.data
delta.grad.data.zero_()
if self.elementwise_best:
adv_x = x + delta
with torch.no_grad():
loss = self.get_loss(adv_x, y, update=False)
cond = loss.data > worst_loss
worst_loss[cond] = loss.data[cond]
worst_perb[cond] = delta.data[cond]
adv_x = x + worst_perb
else:
adv_x = x + delta.data
return adv_x
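# A minimal, hypothetical usage sketch of GMSAAVGLinfPGDAttack (not part of the
# original file): `models` is assumed to be a list of trained nn.Module instances
# already on the GPU, and (x, y) a CUDA batch of inputs and integer class labels.
#
#   attack = GMSAAVGLinfPGDAttack(models, eps=8/255, eps_iter=2/255, nb_iter=100,
#                                 elementwise_best=True)
#   x_adv = attack.perturb(x, y)   # maximizes the loss summed over all models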
class LinfPGDAttack:
"""
PGD Attack with order=Linf
:param eps: maximum distortion.
:param nb_iter: number of iterations.
:param eps_iter: attack step size.
:param rand_init: (optional bool) random initialization.
    :param clip_min: minimum value per input dimension.
:param clip_max: maximum value per input dimension.
:param targeted: if the attack is targeted.
"""
def __init__(
self, model, eps=0.1, nb_iter=100,
eps_iter=0.01, rand_init=True, clip_min=0., clip_max=1.,
targeted=False, num_classes=10, elementwise_best=False):
self.eps = eps
self.nb_iter = nb_iter
self.eps_iter = eps_iter
self.rand_init = rand_init
self.targeted = targeted
self.elementwise_best = elementwise_best
self.model = model
self.num_classes = num_classes
self.loss_func = nn.CrossEntropyLoss(reduction='none')
self.clip_min = clip_min
self.clip_max = clip_max
def get_loss(self, x, y):
outputs = self.model(x)
if self.targeted:
target = ((y + torch.randint(1, self.num_classes, y.shape).cuda()) % self.num_classes).long()
loss = -self.loss_func(outputs, target)
else:
loss = self.loss_func(outputs, y)
return loss
def perturb(self, x, y):
"""
Given examples (x, y), returns their adversarial counterparts with
an attack length of eps.
:param x: input tensor.
:param y: label tensor.
:return: tensor containing perturbed inputs.
"""
self.model.eval()
x = x.detach().clone()
y = y.detach().clone()
y = y.cuda()
delta = torch.zeros_like(x)
delta = nn.Parameter(delta)
delta.requires_grad_()
if self.elementwise_best:
loss = self.get_loss(x, y)
worst_loss = loss.data.clone()
worst_perb = delta.data.clone()
if self.rand_init:
delta.data.uniform_(-self.eps, self.eps)
delta.data = (torch.clamp(x.data + delta.data, min=self.clip_min, max=self.clip_max) - x.data)
for ii in range(self.nb_iter):
adv_x = x + delta
loss = self.get_loss(adv_x, y)
if self.elementwise_best:
cond = loss.data > worst_loss
worst_loss[cond] = loss.data[cond]
worst_perb[cond] = delta.data[cond]
loss.mean().backward()
grad_sign = delta.grad.data.sign()
delta.data = delta.data + grad_sign * self.eps_iter
delta.data = torch.clamp(delta.data, min=-self.eps, max=self.eps)
delta.data = torch.clamp(x.data + delta.data, min=self.clip_min, max=self.clip_max) - x.data
delta.grad.data.zero_()
if self.elementwise_best:
adv_x = x + delta
loss = self.get_loss(adv_x, y)
cond = loss.data > worst_loss
worst_loss[cond] = loss.data[cond]
worst_perb[cond] = delta.data[cond]
adv_x = x + worst_perb
else:
adv_x = x + delta.data
return adv_x
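# Hypothetical single-model usage sketch (names assumed, not from the original file):
#
#   attack = LinfPGDAttack(model, eps=0.03, eps_iter=0.007, nb_iter=40,
#                          rand_init=True, elementwise_best=True)
#   x_adv = attack.perturb(x_batch.cuda(), y_batch.cuda())
#
# With elementwise_best=True the perturbation kept per example is the one that
# attained the highest loss over all iterations, rather than the final iterate.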
| 32.401899
| 109
| 0.573884
|
4a15489f4047d4bfb5b4e9a767618611aa3a258f
| 2,046
|
py
|
Python
|
tests/smartdatamodels/smartcities/test_building.py
|
Orange-OpenSource/python-ngsild-client
|
23ff31506aabd23c75befece1fb3d4536903cb2a
|
[
"Apache-2.0"
] | 7
|
2022-02-25T09:55:28.000Z
|
2022-03-25T20:48:01.000Z
|
tests/smartdatamodels/smartcities/test_building.py
|
Orange-OpenSource/python-ngsild-client
|
23ff31506aabd23c75befece1fb3d4536903cb2a
|
[
"Apache-2.0"
] | null | null | null |
tests/smartdatamodels/smartcities/test_building.py
|
Orange-OpenSource/python-ngsild-client
|
23ff31506aabd23c75befece1fb3d4536903cb2a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Software Name: ngsildclient
# SPDX-FileCopyrightText: Copyright (c) 2021 Orange
# SPDX-License-Identifier: Apache 2.0
#
# This software is distributed under the Apache 2.0;
# see the NOTICE file for more details.
#
# Author: Fabien BATTELLO <fabien.battello@orange.com> et al.
import pkg_resources
import json
from geojson import Polygon
from datetime import time
from ngsildclient import Entity, PostalAddressBuilder, OpeningHoursBuilder
def expected_dict(basename: str) -> dict:
filename: str = pkg_resources.resource_filename(
__name__, f"data/building/{basename}.json"
)
with open(filename, "r") as fp:
expected = json.load(fp)
return expected
def test_building():
"""
https://smart-data-models.github.io/dataModel.Building/Building/examples/example-normalized.jsonld
"""
polygon = Polygon([[(100, 0), (101, 0), (101, 1), (100, 1), (100, 0)]])
e = Entity("Building", "building-a85e3da145c1")
e.addr(
PostalAddressBuilder()
.locality("London")
.postalcode("EC4N 8AF")
.street("25 Walbrook")
.build()
)
e.prop("category", ["office"])
e.gprop("containedInPlace", polygon)
e.prop("dataProvider", "OperatorA").prop("description", "Office block")
e.prop("floorsAboveGround", 7).prop("floorsBelowGround", 0)
e.loc(polygon)
e.prop("mapUrl", "http://www.example.com")
e.rel("occupier", "Person:9830f692-7677-11e6-838b-4f9fb3dc5a4f")
e.prop(
"openingHours",
OpeningHoursBuilder()
.monday(time(10), time(19))
.tuesday(time(10), time(19))
.saturday(time(10), time(22))
.sunday(time(10), time(21))
.build(),
)
e.rel(
"owner",
[
"cdfd9cb8-ae2b-47cb-a43a-b9767ffd5c84",
"1be9cd61-ef59-421f-a326-4b6c84411ad4",
],
)
e.prop("source", "http://www.example.com")
assert e.to_dict() == expected_dict("building")
assert e.to_dict(kv=True) == expected_dict("building.kv")
| 29.652174
| 102
| 0.636852
|
4a15492167cc2f0db315d42fdafce14a605393d9
| 1,207
|
py
|
Python
|
sem_seg/eval_iou_accuracy.py
|
Enigma-li/pointnet
|
0728485a05d76bdaf48cd99af7a8132d3af474b7
|
[
"MIT"
] | null | null | null |
sem_seg/eval_iou_accuracy.py
|
Enigma-li/pointnet
|
0728485a05d76bdaf48cd99af7a8132d3af474b7
|
[
"MIT"
] | null | null | null |
sem_seg/eval_iou_accuracy.py
|
Enigma-li/pointnet
|
0728485a05d76bdaf48cd99af7a8132d3af474b7
|
[
"MIT"
] | null | null | null |
import numpy as np
pred_data_label_filenames = [line.rstrip() for line in open('all_pred_data_label_filelist.txt')]
# str.rstrip strips a character set rather than a suffix, so remove the '_pred.txt'
# suffix explicitly to derive the matching ground-truth filename
gt_label_filenames = [f[:-len('_pred.txt')] + '_gt.txt' for f in pred_data_label_filenames]
num_room = len(gt_label_filenames)
gt_classes = [0 for _ in range(13)]
positive_classes = [0 for _ in range(13)]
true_positive_classes = [0 for _ in range(13)]
for i in range(num_room):
print(i)
data_label = np.loadtxt(pred_data_label_filenames[i])
pred_label = data_label[:, -1]
gt_label = np.loadtxt(gt_label_filenames[i])
print(gt_label.shape)
for j in range(gt_label.shape[0]):
gt_l = int(gt_label[j])
pred_l = int(pred_label[j])
gt_classes[gt_l] += 1
positive_classes[pred_l] += 1
true_positive_classes[gt_l] += int(gt_l == pred_l)
print(gt_classes)
print(positive_classes)
print(true_positive_classes)
print('Overall accuracy: {0}'.format(sum(true_positive_classes) / float(sum(positive_classes))))
print('IoU:')
iou_list = []
for i in range(13):
iou = true_positive_classes[i] / float(gt_classes[i] + positive_classes[i] - true_positive_classes[i])
print(iou)
iou_list.append(iou)
print(sum(iou_list) / 13.0)
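# Worked example of the per-class IoU computed above (illustrative numbers only):
# with 10 ground-truth points, 8 predicted points and 6 true positives for a class,
# IoU = TP / (GT + Pred - TP) = 6 / (10 + 8 - 6) = 0.5.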
| 32.621622
| 106
| 0.707539
|
4a154bb3479f329732add0e2eca55287d3a0b473
| 3,897
|
py
|
Python
|
get_loader.py
|
AmruthaReddy1397/pythonProject
|
cfb900b5e65812030f5e06b8424396924c1b10cc
|
[
"MIT"
] | null | null | null |
get_loader.py
|
AmruthaReddy1397/pythonProject
|
cfb900b5e65812030f5e06b8424396924c1b10cc
|
[
"MIT"
] | null | null | null |
get_loader.py
|
AmruthaReddy1397/pythonProject
|
cfb900b5e65812030f5e06b8424396924c1b10cc
|
[
"MIT"
] | null | null | null |
# import os for loading file paths
# import pandas for lookup in annotation file
# import spacy for tokenizer
# pad_sequence for pad batch
# import Image to load image
import os
import pandas as pd
import spacy
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset
from PIL import Image
import torchvision.transforms as transforms
spacy_eng = spacy.load("en_core_web_sm")
class Vocabulary:
def __init__(self, freq_threshold):
self.itos = {0: "<PAD>", 1: "<SOS>", 2: "<EOS>", 3: "<UNK>"}
self.stoi = {"<PAD>": 0, "<SOS>": 1, "<EOS>": 2, "<UNK>": 3}
self.freq_threshold = freq_threshold
def __len__(self):
return len(self.itos)
@staticmethod
def tokenizer_eng(text):
return [tok.text.lower() for tok in spacy_eng.tokenizer(text)]
def build_vocabulary(self, sentence_list):
frequencies = {}
idx = 4
for sentence in sentence_list:
for word in self.tokenizer_eng(sentence):
if word not in frequencies:
frequencies[word] = 1
else:
frequencies[word] += 1
if frequencies[word] == self.freq_threshold:
self.stoi[word] = idx
self.itos[idx] = word
idx += 1
def numericalize(self, text):
tokenized_text = self.tokenizer_eng(text)
return [
self.stoi[token] if token in self.stoi else self.stoi["<UNK>"]
for token in tokenized_text
]
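# Illustrative (hypothetical) use of Vocabulary, not part of the original module:
# with freq_threshold=1 every token seen at least once gets an index starting at 4,
# and unseen tokens fall back to <UNK> (index 3):
#
#   vocab = Vocabulary(freq_threshold=1)
#   vocab.build_vocabulary(["a dog runs", "a dog sleeps"])
#   vocab.numericalize("a dog flies")   # -> [4, 5, 3]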
class FlickrDataset(Dataset):
def __init__(self, root_dir, captions_file, transform=None, freq_threshold=5):
self.root_dir = root_dir
self.df = pd.read_csv(captions_file)
self.transform = transform
# Get img, caption columns
self.imgs = self.df["image"]
self.captions = self.df["caption"]
# Initialize vocabulary and build vocab
self.vocab = Vocabulary(freq_threshold)
self.vocab.build_vocabulary(self.captions.tolist())
def __len__(self):
return len(self.df)
def __getitem__(self, index):
caption = self.captions[index]
img_id = self.imgs[index]
img = Image.open(os.path.join(self.root_dir, img_id)).convert("RGB")
if self.transform is not None:
img = self.transform(img)
numericalized_caption = [self.vocab.stoi["<SOS>"]]
numericalized_caption += self.vocab.numericalize(caption)
numericalized_caption.append(self.vocab.stoi["<EOS>"])
return img, torch.tensor(numericalized_caption)
class MyCollate:
def __init__(self, pad_idx):
self.pad_idx = pad_idx
def __call__(self, batch):
imgs = [item[0].unsqueeze(0) for item in batch]
imgs = torch.cat(imgs, dim=0)
targets = [item[1] for item in batch]
targets = pad_sequence(targets, batch_first=False, padding_value=self.pad_idx)
return imgs, targets
def get_loader(
root_folder,
annotation_file,
transform,
batch_size=32,
num_workers=8,
shuffle=True,
pin_memory=True,
):
dataset = FlickrDataset(root_folder, annotation_file, transform=transform)
pad_idx = dataset.vocab.stoi["<PAD>"]
loader = DataLoader(
dataset=dataset,
batch_size=batch_size,
num_workers=num_workers,
shuffle=shuffle,
pin_memory=pin_memory,
collate_fn=MyCollate(pad_idx=pad_idx),
)
return loader, dataset
if __name__ == "__main__":
transform = transforms.Compose(
[transforms.Resize((224, 224)), transforms.ToTensor(),]
)
loader, dataset = get_loader(
"flickr8k/images/", "flickr8k/captions.txt", transform=transform
)
for idx, (imgs, captions) in enumerate(loader):
print(imgs.shape)
print(captions.shape)
| 27.835714
| 86
| 0.628176
|
4a154c3aded22f53ee813a79156a7425555466f3
| 8,123
|
py
|
Python
|
rtf/generators/python.py
|
galeone/rtf
|
99829715b503029a11e92bdd8ad66e92cbe9205b
|
[
"Apache-2.0"
] | 6
|
2019-04-18T13:53:31.000Z
|
2020-06-23T08:03:30.000Z
|
rtf/generators/python.py
|
galeone/rtf
|
99829715b503029a11e92bdd8ad66e92cbe9205b
|
[
"Apache-2.0"
] | 1
|
2019-04-20T17:31:17.000Z
|
2019-04-20T17:31:17.000Z
|
rtf/generators/python.py
|
galeone/rtf
|
99829715b503029a11e92bdd8ad66e92cbe9205b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Paolo Galeone <nessuno@nerdz.eu>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generator for the Python programming language."""
import os
import pathlib
import black
from ..proto.lib import api_objects_pb2 as tf_api_objects
from .base import Generator, Parser
class Python(Parser, Generator):
"""Parser and Generator from and to the Python programming language."""
HEADER = (
r'"""Remote {package} - machine generated."""'
"\n"
"# This file is machine generated. Do NOT edit unless you\n"
"# REALLY know what are you doing.\n\n"
"# TODO: import grpc client\n"
"\n\n"
)
@staticmethod
def build_signature(argspec):
spec = Parser._parse_argspec(argspec)
dp_len = len(spec["defaults"]) if spec["defaults"] else 0
args_count = len(spec["args"])
args = {}
for i in range(args_count - dp_len, args_count):
args[str(spec["args"][i])] = spec["defaults"][i - args_count]
signature = ""
if args_count - dp_len > 0:
signature = ",".join(spec["args"][: args_count - dp_len])
if args:
signature += ","
        # The default parameters are hard to disambiguate.
        # e.g. defaults=['None', 'categorical_hinge'] must be parsed in two different ways:
        # 'None' -> must become the symbol None
        # 'categorical_hinge' -> must remain the string 'categorical_hinge'.
        # literal_eval just removes the quotes, making the defaults None, categorical_hinge.
#
# Thus, we have to handle these different scenarios.
defaults = ""
first_append = True
for param, default in args.items():
# Use a stupid heuristic: if the default parameter starts with an upper case
# character, it is likely to be a language symbol (e.g None), or a class
# static member (Variable.SOMETHING).
# If the parameter value contains the symbols "(", ")" then it might be a
# tuple, or a function call, and it should remain as it is.
# Otherwise, we treat this as a string.
if not first_append:
defaults += ", "
defaults += f"{param}="
if default[0].isupper() or "(" in default or ")" in default:
defaults += f"{default}"
else:
defaults += f"'{default}'"
first_append = False
return signature + defaults
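    # Illustrative (hypothetical) example of build_signature: for an argspec that
    # parses to args=['x', 'y', 'z'] and defaults=['None', 'relu'], the generated
    # signature is "x,y=None, z='relu'" -- 'None' is kept as a bare symbol because
    # it starts with an upper-case letter, while 'relu' is re-quoted as a string.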
def _write_module_members(self, fp, members):
if not members:
return
members_string_list = "[{}]".format(",".join(f"'{m.name}'" for m in members))
# First write all the getters
fp.write("\ndef __getattr__(name):\n")
fp.write(
"\tif name in {members_list}:\n"
"\t\traise ValueError('implement grcp getter call')\n".format(
members_list=members_string_list
)
)
# Now write all the setters
fp.write("\ndef __setattr__(name, value):\n")
fp.write(
"\tif name in {members_list}:\n"
"\t\traise ValueError('implement grcp setter call')\n".format(
members_list=members_string_list
)
)
def _write_module_member_methods(self, fp, member_methods):
if not member_methods:
return
for member_method in member_methods:
# optional: name, path, argspec
if member_method.HasField("name") and member_method.HasField("argspec"):
fp.write(
"\ndef {func_name}({func_signature}):\n"
"\traise ValueError('implement member method grpc call')\n".format(
func_name=member_method.name,
func_signature=self.build_signature(member_method.argspec),
)
)
def _write_class_member_methods(self, fp, member_methods):
if not member_methods:
return
for member_method in member_methods:
# optional: name, path, argspec
if member_method.HasField("name") and member_method.HasField("argspec"):
fp.write(
"\n\tdef {func_name}({func_signature}):\n"
"\t\traise ValueError('implement member method grpc call')\n".format(
func_name=member_method.name,
func_signature=self.build_signature(member_method.argspec),
)
)
def _write_class_members(self, fp, members):
if not members:
return
members_string_list = "[{}]".format(",".join(f"'{m.name}'" for m in members))
# First write all the getters
fp.write("\n\tdef __getattr__(self, name):\n")
fp.write(
"\t\tif name in {members_list}:\n"
"\t\t\traise ValueError('implement grcp getter call')\n".format(
members_list=members_string_list
)
)
# Now write all the setters
fp.write("\n\tdef __setattr__(self, name, value):\n")
fp.write(
"\t\tif name in {members_list}:\n"
"\t\t\traise ValueError('implement grcp setter call')\n".format(
members_list=members_string_list
)
)
def _write_class(self, fp, name, tf_class):
fp.write(f"class {name}:\n\n" f"\t#TODO: class attribute grpc client?\n\n")
self._write_class_members(fp, tf_class.member)
self._write_class_member_methods(fp, tf_class.member_method)
def _write_module(self, fp, tf_module):
self._write_module_members(fp, tf_module.member)
self._write_module_member_methods(fp, tf_module.member_method)
def convert(self, dest_dir, golden_proto_dict):
for path, tf_api_object in golden_proto_dict.items():
file_path = os.path.join(
dest_dir, os.path.join(path.replace(".", os.path.sep))
)
if tf_api_object.HasField("tf_module"):
file_basedir = file_path
if not os.path.exists(file_basedir):
os.makedirs(file_basedir)
file_path = os.path.join(file_basedir, "__init__.py")
if not os.path.exists(file_path):
with open(file_path, "w") as fp:
fp.write(Python.HEADER.format(package=Generator.MODULE_NAME))
with open(file_path, "a") as fp:
# TFAPIModule: repeated {member, member_method}
                    self._write_module(fp, tf_api_object.tf_module)
if tf_api_object.HasField("tf_class"):
explode = file_path.split(os.path.sep)
file_basedir = os.path.sep.join(explode[:-1])
class_name = explode[-1]
if not os.path.exists(file_basedir):
os.makedirs(file_basedir)
file_path += ".py"
if not os.path.exists(file_path):
with open(file_path, "w") as fp:
fp.write(Python.HEADER.format(package=Generator.MODULE_NAME))
with open(file_path, "a") as fp:
self._write_class(fp, class_name, tf_api_object.tf_class)
# Format the file
black.format_file_in_place(
pathlib.Path(file_path),
fast=True,
mode=black.FileMode(),
write_back=black.WriteBack.YES,
)
break
| 39.052885
| 92
| 0.578604
|
4a154c746c30f17cb0dcbc31855a89f17c3581d2
| 443
|
py
|
Python
|
Latte/ex9.py
|
Latte-inc/Learn-Python3.6
|
f3568cf2f8413f8730c2297bc39ae890bb82d962
|
[
"CC0-1.0"
] | 1
|
2021-10-15T05:43:19.000Z
|
2021-10-15T05:43:19.000Z
|
Latte/ex9.py
|
Latte-inc/Learn-Python3.6
|
f3568cf2f8413f8730c2297bc39ae890bb82d962
|
[
"CC0-1.0"
] | null | null | null |
Latte/ex9.py
|
Latte-inc/Learn-Python3.6
|
f3568cf2f8413f8730c2297bc39ae890bb82d962
|
[
"CC0-1.0"
] | 1
|
2022-01-13T10:34:55.000Z
|
2022-01-13T10:34:55.000Z
|
# Print! Print! Print! A new exercise!
# Time 2020/05/17 01:00
# fatcat like ....
# This code has a few magical things in it (for beginners, anyway)
days = "\nMon\nTue\nWed\nThu\nFri\nSat\nSun"
months = "\nJan\nFeb\nMar\nApr\nMay\nJun\nJul\nAug"
# Right off the bat, it's plain old variable assignment again
# These assignments use the newline character \n so each item prints on its own line!
print("Here are the days: ", days)
# Standard print call: print("...", variable_name)
print("Here are the months: ", months)
# Same as above
print("""
Here is a small trick with quotes:
using triple double quotes,
you can print text like this,
even three, four, or five lines
""")
| 19.26087
| 52
| 0.641084
|
4a154c783d91c880e8a025ce02dc94dac1265600
| 675
|
py
|
Python
|
pewlang.py
|
Tasty-Kiwi/Pewlang
|
1fb9fc72a6e46ee90f4ab1f2dfb289c61b38b6b5
|
[
"WTFPL"
] | 1
|
2021-02-14T06:20:20.000Z
|
2021-02-14T06:20:20.000Z
|
pewlang.py
|
Tasty-Kiwi/Pewlang
|
1fb9fc72a6e46ee90f4ab1f2dfb289c61b38b6b5
|
[
"WTFPL"
] | null | null | null |
pewlang.py
|
Tasty-Kiwi/Pewlang
|
1fb9fc72a6e46ee90f4ab1f2dfb289c61b38b6b5
|
[
"WTFPL"
] | 1
|
2021-02-12T17:22:48.000Z
|
2021-02-12T17:22:48.000Z
|
import sys
import syntax
# Main code
if sys.argv[1] == "decode":
pew = open(sys.argv[2], "rt", encoding="utf8").read()
text = pew
for x in range(len(syntax.BRAINF)):
text = text.replace(syntax.CUSTOM_LANG[x], syntax.BRAINF[x])
f = open(sys.argv[3], "w", encoding="utf8")
f.write(text)
f.close()
print("Decoding Done.")
if sys.argv[1] == "encode":
pew = open(sys.argv[2], "rt", encoding="utf8").read()
text = pew
for x in range(len(syntax.BRAINF)):
text = text.replace(syntax.BRAINF[x], syntax.CUSTOM_LANG[x])
f = open(sys.argv[3], "w", encoding="utf8")
f.write(text)
f.close()
print("Encoding Done.")
| 28.125
| 68
| 0.597037
|
4a154d0b71b4e55e53c6705c59229b5ac7c03738
| 1,733
|
py
|
Python
|
pype/plugins/maya/publish/validate_node_ids_related.py
|
tws0002/pype
|
80b1aad9990f6c7efabf0430a3da6633054bf4a8
|
[
"MIT"
] | null | null | null |
pype/plugins/maya/publish/validate_node_ids_related.py
|
tws0002/pype
|
80b1aad9990f6c7efabf0430a3da6633054bf4a8
|
[
"MIT"
] | null | null | null |
pype/plugins/maya/publish/validate_node_ids_related.py
|
tws0002/pype
|
80b1aad9990f6c7efabf0430a3da6633054bf4a8
|
[
"MIT"
] | null | null | null |
import pyblish.api
import pype.api
import avalon.io as io
import pype.maya.action
from pype.maya import lib
class ValidateNodeIDsRelated(pyblish.api.InstancePlugin):
"""Validate nodes have a related Colorbleed Id to the instance.data[asset]
"""
order = pype.api.ValidatePipelineOrder
label = 'Node Ids Related (ID)'
hosts = ['maya']
families = ["model",
"look",
"rig"]
optional = True
actions = [pype.maya.action.SelectInvalidAction,
pype.maya.action.GenerateUUIDsOnInvalidAction]
def process(self, instance):
"""Process all nodes in instance (including hierarchy)"""
# Ensure all nodes have a cbId
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError("Nodes IDs found that are not related to asset "
"'{}' : {}".format(instance.data['asset'],
invalid))
@classmethod
def get_invalid(cls, instance):
"""Return the member nodes that are invalid"""
invalid = list()
asset = instance.data['asset']
asset_data = io.find_one({"name": asset,
"type": "asset"},
projection={"_id": True})
asset_id = str(asset_data['_id'])
        # We do want to check the referenced nodes as they might be
# part of the end product
for node in instance:
_id = lib.get_id(node)
if not _id:
continue
node_asset_id = _id.split(":", 1)[0]
if node_asset_id != asset_id:
invalid.append(node)
return invalid
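# Note (assumption, not from the original file): the cbId checked above is expected to
# look like "<asset ObjectId hex>:<node uuid>", so _id.split(":", 1)[0] yields the asset
# id that must match the instance's asset document for the node to be considered valid.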
| 29.372881
| 79
| 0.552799
|
4a154ddb544015f1de595727db2bced4efd08efb
| 967
|
py
|
Python
|
polls/models.py
|
ocben1/djangoPolls
|
9c5cc8b089fb777d5f05abd0761027ab38e6104f
|
[
"MIT"
] | null | null | null |
polls/models.py
|
ocben1/djangoPolls
|
9c5cc8b089fb777d5f05abd0761027ab38e6104f
|
[
"MIT"
] | null | null | null |
polls/models.py
|
ocben1/djangoPolls
|
9c5cc8b089fb777d5f05abd0761027ab38e6104f
|
[
"MIT"
] | null | null | null |
import datetime
from django.db import models
from django.utils import timezone
from django.contrib import admin
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_text
@admin.display(
boolean=True,
ordering='pub_date',
description='Published recently?',
)
def was_published_recently(self):
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.pub_date <= now
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200,null=True)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
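# Hypothetical usage from the Django shell (not part of the original module):
#
#   q = Question.objects.create(question_text="What's new?", pub_date=timezone.now())
#   q.choice_set.create(choice_text="Not much", votes=0)
#   q.was_published_recently()   # True for questions published within the last day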
| 32.233333
| 71
| 0.693899
|
4a154e643bbd3a44dee6ae1d01694de1ce5f6060
| 1,950
|
py
|
Python
|
test_src/test_url_downloader.py
|
ChsHub/url_downloader
|
2dfa3dd9c7ec6024a12675c72bee3c1bdbb952fb
|
[
"MIT"
] | 1
|
2019-12-28T04:12:11.000Z
|
2019-12-28T04:12:11.000Z
|
test_src/test_url_downloader.py
|
ChsHub/url_downloader
|
2dfa3dd9c7ec6024a12675c72bee3c1bdbb952fb
|
[
"MIT"
] | null | null | null |
test_src/test_url_downloader.py
|
ChsHub/url_downloader
|
2dfa3dd9c7ec6024a12675c72bee3c1bdbb952fb
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from tempfile import TemporaryDirectory
from hypothesis import given
from hypothesis.strategies import text, integers
from url_downloader.url_downloader import _get_url_data, save_file, get_resource, _get_file_name
class SaveToDisk:
def __init__(self, file_path, url):
pass
def get(self, url, headers, timeout):
pass
@given(text())
def test__get_url_data(url: str):
tries = 1
timeout = 4
wait = 0
if url:
assert _get_url_data(url, lambda x, headers, timeout: x, tries=tries, timeout=timeout, wait=wait) == url
else:
assert _get_url_data(url, lambda x, headers, timeout: x, tries=tries, timeout=timeout, wait=wait) is None
def test__get_file_name():
assert _get_file_name('http://domain.com/filename.jpg') == 'filename.jpg'
assert _get_file_name('http://domain.com/filename.jpg?w=1101') == 'filename.jpg'
@given(text())
def test_save_file(url: str):
tries = 1
timeout = 4
wait = 0
file_name = 'file_name'
with TemporaryDirectory() as file_path:
# Test existing file
Path(file_path, file_name).touch()
assert save_file(url=url, file_path=file_path, file_name=file_name, timeout=timeout, tries=tries, wait=wait,
save_class=SaveToDisk) is True
def test_get_resource():
class TestResponse:
text = 'file_name'
tries = 1
timeout = 4
wait = 0
file_name = 'file_name'
with TemporaryDirectory() as file_path:
# Test existing file
file = Path(file_path, file_name)
file.touch()
url = file.as_uri()
result = get_resource('', timeout=timeout, wait=wait, tries=tries,
get_function=lambda x, headers, timeout: TestResponse())
assert type(result) == str
if __name__ == '__main__':
test__get_url_data()
test__get_file_name()
test_save_file()
test_get_resource()
| 27.464789
| 116
| 0.663077
|
4a154e7650156699d24b55f28ef91e66d1f9137c
| 4,245
|
py
|
Python
|
test/test_cvml/src/neuro.py
|
bacchus/bacchuslib
|
35c41b6a7244227c0779c99b3c2f98b9a8349477
|
[
"MIT"
] | null | null | null |
test/test_cvml/src/neuro.py
|
bacchus/bacchuslib
|
35c41b6a7244227c0779c99b3c2f98b9a8349477
|
[
"MIT"
] | null | null | null |
test/test_cvml/src/neuro.py
|
bacchus/bacchuslib
|
35c41b6a7244227c0779c99b3c2f98b9a8349477
|
[
"MIT"
] | null | null | null |
# https://thecodacus.com/neural-network-scratch-python-no-libraries
#import numpy as np
import math
import random
class Connection:
def __init__(self, connectedNeuron):
self.connectedNeuron = connectedNeuron
self.weight = random.random() # np.random.normal()
self.dWeight = 0.0
class Neuron:
eta = 0.001
alpha = 0.01
def __init__(self, layer):
self.dendrons = []
self.error = 0.0
self.gradient = 0.0
self.output = 0.0
if layer is None:
pass
else:
for neuron in layer:
con = Connection(neuron)
self.dendrons.append(con)
def addError(self, err):
self.error = self.error + err
def sigmoid(self, x):
return 1 / (1 + math.exp(-x * 1.0))
def dSigmoid(self, x):
return x * (1.0 - x)
def setError(self, err):
self.error = err
def setOutput(self, output):
self.output = output
def getOutput(self):
return self.output
def feedForword(self):
sumOutput = 0
if len(self.dendrons) == 0:
return
for dendron in self.dendrons:
sumOutput = sumOutput + dendron.connectedNeuron.getOutput() * dendron.weight
self.output = self.sigmoid(sumOutput)
def backPropagate(self):
        self.gradient = self.error * self.dSigmoid(self.output)
        for dendron in self.dendrons:
            dendron.dWeight = Neuron.eta * (
                dendron.connectedNeuron.output * self.gradient) + self.alpha * dendron.dWeight
            dendron.weight = dendron.weight + dendron.dWeight
            dendron.connectedNeuron.addError(dendron.weight * self.gradient)
        self.error = 0
class Network:
def __init__(self, topology):
self.layers = []
for numNeuron in topology:
layer = []
for i in range(numNeuron):
if (len(self.layers) == 0):
layer.append(Neuron(None))
else:
layer.append(Neuron(self.layers[-1]))
layer.append(Neuron(None))
layer[-1].setOutput(1)
self.layers.append(layer)
def setInput(self, inputs):
for i in range(len(inputs)):
self.layers[0][i].setOutput(inputs[i])
def feedForword(self):
for layer in self.layers[1:]:
for neuron in layer:
                neuron.feedForword()
def backPropagate(self, target):
for i in range(len(target)):
self.layers[-1][i].setError(target[i] - self.layers[-1][i].getOutput())
for layer in self.layers[::-1]:
for neuron in layer:
neuron.backPropagate()
def getError(self, target):
err = 0
for i in range(len(target)):
e = (target[i] - self.layers[-1][i].getOutput())
err = err + e ** 2
err = err / len(target)
err = math.sqrt(err)
return err
def getResults(self):
output = []
for neuron in self.layers[-1]:
output.append(neuron.getOutput())
output.pop()
return output
def getThResults(self):
output = []
for neuron in self.layers[-1]:
o = neuron.getOutput()
if (o > 0.5):
o = 1
else:
o = 0
output.append(o)
output.pop()
return output
def main():
topology = []
topology.append(2)
topology.append(3)
topology.append(2)
net = Network(topology)
Neuron.eta = 0.09
Neuron.alpha = 0.015
while True:
err = 0
inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
outputs = [[0, 0], [1, 0], [1, 0], [0, 1]]
for i in range(len(inputs)):
net.setInput(inputs[i])
net.feedForword()
net.backPropagate(outputs[i])
err = err + net.getError(outputs[i])
print "error: ", err
if err < 0.03:
break
while True:
a = input("type 1st input :")
b = input("type 2nd input :")
net.setInput([a, b])
net.feedForword()
print net.getThResults()
if __name__ == '__main__':
main()
| 26.867089
| 91
| 0.531684
|
4a1550b13d94837ac6115058e187145726ec841b
| 7,683
|
py
|
Python
|
tests/contrib/test_algorithms.py
|
eddloschi/pyjwt
|
edb58db19469f269f11b8233a9fd7ab408d2036a
|
[
"MIT"
] | 2
|
2021-03-07T14:46:54.000Z
|
2021-11-28T07:13:54.000Z
|
tests/contrib/test_algorithms.py
|
eddloschi/pyjwt
|
edb58db19469f269f11b8233a9fd7ab408d2036a
|
[
"MIT"
] | null | null | null |
tests/contrib/test_algorithms.py
|
eddloschi/pyjwt
|
edb58db19469f269f11b8233a9fd7ab408d2036a
|
[
"MIT"
] | 1
|
2021-10-20T02:25:15.000Z
|
2021-10-20T02:25:15.000Z
|
import base64
import pytest
from jwt.utils import force_bytes, force_unicode
from ..utils import key_path
try:
from jwt.contrib.algorithms.pycrypto import RSAAlgorithm
has_pycrypto = True
except ImportError:
has_pycrypto = False
try:
from jwt.contrib.algorithms.py_ecdsa import ECAlgorithm
has_ecdsa = True
except ImportError:
has_ecdsa = False
@pytest.mark.skipif(
not has_pycrypto, reason="Not supported without PyCrypto library"
)
class TestPycryptoAlgorithms:
def test_rsa_should_parse_pem_public_key(self):
algo = RSAAlgorithm(RSAAlgorithm.SHA256)
with open(key_path("testkey2_rsa.pub.pem"), "r") as pem_key:
algo.prepare_key(pem_key.read())
def test_rsa_should_accept_unicode_key(self):
algo = RSAAlgorithm(RSAAlgorithm.SHA256)
with open(key_path("testkey_rsa"), "r") as rsa_key:
algo.prepare_key(force_unicode(rsa_key.read()))
def test_rsa_should_reject_non_string_key(self):
algo = RSAAlgorithm(RSAAlgorithm.SHA256)
with pytest.raises(TypeError):
algo.prepare_key(None)
def test_rsa_sign_should_generate_correct_signature_value(self):
algo = RSAAlgorithm(RSAAlgorithm.SHA256)
jwt_message = force_bytes("Hello World!")
expected_sig = base64.b64decode(
force_bytes(
"yS6zk9DBkuGTtcBzLUzSpo9gGJxJFOGvUqN01iLhWHrzBQ9ZEz3+Ae38AXp"
"10RWwscp42ySC85Z6zoN67yGkLNWnfmCZSEv+xqELGEvBJvciOKsrhiObUl"
"2mveSc1oeO/2ujkGDkkkJ2epn0YliacVjZF5+/uDmImUfAAj8lzjnHlzYix"
"sn5jGz1H07jYYbi9diixN8IUhXeTafwFg02IcONhum29V40Wu6O5tAKWlJX"
"fHJnNUzAEUOXS0WahHVb57D30pcgIji9z923q90p5c7E2cU8V+E1qe8NdCA"
"APCDzZZ9zQ/dgcMVaBrGrgimrcLbPjueOKFgSO+SSjIElKA=="
)
)
with open(key_path("testkey_rsa"), "r") as keyfile:
jwt_key = algo.prepare_key(keyfile.read())
with open(key_path("testkey_rsa.pub"), "r") as keyfile:
jwt_pub_key = algo.prepare_key(keyfile.read())
algo.sign(jwt_message, jwt_key)
result = algo.verify(jwt_message, jwt_pub_key, expected_sig)
assert result
def test_rsa_verify_should_return_false_if_signature_invalid(self):
algo = RSAAlgorithm(RSAAlgorithm.SHA256)
jwt_message = force_bytes("Hello World!")
jwt_sig = base64.b64decode(
force_bytes(
"yS6zk9DBkuGTtcBzLUzSpo9gGJxJFOGvUqN01iLhWHrzBQ9ZEz3+Ae38AXp"
"10RWwscp42ySC85Z6zoN67yGkLNWnfmCZSEv+xqELGEvBJvciOKsrhiObUl"
"2mveSc1oeO/2ujkGDkkkJ2epn0YliacVjZF5+/uDmImUfAAj8lzjnHlzYix"
"sn5jGz1H07jYYbi9diixN8IUhXeTafwFg02IcONhum29V40Wu6O5tAKWlJX"
"fHJnNUzAEUOXS0WahHVb57D30pcgIji9z923q90p5c7E2cU8V+E1qe8NdCA"
"APCDzZZ9zQ/dgcMVaBrGrgimrcLbPjueOKFgSO+SSjIElKA=="
)
)
jwt_sig += force_bytes("123") # Signature is now invalid
with open(key_path("testkey_rsa.pub"), "r") as keyfile:
jwt_pub_key = algo.prepare_key(keyfile.read())
result = algo.verify(jwt_message, jwt_pub_key, jwt_sig)
assert not result
def test_rsa_verify_should_return_true_if_signature_valid(self):
algo = RSAAlgorithm(RSAAlgorithm.SHA256)
jwt_message = force_bytes("Hello World!")
jwt_sig = base64.b64decode(
force_bytes(
"yS6zk9DBkuGTtcBzLUzSpo9gGJxJFOGvUqN01iLhWHrzBQ9ZEz3+Ae38AXp"
"10RWwscp42ySC85Z6zoN67yGkLNWnfmCZSEv+xqELGEvBJvciOKsrhiObUl"
"2mveSc1oeO/2ujkGDkkkJ2epn0YliacVjZF5+/uDmImUfAAj8lzjnHlzYix"
"sn5jGz1H07jYYbi9diixN8IUhXeTafwFg02IcONhum29V40Wu6O5tAKWlJX"
"fHJnNUzAEUOXS0WahHVb57D30pcgIji9z923q90p5c7E2cU8V+E1qe8NdCA"
"APCDzZZ9zQ/dgcMVaBrGrgimrcLbPjueOKFgSO+SSjIElKA=="
)
)
with open(key_path("testkey_rsa.pub"), "r") as keyfile:
jwt_pub_key = algo.prepare_key(keyfile.read())
result = algo.verify(jwt_message, jwt_pub_key, jwt_sig)
assert result
def test_rsa_prepare_key_should_be_idempotent(self):
algo = RSAAlgorithm(RSAAlgorithm.SHA256)
with open(key_path("testkey_rsa.pub"), "r") as keyfile:
jwt_pub_key_first = algo.prepare_key(keyfile.read())
jwt_pub_key_second = algo.prepare_key(jwt_pub_key_first)
assert jwt_pub_key_first == jwt_pub_key_second
@pytest.mark.skipif(
not has_ecdsa, reason="Not supported without ecdsa library"
)
class TestEcdsaAlgorithms:
def test_ec_should_reject_non_string_key(self):
algo = ECAlgorithm(ECAlgorithm.SHA256)
with pytest.raises(TypeError):
algo.prepare_key(None)
def test_ec_should_accept_unicode_key(self):
algo = ECAlgorithm(ECAlgorithm.SHA256)
with open(key_path("testkey_ec"), "r") as ec_key:
algo.prepare_key(force_unicode(ec_key.read()))
def test_ec_sign_should_generate_correct_signature_value(self):
algo = ECAlgorithm(ECAlgorithm.SHA256)
jwt_message = force_bytes("Hello World!")
expected_sig = base64.b64decode(
force_bytes(
"AC+m4Jf/xI3guAC6w0w37t5zRpSCF6F4udEz5LiMiTIjCS4vcVe6dDOxK+M"
"mvkF8PxJuvqxP2CO3TR3okDPCl/NjATTO1jE+qBZ966CRQSSzcCM+tzcHzw"
"LZS5kbvKu0Acd/K6Ol2/W3B1NeV5F/gjvZn/jOwaLgWEUYsg0o4XVrAg65"
)
)
with open(key_path("testkey_ec"), "r") as keyfile:
jwt_key = algo.prepare_key(keyfile.read())
with open(key_path("testkey_ec.pub"), "r") as keyfile:
jwt_pub_key = algo.prepare_key(keyfile.read())
algo.sign(jwt_message, jwt_key)
result = algo.verify(jwt_message, jwt_pub_key, expected_sig)
assert result
def test_ec_verify_should_return_false_if_signature_invalid(self):
algo = ECAlgorithm(ECAlgorithm.SHA256)
jwt_message = force_bytes("Hello World!")
jwt_sig = base64.b64decode(
force_bytes(
"AC+m4Jf/xI3guAC6w0w37t5zRpSCF6F4udEz5LiMiTIjCS4vcVe6dDOxK+M"
"mvkF8PxJuvqxP2CO3TR3okDPCl/NjATTO1jE+qBZ966CRQSSzcCM+tzcHzw"
"LZS5kbvKu0Acd/K6Ol2/W3B1NeV5F/gjvZn/jOwaLgWEUYsg0o4XVrAg65"
)
)
jwt_sig += force_bytes("123") # Signature is now invalid
with open(key_path("testkey_ec.pub"), "r") as keyfile:
jwt_pub_key = algo.prepare_key(keyfile.read())
result = algo.verify(jwt_message, jwt_pub_key, jwt_sig)
assert not result
def test_ec_verify_should_return_true_if_signature_valid(self):
algo = ECAlgorithm(ECAlgorithm.SHA256)
jwt_message = force_bytes("Hello World!")
jwt_sig = base64.b64decode(
force_bytes(
"AC+m4Jf/xI3guAC6w0w37t5zRpSCF6F4udEz5LiMiTIjCS4vcVe6dDOxK+M"
"mvkF8PxJuvqxP2CO3TR3okDPCl/NjATTO1jE+qBZ966CRQSSzcCM+tzcHzw"
"LZS5kbvKu0Acd/K6Ol2/W3B1NeV5F/gjvZn/jOwaLgWEUYsg0o4XVrAg65"
)
)
with open(key_path("testkey_ec.pub"), "r") as keyfile:
jwt_pub_key = algo.prepare_key(keyfile.read())
result = algo.verify(jwt_message, jwt_pub_key, jwt_sig)
assert result
def test_ec_prepare_key_should_be_idempotent(self):
algo = ECAlgorithm(ECAlgorithm.SHA256)
with open(key_path("testkey_ec.pub"), "r") as keyfile:
jwt_pub_key_first = algo.prepare_key(keyfile.read())
jwt_pub_key_second = algo.prepare_key(jwt_pub_key_first)
assert jwt_pub_key_first == jwt_pub_key_second
| 35.734884
| 77
| 0.679422
|
4a15515fe665cb02223e9ab33164a5bb0f267718
| 11,051
|
py
|
Python
|
accel/gpu.py
|
kevjn/simplegrad
|
08b31d588904b890df2e377e5e0591b3ad9273fa
|
[
"MIT"
] | null | null | null |
accel/gpu.py
|
kevjn/simplegrad
|
08b31d588904b890df2e377e5e0591b3ad9273fa
|
[
"MIT"
] | null | null | null |
accel/gpu.py
|
kevjn/simplegrad
|
08b31d588904b890df2e377e5e0591b3ad9273fa
|
[
"MIT"
] | null | null | null |
import numpy as np
import pyopencl as cl
import pyopencl.array
import functools
import itertools as it
class GPU:
class Array(np.lib.mixins.NDArrayOperatorsMixin):
def __init__(self, shape, dtype=np.float32, data=None):
self.shape = shape
self.dtype = np.dtype(dtype)
self.size = int(np.prod(shape))
self.strides = tuple(np.multiply.accumulate([1, *shape[:0:-1]]) * self.dtype.itemsize)[::-1]
self.ndim = len(shape)
self.nbytes = self.dtype.itemsize * self.size
self.data = data
self.base = None
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
assert method == '__call__'
return self.__array_function__(ufunc, None, inputs, kwargs)
def __array_function__(self, func, types, inputs, kwargs):
func = getattr(GPU, func.__name__)
return func(*inputs, **kwargs)
def __sub__(x, y): return x + y * -1
def __rsub__(x, y): return y + x * -1
def __truediv__(x, y): return x * y ** -1
def __rtruediv__(x, y): return y * x ** -1
def __repr__(self):
return f"GPUArray({np.array2string(self.get(), 88, 4, True, ', ', 'GPUArray(', suffix=')')})"
def get(self):
res = np.empty(self.data.size // self.dtype.itemsize, self.dtype)
cl.enqueue_copy(GPU.queue, res, self.data)
if not self.shape:
return res
return np.lib.stride_tricks.as_strided(res, self.shape, self.strides)
def reshape(self, *shape):
if -1 in shape:
shape = tuple(x if x > 0 else
int(abs(np.prod(self.shape) / np.prod(shape)))
for x in shape)
result = GPU.Array(shape, self.dtype)
result.data = self.data
return result
def squeeze(self):
shape = tuple(np.compress(np.array(self.shape) > 1, self.shape))
result = GPU.Array(shape)
result.data = self.data
return result
def transpose(self, *order):
shape = tuple(np.take(self.shape, order))
result = self.__class__(shape)
result.strides = tuple(np.take(self.strides, order))
result.data = self.data
return result
def copy(self):
return GPU.copy(self) # only works for np.int32 type atm
def to_cpu(x):
return x.get()
def reshape(x, shape):
if np.isscalar(shape):
shape = (shape,)
if x.base:
return GPU.broadcast_to(x, shape)
return x.reshape(*shape)
def broadcast_to(x, shape):
if x.base:
x = x.base
# set strides to 0 for all singleton dimensions
strides = np.where(np.equal(x.shape, 1), 0, x.strides)
# add empty trailing strides if needed
strides = np.append(strides, np.array([0]*abs(x.ndim - len(shape)), int))
arr = GPU.Array(shape)
arr.data = x.data
arr.strides = tuple(strides)
arr.base = x
return arr
def as_strided(x, shape, strides):
arr = GPU.Array(shape)
arr.data = x.data
arr.shape = shape
arr.strides = strides
arr.dtype = x.dtype
arr.nbytes = x.nbytes
return arr
def array(arr, dtype=np.float32, ndmin=1, **kwargs):
if isinstance(arr, GPU.Array):
return arr
arr = np.array(arr, copy=False, dtype=dtype, ndmin=ndmin, **kwargs)
if arr.size:
data = cl.Buffer(GPU.ctx, cl.mem_flags.READ_WRITE |
cl.mem_flags.COPY_HOST_PTR, hostbuf=arr)
else:
data = None
return GPU.Array(arr.shape, dtype, data)
def empty(shape, dtype=np.float32):
arr = GPU.Array(shape, dtype)
arr.data = cl.Buffer(GPU.ctx, cl.mem_flags.READ_WRITE, arr.nbytes)
return arr
def arange(n):
return GPU.array(np.arange(n), dtype=np.int32)
class Parser(object):
def wrapper(parser, kernel):
def _wrapper(*args, **kwargs):
args = tuple(x if isinstance(x, (str, GPU.Array))
else GPU.array(x) for x in args)
return parser(kernel, *args, **kwargs)
return _wrapper
def elementwise(kernel, *args, **kwargs):
# allocate output buffer on device
res = GPU.empty(args[0].shape)
kernel([args[0].size], None, *(a.data for a in (*args, res)))
return res
def broadcast(kernel, *args):
res_shape = np.broadcast_shapes(*(x.shape for x in args))
res = GPU.empty(res_shape, dtype=args[0].dtype)
res_strides = np.arange(np.prod(res_shape), dtype=np.int32)
args_strides = tuple(
np.broadcast_to(
np.lib.stride_tricks.as_strided(
np.arange(np.prod(x.shape), dtype=np.int32), x.shape, x.strides),
res_shape).flatten()
for x in args)
# convert to opencl
args = tuple(it.chain(*zip((*args, res), (cl.array.to_device(GPU.queue, x)
for x in (*args_strides, res_strides)))))
kernel([np.prod(res_shape)], None, *(arg.data for arg in args))
return res
def einsum(kernel, subscripts, x, y):
# combines broadcasting and reduction parsing
x_subs, y_subs, out_subs = subscripts.replace('->',',').split(',')
# parse ellipsis if needed
if '...' in subscripts:
x_subs, y_subs = (subs.replace('...', str().join(map(chr, \
range(97, 97 + nd-sum(map(len, subs.split('...'))))))) \
for nd, subs in [(x.ndim, x_subs), (y.ndim, y_subs)])
# TODO: this will not work in all cases
out_subs = max(x_subs, y_subs, key=len)
# deduce output shape
res_shape = tuple([y.shape[y_subs.find(s)], x.shape[x_subs.find(s)]][s in x_subs] for s in out_subs)
reduced_subscripts = list((set(x_subs) | set(y_subs)) - set(out_subs))
if not reduced_subscripts:
# transpose operands relative to out_subs
x = x.transpose(*[out_subs.index(x) for x in x_subs])
y = y.transpose(*[out_subs.index(x) for x in y_subs])
# standard multiplication
return GPU.multiply(x, y)
xstrides = np.arange(np.prod(x.shape), dtype=np.int32)
stride = [int(s in x_subs and x.strides[x_subs.index(s)]) for s in out_subs]
xstrides = np.lib.stride_tricks.as_strided(xstrides, res_shape, stride).copy()
ystrides = np.arange(np.prod(y.shape), dtype=np.int32)
stride = [int(s in y_subs and y.strides[y_subs.index(s)]) for s in out_subs]
ystrides = np.lib.stride_tricks.as_strided(ystrides, res_shape, stride).copy()
# reduced dimension in operands
reduced_shape = tuple([y.shape[y_subs.find(s)], x.shape[x_subs.find(s)]][s in x_subs] for s in reduced_subscripts)
reduced_axes_stride_x = [int(s in x_subs and x.strides[x_subs.index(s)]) for s in reduced_subscripts]
stride = np.arange(np.prod(x.shape), dtype=np.int32)
reduced_axes_stride_x = np.lib.stride_tricks.as_strided(stride, reduced_shape, reduced_axes_stride_x).copy()
reduced_axes_stride_y = [int(s in y_subs and y.strides[y_subs.index(s)]) for s in reduced_subscripts]
stride = np.arange(np.prod(y.shape), dtype=np.int32)
reduced_axes_stride_y = np.lib.stride_tricks.as_strided(stride, reduced_shape, reduced_axes_stride_y).copy()
reduced_axis_size = np.prod(reduced_shape)
res = GPU.empty(res_shape)
res_strides = np.arange(np.prod(res_shape), dtype=np.int32)
# convert to opencl
reduced_axis_size = np.int32(reduced_axis_size)
x_strides = cl.array.to_device(GPU.queue, xstrides)
y_strides = cl.array.to_device(GPU.queue, ystrides)
reduced_axis_stride_x = cl.array.to_device(GPU.queue, reduced_axes_stride_x)
reduced_axis_stride_y = cl.array.to_device(GPU.queue, reduced_axes_stride_y)
res_strides = cl.array.to_device(GPU.queue, res_strides)
# call kernel
kernel([np.prod(res_shape)], None, x.data, y.data, x_strides.data, y_strides.data, \
reduced_axis_stride_x.data, reduced_axis_stride_y.data, reduced_axis_size, res.data, res_strides.data)
return res
def bincount(kernel, x, w, minlength):
res_np = np.zeros(minlength)
res = GPU.array(res_np)
# res_strides = np.arange(np.prod(res_shape), dtype=np.int32)
kernel([x.size], None, x.data, w.data, res.data)
return res
def reduce(kernel, x, axis=None, keepdims=False):
axis = tuple(np.arange(x.ndim)[tuple([axis])].flatten())
meta = np.stack([x.shape, x.strides])
reduced_shape, reduced_strides = meta[:,axis]
result_shape, xstrides = np.delete(meta, axis, axis=1)
strides = np.arange(np.prod(x.shape), dtype=np.int32)
reduced_axes_stride = np.lib.stride_tricks.as_strided(strides, reduced_shape, reduced_strides).copy()
xstrides = np.lib.stride_tricks.as_strided(strides, result_shape, xstrides).copy()
if keepdims:
np.put(meta[0], axis, 1)
result_shape = meta[0]
if not result_shape.size:
result_shape = (1,)
result = GPU.empty(tuple(result_shape))
result_strides = np.arange(np.prod(result_shape), dtype=np.int32)
# convert to opencl
reduced_axes_stride = cl.array.to_device(GPU.queue, reduced_axes_stride)
xstrides = cl.array.to_device(GPU.queue, xstrides)
reduced_axis_size = np.int32(np.prod(reduced_shape))
result_strides = cl.array.to_device(GPU.queue, result_strides)
args = x.data, xstrides.data, reduced_axes_stride.data, reduced_axis_size, \
result.data, result_strides.data
kernel([np.prod(result_shape)], None, *args)
return result
# initialize opencl
GPU.ctx = cl.create_some_context()
GPU.queue = cl.CommandQueue(GPU.ctx)
# compile kernels and set appropriate GPU methods
prg = cl.Program(GPU.ctx, open('./accel/gpu_ops.cl').read()).build()
for kernel in prg.all_kernels():
tokens = kernel.function_name.split("__")
assert len(tokens) == 2
name, parser = tokens
parser = getattr(GPU.Parser, parser)
wrapped_gpu_op = GPU.Parser.wrapper(parser, functools.partial(kernel, GPU.queue))
setattr(GPU, name, wrapped_gpu_op)
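# Note (illustrative, not from the original file): kernels in gpu_ops.cl are expected
# to be named "<op>__<parser>", e.g. a kernel called "add__broadcast" would be exposed
# here as GPU.add and dispatched through the GPU.Parser.broadcast parser.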
| 40.332117
| 126
| 0.58049
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.