max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
src/lgtm_test/bad.py
|
altendky/lgtm_test
| 0
|
6629151
|
import uuid
def create_a_not_really_a_secret():
return uuid.uuid4() # lgtm [py/not-sensitive-data]
def main():
print(create_a_not_really_a_secret())
main()
|
import uuid
def create_a_not_really_a_secret():
return uuid.uuid4() # lgtm [py/not-sensitive-data]
def main():
print(create_a_not_really_a_secret())
main()
|
en
| 0.358223
|
# lgtm [py/not-sensitive-data]
| 1.980149
| 2
|
aoa/__init__.py
|
nodonoughue/emitter-detection-python
| 0
|
6629152
|
<reponame>nodonoughue/emitter-detection-python<filename>aoa/__init__.py
from .aoa import *
from . import directional
from . import doppler
from . import interferometer
from . import watson_watt
|
from .aoa import *
from . import directional
from . import doppler
from . import interferometer
from . import watson_watt
|
none
| 1
| 1.156068
| 1
|
|
raw_python/lib/Udp.py
|
KevinWorkSpace/raw_python-master
| 12
|
6629153
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
#
# Copyright 2018 Dept. CSE SUSTech
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# --------------------------------------------------------------------------
# Don't Remove Authors Info |
# --------------------------------------------------------------------------
__author__ = '<NAME> ' # Name Of Author
__credit__ = '[] ' # Contributers Name
__contact__ = '<EMAIL> ' # Email
__copyright__ = 'Copyright 2018 <NAME> ' # Copyright
__license__ = 'Apache 2.0 ' # LICENSE
__Update__ = '2018-01-11 12:00:29.991758 ' # Last Update
__version__ = '0.1 ' # Version
__maintainer__ = '<NAME> ' # Project Current Maintainer
__status__ = 'Production ' # Project Status
# TODO: complete this
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
#
# Copyright 2018 Dept. CSE SUSTech
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# --------------------------------------------------------------------------
# Don't Remove Authors Info |
# --------------------------------------------------------------------------
__author__ = '<NAME> ' # Name Of Author
__credit__ = '[] ' # Contributers Name
__contact__ = '<EMAIL> ' # Email
__copyright__ = 'Copyright 2018 <NAME> ' # Copyright
__license__ = 'Apache 2.0 ' # LICENSE
__Update__ = '2018-01-11 12:00:29.991758 ' # Last Update
__version__ = '0.1 ' # Version
__maintainer__ = '<NAME> ' # Project Current Maintainer
__status__ = 'Production ' # Project Status
# TODO: complete this
|
en
| 0.706412
|
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # # Copyright 2018 Dept. CSE SUSTech # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # -------------------------------------------------------------------------- # Don't Remove Authors Info | # -------------------------------------------------------------------------- # Name Of Author # Contributers Name # Email # Copyright # LICENSE # Last Update # Version # Project Current Maintainer # Project Status # TODO: complete this
| 1.443596
| 1
|
angr/analyses/vsa_ddg.py
|
mariusmue/angr
| 2
|
6629154
|
import logging
from collections import defaultdict
import networkx
from . import Analysis
from .code_location import CodeLocation
from ..errors import AngrDDGError
from ..sim_variable import SimRegisterVariable, SimMemoryVariable
l = logging.getLogger("angr.analyses.vsa_ddg")
class DefUseChain(object):
"""
Stand for a def-use chain. it is generated by the DDG itself.
"""
def __init__(self, def_loc, use_loc, variable):
"""
Constructor.
:param def_loc:
:param use_loc:
:param variable:
:return:
"""
self.def_loc = def_loc
self.use_loc = use_loc
self.variable = variable
class VSA_DDG(Analysis):
"""
A Data dependency graph based on VSA states.
That means we don't (and shouldn't) expect any symbolic expressions.
"""
def __init__(self,
vfg=None,
start_addr=None,
interfunction_level=0,
context_sensitivity_level=2,
keep_data=False,
):
"""
Constructor.
:param vfg: An already constructed VFG. If not specified, a new VFG will be created with other
specified parameters. `vfg` and `start_addr` cannot both be unspecified.
:param start_addr: The address where to start the analysis (typically, a function's entry point).
:param interfunction_level: See VFG analysis.
:param context_sensitivity_level: See VFG analysis.
:param keep_data: Whether we keep set of addresses as edges in the graph, or just the cardinality of
the sets, which can be used as a "weight".
"""
# sanity check
if vfg is None and start_addr is None:
raise AngrDDGError('Argument vfg and start_addr cannot both be unspecified.')
if vfg is not None:
self._vfg = vfg
else:
self._vfg = self.project.analyses.VFG(function_start=start_addr,
interfunction_level=interfunction_level,
context_sensitivity_level=context_sensitivity_level)
self.graph = networkx.DiGraph()
self.keep_data = keep_data
self._simproc_map = {}
self._imarks = {}
self._explore()
#
# Properties
#
def __contains__(self, code_location):
"""
If `code_location` is in the graph.
:param code_location: A CodeLocation instance.
:returns: True/False.
"""
return code_location in self.graph
#
# Public methods
#
def get_predecessors(self, code_location):
"""
Returns all predecessors of `code_location`.
:param code_location: A CodeLocation instance.
:returns: A list of all predecessors.
"""
return self.graph.predecessors(code_location)
#
# Private methods
#
def _explore(self):
"""
Starting from the start_node, explore the entire VFG, and perform the following:
- Generate def-use chains for all registers and memory addresses using a worklist
"""
# TODO: The worklist algorithm can definitely use some optimizations. It is a future work.
# The worklist holds individual VFGNodes that comes from the VFG
# Initialize the worklist with all nodes in VFG
worklist = list(self._vfg.graph.nodes())
# Set up a set of worklist for fast inclusion test
worklist_set = set(worklist)
# A dict storing defs set
# variable -> locations
live_defs_per_node = { }
while worklist:
# Pop out a node
node = worklist[0]
worklist_set.remove(node)
worklist = worklist[ 1 : ]
# Grab all final states. There are usually more than one (one state for each successor), and we gotta
# process all of them
final_states = node.final_states
if node in live_defs_per_node:
live_defs = live_defs_per_node[node]
else:
live_defs = { }
live_defs_per_node[node] = live_defs
successing_nodes = self._vfg.graph.successors(node)
for state in final_states:
if state.history.jumpkind == 'Ijk_FakeRet' and len(final_states) > 1:
# Skip fakerets if there are other control flow transitions available
continue
# TODO: Match the jumpkind
# TODO: Support cases where IP is undecidable
corresponding_successors = [ n for n in successing_nodes if n.addr == state.se.eval(state.ip) ]
if not corresponding_successors:
continue
successing_node = corresponding_successors[0]
new_defs = self._track(state, live_defs)
if successing_node in live_defs_per_node:
defs_for_next_node = live_defs_per_node[successing_node]
else:
defs_for_next_node = { }
live_defs_per_node[successing_node] = defs_for_next_node
changed = False
for var, code_loc_set in new_defs.iteritems():
if var not in defs_for_next_node:
defs_for_next_node[var] = code_loc_set
changed = True
else:
for code_loc in code_loc_set:
if code_loc not in defs_for_next_node[var]:
defs_for_next_node[var].add(code_loc)
changed = True
if changed:
# Put all reachable successors back to our worklist again
if successing_node not in worklist_set:
worklist.append(successing_node)
worklist_set.add(successing_node)
all_successors_dict = networkx.dfs_successors(self._vfg.graph, source=successing_node)
for successors in all_successors_dict.values():
for s in successors:
if s not in worklist_set:
worklist.append(s)
worklist_set.add(s)
def _track(self, state, live_defs):
"""
Given all live definitions prior to this program point, track the changes, and return a new list of live
definitions. We scan through the action list of the new state to track the changes.
:param state: The input state at that program point.
:param live_defs: A list of all live definitions prior to reaching this program point.
:returns: A list of new live definitions.
"""
# Make a copy of live_defs
live_defs = live_defs.copy()
action_list = list(state.history.recent_actions)
# Since all temporary variables are local, we simply track them in a local dict
temps = { }
# All dependence edges are added to the graph either at the end of this method, or when they are going to be
# overwritten by a new edge. This is because we sometimes have to modify a previous edge (e.g. add new labels
# to the edge)
temps_to_edges = defaultdict(list)
regs_to_edges = defaultdict(list)
def _annotate_edges_in_dict(dict_, key, **new_labels):
"""
:param dict_: The dict, can be either `temps_to_edges` or `regs_to_edges`
:param key: The key used in finding elements in the dict
:param new_labels: New labels to be added to those edges
"""
for edge_tuple in dict_[key]:
# unpack it
_, _, labels = edge_tuple
for k, v in new_labels.iteritems():
if k in labels:
labels[k] = labels[k] + (v, )
else:
# Construct a tuple
labels[k] = (v, )
def _dump_edge_from_dict(dict_, key, del_key=True):
"""
Pick an edge from the dict based on the key specified, add it to our graph, and remove the key from dict.
:param dict_: The dict, can be either `temps_to_edges` or `regs_to_edges`.
:param key: The key used in finding elements in the dict.
"""
for edge_tuple in dict_[key]:
# unpack it
prev_code_loc, current_code_loc, labels = edge_tuple
# Add the new edge
self._add_edge(prev_code_loc, current_code_loc, **labels)
# Clear it
if del_key:
del dict_[key]
for a in action_list:
if a.bbl_addr is None:
current_code_loc = CodeLocation(None, None, sim_procedure=a.sim_procedure)
else:
current_code_loc = CodeLocation(a.bbl_addr, a.stmt_idx, ins_addr=a.ins_addr)
if a.type == "mem":
if a.actual_addrs is None:
# For now, mem reads don't necessarily have actual_addrs set properly
addr_list = set(state.memory.normalize_address(a.addr.ast, convert_to_valueset=True))
else:
addr_list = set(a.actual_addrs)
for addr in addr_list:
variable = SimMemoryVariable(addr, a.data.ast.size()) # TODO: Properly unpack the SAO
if a.action == "read":
# Create an edge between def site and use site
prevdefs = self._def_lookup(live_defs, variable)
for prev_code_loc, labels in prevdefs.iteritems():
self._read_edge = True
self._add_edge(prev_code_loc, current_code_loc, **labels)
else: #if a.action == "write":
# Kill the existing live def
self._kill(live_defs, variable, current_code_loc)
# For each of its register dependency and data dependency, we revise the corresponding edge
for reg_off in a.addr.reg_deps:
_annotate_edges_in_dict(regs_to_edges, reg_off, subtype='mem_addr')
for tmp in a.addr.tmp_deps:
_annotate_edges_in_dict(temps_to_edges, tmp, subtype='mem_addr')
for reg_off in a.data.reg_deps:
_annotate_edges_in_dict(regs_to_edges, reg_off, subtype='mem_data')
for tmp in a.data.tmp_deps:
_annotate_edges_in_dict(temps_to_edges, tmp, subtype='mem_data')
elif a.type == 'reg':
# For now, we assume a.offset is not symbolic
# TODO: Support symbolic register offsets
#variable = SimRegisterVariable(a.offset, a.data.ast.size())
variable = SimRegisterVariable(a.offset, self.project.arch.bits)
if a.action == 'read':
# What do we want to do?
prevdefs = self._def_lookup(live_defs, variable)
if a.offset in regs_to_edges:
_dump_edge_from_dict(regs_to_edges, a.offset)
for prev_code_loc, labels in prevdefs.iteritems():
edge_tuple = (prev_code_loc, current_code_loc, labels)
regs_to_edges[a.offset].append(edge_tuple)
else:
# write
self._kill(live_defs, variable, current_code_loc)
elif a.type == 'tmp':
# tmp is definitely not symbolic
if a.action == 'read':
prev_code_loc = temps[a.tmp]
edge_tuple = (prev_code_loc, current_code_loc, {'type':'tmp', 'data':a.tmp})
if a.tmp in temps_to_edges:
_dump_edge_from_dict(temps_to_edges, a.tmp)
temps_to_edges[a.tmp].append(edge_tuple)
else:
# write
temps[a.tmp] = current_code_loc
elif a.type == 'exit':
# exits should only depend on tmps
for tmp in a.tmp_deps:
prev_code_loc = temps[tmp]
edge_tuple = (prev_code_loc, current_code_loc, {'type': 'exit', 'data': tmp})
if tmp in temps_to_edges:
_dump_edge_from_dict(temps_to_edges, tmp)
temps_to_edges[tmp].append(edge_tuple)
# In the end, dump all other edges in those two dicts
for reg_offset in regs_to_edges:
_dump_edge_from_dict(regs_to_edges, reg_offset, del_key=False)
for tmp in temps_to_edges:
_dump_edge_from_dict(temps_to_edges, tmp, del_key=False)
return live_defs
# TODO : This docstring is out of date, what is addr_list?
def _def_lookup(self, live_defs, variable):
"""
This is a backward lookup in the previous defs.
:param addr_list: a list of normalized addresses.
Note that, as we are using VSA, it is possible that @a is affected by several definitions.
:returns: a dict {stmt:labels} where label is the number of individual addresses of addr_list (or the
actual set of addresses depending on the keep_addrs flag) that are definted by stmt.
"""
prevdefs = { }
if variable in live_defs:
code_loc_set = live_defs[variable]
for code_loc in code_loc_set:
# Label edges with cardinality or actual sets of addresses
if isinstance(variable, SimMemoryVariable):
type_ = 'mem'
elif isinstance(variable, SimRegisterVariable):
type_ = 'reg'
else:
raise AngrDDGError('Unknown variable type %s' % type(variable))
if self.keep_data is True:
data = variable
prevdefs[code_loc] = {
'type': type_,
'data': data
}
else:
if code_loc in prevdefs:
count = prevdefs[code_loc]['count'] + 1
else:
count = 0
prevdefs[code_loc] = {
'type': type_,
'count': count
}
return prevdefs
def _kill(self, live_defs, variable, code_loc):
"""
Kill previous defs. `addr_list` is a list of normalized addresses.
"""
# Case 1: address perfectly match, we kill
# Case 2: a is a subset of the original address
# Case 3: a is a superset of the original address
live_defs[variable] = { code_loc }
l.debug("XX CodeLoc %s kills variable %s", code_loc, variable)
def _add_edge(self, s_a, s_b, **edge_labels):
"""
Add an edge in the graph from `s_a` to statement `s_b`, where `s_a` and `s_b` are tuples of statements of the
form (irsb_addr, stmt_idx).
"""
# Is that edge already in the graph ?
# If at least one is new, then we are not redoing the same path again
if (s_a, s_b) not in self.graph.edges():
self.graph.add_edge(s_a, s_b, **edge_labels)
self._new = True
l.info("New edge: %s --> %s", s_a, s_b)
def get_all_nodes(self, simrun_addr, stmt_idx):
"""
Get all DDG nodes matching the given basic block address and statement index.
"""
nodes=[]
for n in self.graph.nodes():
if n.simrun_addr == simrun_addr and n.stmt_idx == stmt_idx:
nodes.add(n)
return nodes
from angr.analyses import AnalysesHub
AnalysesHub.register_default('VSA_DDG', VSA_DDG)
|
import logging
from collections import defaultdict
import networkx
from . import Analysis
from .code_location import CodeLocation
from ..errors import AngrDDGError
from ..sim_variable import SimRegisterVariable, SimMemoryVariable
l = logging.getLogger("angr.analyses.vsa_ddg")
class DefUseChain(object):
"""
Stand for a def-use chain. it is generated by the DDG itself.
"""
def __init__(self, def_loc, use_loc, variable):
"""
Constructor.
:param def_loc:
:param use_loc:
:param variable:
:return:
"""
self.def_loc = def_loc
self.use_loc = use_loc
self.variable = variable
class VSA_DDG(Analysis):
"""
A Data dependency graph based on VSA states.
That means we don't (and shouldn't) expect any symbolic expressions.
"""
def __init__(self,
vfg=None,
start_addr=None,
interfunction_level=0,
context_sensitivity_level=2,
keep_data=False,
):
"""
Constructor.
:param vfg: An already constructed VFG. If not specified, a new VFG will be created with other
specified parameters. `vfg` and `start_addr` cannot both be unspecified.
:param start_addr: The address where to start the analysis (typically, a function's entry point).
:param interfunction_level: See VFG analysis.
:param context_sensitivity_level: See VFG analysis.
:param keep_data: Whether we keep set of addresses as edges in the graph, or just the cardinality of
the sets, which can be used as a "weight".
"""
# sanity check
if vfg is None and start_addr is None:
raise AngrDDGError('Argument vfg and start_addr cannot both be unspecified.')
if vfg is not None:
self._vfg = vfg
else:
self._vfg = self.project.analyses.VFG(function_start=start_addr,
interfunction_level=interfunction_level,
context_sensitivity_level=context_sensitivity_level)
self.graph = networkx.DiGraph()
self.keep_data = keep_data
self._simproc_map = {}
self._imarks = {}
self._explore()
#
# Properties
#
def __contains__(self, code_location):
"""
If `code_location` is in the graph.
:param code_location: A CodeLocation instance.
:returns: True/False.
"""
return code_location in self.graph
#
# Public methods
#
def get_predecessors(self, code_location):
"""
Returns all predecessors of `code_location`.
:param code_location: A CodeLocation instance.
:returns: A list of all predecessors.
"""
return self.graph.predecessors(code_location)
#
# Private methods
#
def _explore(self):
"""
Starting from the start_node, explore the entire VFG, and perform the following:
- Generate def-use chains for all registers and memory addresses using a worklist
"""
# TODO: The worklist algorithm can definitely use some optimizations. It is a future work.
# The worklist holds individual VFGNodes that comes from the VFG
# Initialize the worklist with all nodes in VFG
worklist = list(self._vfg.graph.nodes())
# Set up a set of worklist for fast inclusion test
worklist_set = set(worklist)
# A dict storing defs set
# variable -> locations
live_defs_per_node = { }
while worklist:
# Pop out a node
node = worklist[0]
worklist_set.remove(node)
worklist = worklist[ 1 : ]
# Grab all final states. There are usually more than one (one state for each successor), and we gotta
# process all of them
final_states = node.final_states
if node in live_defs_per_node:
live_defs = live_defs_per_node[node]
else:
live_defs = { }
live_defs_per_node[node] = live_defs
successing_nodes = self._vfg.graph.successors(node)
for state in final_states:
if state.history.jumpkind == 'Ijk_FakeRet' and len(final_states) > 1:
# Skip fakerets if there are other control flow transitions available
continue
# TODO: Match the jumpkind
# TODO: Support cases where IP is undecidable
corresponding_successors = [ n for n in successing_nodes if n.addr == state.se.eval(state.ip) ]
if not corresponding_successors:
continue
successing_node = corresponding_successors[0]
new_defs = self._track(state, live_defs)
if successing_node in live_defs_per_node:
defs_for_next_node = live_defs_per_node[successing_node]
else:
defs_for_next_node = { }
live_defs_per_node[successing_node] = defs_for_next_node
changed = False
for var, code_loc_set in new_defs.iteritems():
if var not in defs_for_next_node:
defs_for_next_node[var] = code_loc_set
changed = True
else:
for code_loc in code_loc_set:
if code_loc not in defs_for_next_node[var]:
defs_for_next_node[var].add(code_loc)
changed = True
if changed:
# Put all reachable successors back to our worklist again
if successing_node not in worklist_set:
worklist.append(successing_node)
worklist_set.add(successing_node)
all_successors_dict = networkx.dfs_successors(self._vfg.graph, source=successing_node)
for successors in all_successors_dict.values():
for s in successors:
if s not in worklist_set:
worklist.append(s)
worklist_set.add(s)
def _track(self, state, live_defs):
"""
Given all live definitions prior to this program point, track the changes, and return a new list of live
definitions. We scan through the action list of the new state to track the changes.
:param state: The input state at that program point.
:param live_defs: A list of all live definitions prior to reaching this program point.
:returns: A list of new live definitions.
"""
# Make a copy of live_defs
live_defs = live_defs.copy()
action_list = list(state.history.recent_actions)
# Since all temporary variables are local, we simply track them in a local dict
temps = { }
# All dependence edges are added to the graph either at the end of this method, or when they are going to be
# overwritten by a new edge. This is because we sometimes have to modify a previous edge (e.g. add new labels
# to the edge)
temps_to_edges = defaultdict(list)
regs_to_edges = defaultdict(list)
def _annotate_edges_in_dict(dict_, key, **new_labels):
"""
:param dict_: The dict, can be either `temps_to_edges` or `regs_to_edges`
:param key: The key used in finding elements in the dict
:param new_labels: New labels to be added to those edges
"""
for edge_tuple in dict_[key]:
# unpack it
_, _, labels = edge_tuple
for k, v in new_labels.iteritems():
if k in labels:
labels[k] = labels[k] + (v, )
else:
# Construct a tuple
labels[k] = (v, )
def _dump_edge_from_dict(dict_, key, del_key=True):
"""
Pick an edge from the dict based on the key specified, add it to our graph, and remove the key from dict.
:param dict_: The dict, can be either `temps_to_edges` or `regs_to_edges`.
:param key: The key used in finding elements in the dict.
"""
for edge_tuple in dict_[key]:
# unpack it
prev_code_loc, current_code_loc, labels = edge_tuple
# Add the new edge
self._add_edge(prev_code_loc, current_code_loc, **labels)
# Clear it
if del_key:
del dict_[key]
for a in action_list:
if a.bbl_addr is None:
current_code_loc = CodeLocation(None, None, sim_procedure=a.sim_procedure)
else:
current_code_loc = CodeLocation(a.bbl_addr, a.stmt_idx, ins_addr=a.ins_addr)
if a.type == "mem":
if a.actual_addrs is None:
# For now, mem reads don't necessarily have actual_addrs set properly
addr_list = set(state.memory.normalize_address(a.addr.ast, convert_to_valueset=True))
else:
addr_list = set(a.actual_addrs)
for addr in addr_list:
variable = SimMemoryVariable(addr, a.data.ast.size()) # TODO: Properly unpack the SAO
if a.action == "read":
# Create an edge between def site and use site
prevdefs = self._def_lookup(live_defs, variable)
for prev_code_loc, labels in prevdefs.iteritems():
self._read_edge = True
self._add_edge(prev_code_loc, current_code_loc, **labels)
else: #if a.action == "write":
# Kill the existing live def
self._kill(live_defs, variable, current_code_loc)
# For each of its register dependency and data dependency, we revise the corresponding edge
for reg_off in a.addr.reg_deps:
_annotate_edges_in_dict(regs_to_edges, reg_off, subtype='mem_addr')
for tmp in a.addr.tmp_deps:
_annotate_edges_in_dict(temps_to_edges, tmp, subtype='mem_addr')
for reg_off in a.data.reg_deps:
_annotate_edges_in_dict(regs_to_edges, reg_off, subtype='mem_data')
for tmp in a.data.tmp_deps:
_annotate_edges_in_dict(temps_to_edges, tmp, subtype='mem_data')
elif a.type == 'reg':
# For now, we assume a.offset is not symbolic
# TODO: Support symbolic register offsets
#variable = SimRegisterVariable(a.offset, a.data.ast.size())
variable = SimRegisterVariable(a.offset, self.project.arch.bits)
if a.action == 'read':
# What do we want to do?
prevdefs = self._def_lookup(live_defs, variable)
if a.offset in regs_to_edges:
_dump_edge_from_dict(regs_to_edges, a.offset)
for prev_code_loc, labels in prevdefs.iteritems():
edge_tuple = (prev_code_loc, current_code_loc, labels)
regs_to_edges[a.offset].append(edge_tuple)
else:
# write
self._kill(live_defs, variable, current_code_loc)
elif a.type == 'tmp':
# tmp is definitely not symbolic
if a.action == 'read':
prev_code_loc = temps[a.tmp]
edge_tuple = (prev_code_loc, current_code_loc, {'type':'tmp', 'data':a.tmp})
if a.tmp in temps_to_edges:
_dump_edge_from_dict(temps_to_edges, a.tmp)
temps_to_edges[a.tmp].append(edge_tuple)
else:
# write
temps[a.tmp] = current_code_loc
elif a.type == 'exit':
# exits should only depend on tmps
for tmp in a.tmp_deps:
prev_code_loc = temps[tmp]
edge_tuple = (prev_code_loc, current_code_loc, {'type': 'exit', 'data': tmp})
if tmp in temps_to_edges:
_dump_edge_from_dict(temps_to_edges, tmp)
temps_to_edges[tmp].append(edge_tuple)
# In the end, dump all other edges in those two dicts
for reg_offset in regs_to_edges:
_dump_edge_from_dict(regs_to_edges, reg_offset, del_key=False)
for tmp in temps_to_edges:
_dump_edge_from_dict(temps_to_edges, tmp, del_key=False)
return live_defs
# TODO : This docstring is out of date, what is addr_list?
def _def_lookup(self, live_defs, variable):
"""
This is a backward lookup in the previous defs.
:param addr_list: a list of normalized addresses.
Note that, as we are using VSA, it is possible that @a is affected by several definitions.
:returns: a dict {stmt:labels} where label is the number of individual addresses of addr_list (or the
actual set of addresses depending on the keep_addrs flag) that are definted by stmt.
"""
prevdefs = { }
if variable in live_defs:
code_loc_set = live_defs[variable]
for code_loc in code_loc_set:
# Label edges with cardinality or actual sets of addresses
if isinstance(variable, SimMemoryVariable):
type_ = 'mem'
elif isinstance(variable, SimRegisterVariable):
type_ = 'reg'
else:
raise AngrDDGError('Unknown variable type %s' % type(variable))
if self.keep_data is True:
data = variable
prevdefs[code_loc] = {
'type': type_,
'data': data
}
else:
if code_loc in prevdefs:
count = prevdefs[code_loc]['count'] + 1
else:
count = 0
prevdefs[code_loc] = {
'type': type_,
'count': count
}
return prevdefs
def _kill(self, live_defs, variable, code_loc):
"""
Kill previous defs. `addr_list` is a list of normalized addresses.
"""
# Case 1: address perfectly match, we kill
# Case 2: a is a subset of the original address
# Case 3: a is a superset of the original address
live_defs[variable] = { code_loc }
l.debug("XX CodeLoc %s kills variable %s", code_loc, variable)
def _add_edge(self, s_a, s_b, **edge_labels):
"""
Add an edge in the graph from `s_a` to statement `s_b`, where `s_a` and `s_b` are tuples of statements of the
form (irsb_addr, stmt_idx).
"""
# Is that edge already in the graph ?
# If at least one is new, then we are not redoing the same path again
if (s_a, s_b) not in self.graph.edges():
self.graph.add_edge(s_a, s_b, **edge_labels)
self._new = True
l.info("New edge: %s --> %s", s_a, s_b)
def get_all_nodes(self, simrun_addr, stmt_idx):
"""
Get all DDG nodes matching the given basic block address and statement index.
"""
nodes=[]
for n in self.graph.nodes():
if n.simrun_addr == simrun_addr and n.stmt_idx == stmt_idx:
nodes.add(n)
return nodes
from angr.analyses import AnalysesHub
AnalysesHub.register_default('VSA_DDG', VSA_DDG)
|
en
| 0.851916
|
Stand for a def-use chain. it is generated by the DDG itself. Constructor. :param def_loc: :param use_loc: :param variable: :return: A Data dependency graph based on VSA states. That means we don't (and shouldn't) expect any symbolic expressions. Constructor. :param vfg: An already constructed VFG. If not specified, a new VFG will be created with other specified parameters. `vfg` and `start_addr` cannot both be unspecified. :param start_addr: The address where to start the analysis (typically, a function's entry point). :param interfunction_level: See VFG analysis. :param context_sensitivity_level: See VFG analysis. :param keep_data: Whether we keep set of addresses as edges in the graph, or just the cardinality of the sets, which can be used as a "weight". # sanity check # # Properties # If `code_location` is in the graph. :param code_location: A CodeLocation instance. :returns: True/False. # # Public methods # Returns all predecessors of `code_location`. :param code_location: A CodeLocation instance. :returns: A list of all predecessors. # # Private methods # Starting from the start_node, explore the entire VFG, and perform the following: - Generate def-use chains for all registers and memory addresses using a worklist # TODO: The worklist algorithm can definitely use some optimizations. It is a future work. # The worklist holds individual VFGNodes that comes from the VFG # Initialize the worklist with all nodes in VFG # Set up a set of worklist for fast inclusion test # A dict storing defs set # variable -> locations # Pop out a node # Grab all final states. 
There are usually more than one (one state for each successor), and we gotta # process all of them # Skip fakerets if there are other control flow transitions available # TODO: Match the jumpkind # TODO: Support cases where IP is undecidable # Put all reachable successors back to our worklist again Given all live definitions prior to this program point, track the changes, and return a new list of live definitions. We scan through the action list of the new state to track the changes. :param state: The input state at that program point. :param live_defs: A list of all live definitions prior to reaching this program point. :returns: A list of new live definitions. # Make a copy of live_defs # Since all temporary variables are local, we simply track them in a local dict # All dependence edges are added to the graph either at the end of this method, or when they are going to be # overwritten by a new edge. This is because we sometimes have to modify a previous edge (e.g. add new labels # to the edge) :param dict_: The dict, can be either `temps_to_edges` or `regs_to_edges` :param key: The key used in finding elements in the dict :param new_labels: New labels to be added to those edges # unpack it # Construct a tuple Pick an edge from the dict based on the key specified, add it to our graph, and remove the key from dict. :param dict_: The dict, can be either `temps_to_edges` or `regs_to_edges`. :param key: The key used in finding elements in the dict. # unpack it # Add the new edge # Clear it # For now, mem reads don't necessarily have actual_addrs set properly # TODO: Properly unpack the SAO # Create an edge between def site and use site #if a.action == "write": # Kill the existing live def # For each of its register dependency and data dependency, we revise the corresponding edge # For now, we assume a.offset is not symbolic # TODO: Support symbolic register offsets #variable = SimRegisterVariable(a.offset, a.data.ast.size()) # What do we want to do? 
# write # tmp is definitely not symbolic # write # exits should only depend on tmps # In the end, dump all other edges in those two dicts # TODO : This docstring is out of date, what is addr_list? This is a backward lookup in the previous defs. :param addr_list: a list of normalized addresses. Note that, as we are using VSA, it is possible that @a is affected by several definitions. :returns: a dict {stmt:labels} where label is the number of individual addresses of addr_list (or the actual set of addresses depending on the keep_addrs flag) that are definted by stmt. # Label edges with cardinality or actual sets of addresses Kill previous defs. `addr_list` is a list of normalized addresses. # Case 1: address perfectly match, we kill # Case 2: a is a subset of the original address # Case 3: a is a superset of the original address Add an edge in the graph from `s_a` to statement `s_b`, where `s_a` and `s_b` are tuples of statements of the form (irsb_addr, stmt_idx). # Is that edge already in the graph ? # If at least one is new, then we are not redoing the same path again Get all DDG nodes matching the given basic block address and statement index.
| 2.660426
| 3
|
web/app/lib/ee/filter.py
|
geary/claslite
| 0
|
6629155
|
"""Collection filters.
Example usage:
Filter('time', low, high)
.bounds(ring)
.eq('time', value)
.lt('time', value)
"""
# Using lowercase function naming to match the JavaScript names.
# pylint: disable=g-bad-name
# Our custom instance/static decorator is not recognized by lint.
# pylint: disable=no-self-argument, no-method-argument, g-doc-args
import datetime
import functools
import apifunction
import computedobject
import ee_exception
class _FilterAutoCreator(object):
"""A decorator to make Filter methods both static and instance.
If the decorated method is called as an instance method, its result is passed
through _append().
"""
def __init__(self, func):
self.func = func
def __get__(self, filter_instance, cls=None):
if filter_instance is None:
return self.func
@functools.wraps(self.func)
def PassThroughAppend(*args, **kwargs):
return filter_instance._append( # pylint: disable=protected-access
self.func(*args, **kwargs))
return PassThroughAppend
# A map from the deprecated old-style comparison operator names to API
# function names, implicitly prefixed with "Filter.". Negative operators
# (those starting with "not_") are not included.
_FUNCTION_NAMES = {
'equals': 'equals',
'less_than': 'lessThan',
'greater_than': 'greaterThan',
'contains': 'stringContains',
'starts_with': 'stringStartsWith',
'ends_with': 'stringEndsWith',
}
class Filter(computedobject.ComputedObject):
"""An object to represent collection filters."""
_initialized = False
def __init__(self, opt_filter=None):
"""Construct a filter.
This constuctor accepts the following args:
1) Another filter.
2) An array of filters (which are implicitly ANDed together).
3) A ComputedObject returning a filter. Users shouldn't be making these;
they're produced by the generator functions below.
Args:
opt_filter: Optional filter to add.
"""
self.initialize()
if isinstance(opt_filter, (list, tuple)):
if not opt_filter:
raise ee_exception.EEException('Empty list specified for ee.Filter().')
elif len(opt_filter) == 1:
opt_filter = opt_filter[0]
else:
self._filter = tuple(opt_filter)
super(Filter, self).__init__(
apifunction.ApiFunction.lookup('Filter.and'),
{'filters': self._filter})
return
if isinstance(opt_filter, computedobject.ComputedObject):
super(Filter, self).__init__(opt_filter.func, opt_filter.args)
self._filter = (opt_filter,)
elif opt_filter is None:
# A silly call with no arguments left for backward-compatibility.
# Encoding such a filter is expected to fail, but it can be composed
# by calling the various methods that end up in _append().
super(Filter, self).__init__(None, None)
self._filter = ()
else:
raise ee_exception.EEException(
'Invalid argument specified for ee.Filter(): %s' % opt_filter)
@classmethod
def initialize(cls):
"""Imports API functions to this class."""
if not cls._initialized:
apifunction.ApiFunction.importApi(cls, 'Filter', 'Filter')
cls._initialized = True
@classmethod
def reset(cls):
"""Removes imported API functions from this class."""
apifunction.ApiFunction.clearApi(cls)
cls._initialized = False
def predicateCount(self):
"""Return the number of predicates that have been added to this filter.
Returns:
The number of predicates that have been added to this filter.
This does not count nested predicates.
"""
return len(self._filter)
def _append(self, new_filter):
"""Append a predicate to this filter.
These are implicitly ANDed.
Args:
new_filter: The filter to append to this one. Possible types are:
1) another fully constructed Filter,
2) a JSON representation of a filter,
3) an array of 1 or 2.
Returns:
A new filter that is the combination of both.
"""
if new_filter is not None:
prev = list(self._filter)
if isinstance(new_filter, Filter):
prev.extend(new_filter._filter) # pylint: disable=protected-access
elif isinstance(new_filter, list):
prev.extend(new_filter)
else:
prev.append(new_filter)
return Filter(prev)
def Not(self):
"""Returns the opposite of this filter.
Returns:
The negated filter, which will match iff this filter doesn't.
"""
return apifunction.ApiFunction.call_('Filter.not', self)
@_FilterAutoCreator
def metadata_(name, operator, value):
"""Filter on metadata. This is deprecated.
Args:
name: The property name to filter on.
operator: The type of comparison. One of:
"equals", "less_than", "greater_than", "contains", "begins_with",
"ends_with", or any of these prefixed with "not_".
value: The value to compare against.
Returns:
The new filter.
"""
operator = operator.lower()
# Check for negated filters.
negated = False
if operator.startswith('not_'):
negated = True
operator = operator[4:]
# Convert the operator to a function.
if operator not in _FUNCTION_NAMES:
raise ee_exception.EEException(
'Unknown filtering operator: %s' % operator)
func_name = 'Filter.' + _FUNCTION_NAMES[operator]
new_filter = apifunction.ApiFunction.call_(func_name, name, value)
return new_filter.Not() if negated else new_filter
@_FilterAutoCreator
def eq(name, value):
"""Filter to metadata equal to the given value."""
return apifunction.ApiFunction.call_('Filter.equals', name, value)
@_FilterAutoCreator
def neq(name, value):
"""Filter to metadata not equal to the given value."""
return Filter.eq(name, value).Not()
@_FilterAutoCreator
def lt(name, value):
"""Filter to metadata less than the given value."""
return apifunction.ApiFunction.call_('Filter.lessThan', name, value)
@_FilterAutoCreator
def gte(name, value):
"""Filter on metadata greater than or equal to the given value."""
return Filter.lt(name, value).Not()
@_FilterAutoCreator
def gt(name, value):
"""Filter on metadata greater than the given value."""
return apifunction.ApiFunction.call_('Filter.greaterThan', name, value)
@_FilterAutoCreator
def lte(name, value):
"""Filter on metadata less than or equal to the given value."""
return Filter.gt(name, value).Not()
@_FilterAutoCreator
def contains(name, value):
"""Filter on metadata containing the given string."""
return apifunction.ApiFunction.call_('Filter.stringContains', name, value)
@_FilterAutoCreator
def not_contains(name, value):
"""Filter on metadata not containing the given string."""
return Filter.contains(name, value).Not()
@_FilterAutoCreator
def starts_with(name, value):
"""Filter on metadata begining with the given string."""
return apifunction.ApiFunction.call_('Filter.stringStartsWith', name, value)
@_FilterAutoCreator
def not_starts_with(name, value):
"""Filter on metadata not begining with the given string."""
return Filter.starts_with(name, value).Not()
@_FilterAutoCreator
def ends_with(name, value):
"""Filter on metadata ending with the given string."""
return apifunction.ApiFunction.call_('Filter.stringEndsWith', name, value)
@_FilterAutoCreator
def not_ends_with(name, value):
"""Filter on metadata not ending with the given string."""
return Filter.ends_with(name, value).Not()
@_FilterAutoCreator
def And(*args):
"""Combine two or more filters using boolean AND."""
if len(args) == 1 and isinstance(args[0], (list, tuple)):
args = args[0]
return apifunction.ApiFunction.call_('Filter.and', args)
@staticmethod
def Or(*args):
"""Combine two or more filters using boolean OR."""
if len(args) == 1 and isinstance(args[0], (list, tuple)):
args = args[0]
return apifunction.ApiFunction.call_('Filter.or', args)
@_FilterAutoCreator
def date(start, opt_end=None):
"""Filter images by date.
Args:
start: The start date as a UTC datetime or ms since Unix epoch.
opt_end: The end date as a UTC datetime or ms since Unix epoch.
Returns:
The modified filter.
"""
if opt_end is None:
# Can't create half-open DateRanges. Hack around it.
opt_end = datetime.datetime(9999, 1, 1)
date_range = apifunction.ApiFunction.call_('DateRange', start, opt_end)
return apifunction.ApiFunction.apply_('Filter.dateRangeContains', {
'leftValue': date_range,
'rightField': 'system:time_start'
})
@_FilterAutoCreator
def inList(opt_leftField=None,
opt_rightValue=None,
opt_rightField=None,
opt_leftValue=None):
"""Filter on metadata contained in a list.
Args:
opt_leftField: A selector for the left operand.
Should not be specified if leftValue is specified.
opt_rightValue: The value of the right operand.
Should not be specified if rightField is specified.
opt_rightField: A selector for the right operand.
Should not be specified if rightValue is specified.
opt_leftValue: The value of the left operand.
Should not be specified if leftField is specified.
Returns:
The constructed filter.
"""
# Implement this in terms of listContains, with the arguments switched.
# In listContains the list is on the left side, while in inList it's on
# the right.
return apifunction.ApiFunction.apply_('Filter.listContains', {
'leftField': opt_rightField,
'rightValue': opt_leftValue,
'rightField': opt_leftField,
'leftValue': opt_rightValue
})
@_FilterAutoCreator
def geometry(geometry, opt_errorMargin=None):
"""Filter on bounds.
Items in the collection with a footprint that fails to intersect
the bounds will be excluded when the collection is evaluated.
Args:
geometry: The geometry to filter to either as a GeoJSON geometry,
or a FeatureCollection, from which a geometry will be extracted.
opt_errorMargin: An optional error margin. If a number, interpreted as
sphere surface meters.
Returns:
The modified filter.
"""
# Invoke geometry promotion then manually promote to a Feature.
args = {
'leftField': '.all',
'rightValue': apifunction.ApiFunction.call_('Feature', geometry)
}
if opt_errorMargin is not None:
args['maxError'] = opt_errorMargin
return apifunction.ApiFunction.apply_('Filter.intersects', args)
@staticmethod
def name():
return 'Filter'
|
"""Collection filters.
Example usage:
Filter('time', low, high)
.bounds(ring)
.eq('time', value)
.lt('time', value)
"""
# Using lowercase function naming to match the JavaScript names.
# pylint: disable=g-bad-name
# Our custom instance/static decorator is not recognized by lint.
# pylint: disable=no-self-argument, no-method-argument, g-doc-args
import datetime
import functools
import apifunction
import computedobject
import ee_exception
class _FilterAutoCreator(object):
"""A decorator to make Filter methods both static and instance.
If the decorated method is called as an instance method, its result is passed
through _append().
"""
def __init__(self, func):
self.func = func
def __get__(self, filter_instance, cls=None):
if filter_instance is None:
return self.func
@functools.wraps(self.func)
def PassThroughAppend(*args, **kwargs):
return filter_instance._append( # pylint: disable=protected-access
self.func(*args, **kwargs))
return PassThroughAppend
# A map from the deprecated old-style comparison operator names to API
# function names, implicitly prefixed with "Filter.". Negative operators
# (those starting with "not_") are not included.
_FUNCTION_NAMES = {
'equals': 'equals',
'less_than': 'lessThan',
'greater_than': 'greaterThan',
'contains': 'stringContains',
'starts_with': 'stringStartsWith',
'ends_with': 'stringEndsWith',
}
class Filter(computedobject.ComputedObject):
"""An object to represent collection filters."""
_initialized = False
def __init__(self, opt_filter=None):
"""Construct a filter.
This constuctor accepts the following args:
1) Another filter.
2) An array of filters (which are implicitly ANDed together).
3) A ComputedObject returning a filter. Users shouldn't be making these;
they're produced by the generator functions below.
Args:
opt_filter: Optional filter to add.
"""
self.initialize()
if isinstance(opt_filter, (list, tuple)):
if not opt_filter:
raise ee_exception.EEException('Empty list specified for ee.Filter().')
elif len(opt_filter) == 1:
opt_filter = opt_filter[0]
else:
self._filter = tuple(opt_filter)
super(Filter, self).__init__(
apifunction.ApiFunction.lookup('Filter.and'),
{'filters': self._filter})
return
if isinstance(opt_filter, computedobject.ComputedObject):
super(Filter, self).__init__(opt_filter.func, opt_filter.args)
self._filter = (opt_filter,)
elif opt_filter is None:
# A silly call with no arguments left for backward-compatibility.
# Encoding such a filter is expected to fail, but it can be composed
# by calling the various methods that end up in _append().
super(Filter, self).__init__(None, None)
self._filter = ()
else:
raise ee_exception.EEException(
'Invalid argument specified for ee.Filter(): %s' % opt_filter)
@classmethod
def initialize(cls):
"""Imports API functions to this class."""
if not cls._initialized:
apifunction.ApiFunction.importApi(cls, 'Filter', 'Filter')
cls._initialized = True
@classmethod
def reset(cls):
"""Removes imported API functions from this class."""
apifunction.ApiFunction.clearApi(cls)
cls._initialized = False
def predicateCount(self):
"""Return the number of predicates that have been added to this filter.
Returns:
The number of predicates that have been added to this filter.
This does not count nested predicates.
"""
return len(self._filter)
def _append(self, new_filter):
"""Append a predicate to this filter.
These are implicitly ANDed.
Args:
new_filter: The filter to append to this one. Possible types are:
1) another fully constructed Filter,
2) a JSON representation of a filter,
3) an array of 1 or 2.
Returns:
A new filter that is the combination of both.
"""
if new_filter is not None:
prev = list(self._filter)
if isinstance(new_filter, Filter):
prev.extend(new_filter._filter) # pylint: disable=protected-access
elif isinstance(new_filter, list):
prev.extend(new_filter)
else:
prev.append(new_filter)
return Filter(prev)
def Not(self):
"""Returns the opposite of this filter.
Returns:
The negated filter, which will match iff this filter doesn't.
"""
return apifunction.ApiFunction.call_('Filter.not', self)
@_FilterAutoCreator
def metadata_(name, operator, value):
"""Filter on metadata. This is deprecated.
Args:
name: The property name to filter on.
operator: The type of comparison. One of:
"equals", "less_than", "greater_than", "contains", "begins_with",
"ends_with", or any of these prefixed with "not_".
value: The value to compare against.
Returns:
The new filter.
"""
operator = operator.lower()
# Check for negated filters.
negated = False
if operator.startswith('not_'):
negated = True
operator = operator[4:]
# Convert the operator to a function.
if operator not in _FUNCTION_NAMES:
raise ee_exception.EEException(
'Unknown filtering operator: %s' % operator)
func_name = 'Filter.' + _FUNCTION_NAMES[operator]
new_filter = apifunction.ApiFunction.call_(func_name, name, value)
return new_filter.Not() if negated else new_filter
@_FilterAutoCreator
def eq(name, value):
"""Filter to metadata equal to the given value."""
return apifunction.ApiFunction.call_('Filter.equals', name, value)
@_FilterAutoCreator
def neq(name, value):
"""Filter to metadata not equal to the given value."""
return Filter.eq(name, value).Not()
@_FilterAutoCreator
def lt(name, value):
"""Filter to metadata less than the given value."""
return apifunction.ApiFunction.call_('Filter.lessThan', name, value)
@_FilterAutoCreator
def gte(name, value):
"""Filter on metadata greater than or equal to the given value."""
return Filter.lt(name, value).Not()
@_FilterAutoCreator
def gt(name, value):
"""Filter on metadata greater than the given value."""
return apifunction.ApiFunction.call_('Filter.greaterThan', name, value)
@_FilterAutoCreator
def lte(name, value):
"""Filter on metadata less than or equal to the given value."""
return Filter.gt(name, value).Not()
@_FilterAutoCreator
def contains(name, value):
"""Filter on metadata containing the given string."""
return apifunction.ApiFunction.call_('Filter.stringContains', name, value)
@_FilterAutoCreator
def not_contains(name, value):
"""Filter on metadata not containing the given string."""
return Filter.contains(name, value).Not()
@_FilterAutoCreator
def starts_with(name, value):
"""Filter on metadata begining with the given string."""
return apifunction.ApiFunction.call_('Filter.stringStartsWith', name, value)
@_FilterAutoCreator
def not_starts_with(name, value):
"""Filter on metadata not begining with the given string."""
return Filter.starts_with(name, value).Not()
@_FilterAutoCreator
def ends_with(name, value):
"""Filter on metadata ending with the given string."""
return apifunction.ApiFunction.call_('Filter.stringEndsWith', name, value)
@_FilterAutoCreator
def not_ends_with(name, value):
"""Filter on metadata not ending with the given string."""
return Filter.ends_with(name, value).Not()
@_FilterAutoCreator
def And(*args):
"""Combine two or more filters using boolean AND."""
if len(args) == 1 and isinstance(args[0], (list, tuple)):
args = args[0]
return apifunction.ApiFunction.call_('Filter.and', args)
@staticmethod
def Or(*args):
"""Combine two or more filters using boolean OR."""
if len(args) == 1 and isinstance(args[0], (list, tuple)):
args = args[0]
return apifunction.ApiFunction.call_('Filter.or', args)
@_FilterAutoCreator
def date(start, opt_end=None):
"""Filter images by date.
Args:
start: The start date as a UTC datetime or ms since Unix epoch.
opt_end: The end date as a UTC datetime or ms since Unix epoch.
Returns:
The modified filter.
"""
if opt_end is None:
# Can't create half-open DateRanges. Hack around it.
opt_end = datetime.datetime(9999, 1, 1)
date_range = apifunction.ApiFunction.call_('DateRange', start, opt_end)
return apifunction.ApiFunction.apply_('Filter.dateRangeContains', {
'leftValue': date_range,
'rightField': 'system:time_start'
})
@_FilterAutoCreator
def inList(opt_leftField=None,
opt_rightValue=None,
opt_rightField=None,
opt_leftValue=None):
"""Filter on metadata contained in a list.
Args:
opt_leftField: A selector for the left operand.
Should not be specified if leftValue is specified.
opt_rightValue: The value of the right operand.
Should not be specified if rightField is specified.
opt_rightField: A selector for the right operand.
Should not be specified if rightValue is specified.
opt_leftValue: The value of the left operand.
Should not be specified if leftField is specified.
Returns:
The constructed filter.
"""
# Implement this in terms of listContains, with the arguments switched.
# In listContains the list is on the left side, while in inList it's on
# the right.
return apifunction.ApiFunction.apply_('Filter.listContains', {
'leftField': opt_rightField,
'rightValue': opt_leftValue,
'rightField': opt_leftField,
'leftValue': opt_rightValue
})
@_FilterAutoCreator
def geometry(geometry, opt_errorMargin=None):
"""Filter on bounds.
Items in the collection with a footprint that fails to intersect
the bounds will be excluded when the collection is evaluated.
Args:
geometry: The geometry to filter to either as a GeoJSON geometry,
or a FeatureCollection, from which a geometry will be extracted.
opt_errorMargin: An optional error margin. If a number, interpreted as
sphere surface meters.
Returns:
The modified filter.
"""
# Invoke geometry promotion then manually promote to a Feature.
args = {
'leftField': '.all',
'rightValue': apifunction.ApiFunction.call_('Feature', geometry)
}
if opt_errorMargin is not None:
args['maxError'] = opt_errorMargin
return apifunction.ApiFunction.apply_('Filter.intersects', args)
@staticmethod
def name():
return 'Filter'
|
en
| 0.812056
|
Collection filters. Example usage: Filter('time', low, high) .bounds(ring) .eq('time', value) .lt('time', value) # Using lowercase function naming to match the JavaScript names. # pylint: disable=g-bad-name # Our custom instance/static decorator is not recognized by lint. # pylint: disable=no-self-argument, no-method-argument, g-doc-args A decorator to make Filter methods both static and instance. If the decorated method is called as an instance method, its result is passed through _append(). # pylint: disable=protected-access # A map from the deprecated old-style comparison operator names to API # function names, implicitly prefixed with "Filter.". Negative operators # (those starting with "not_") are not included. An object to represent collection filters. Construct a filter. This constuctor accepts the following args: 1) Another filter. 2) An array of filters (which are implicitly ANDed together). 3) A ComputedObject returning a filter. Users shouldn't be making these; they're produced by the generator functions below. Args: opt_filter: Optional filter to add. # A silly call with no arguments left for backward-compatibility. # Encoding such a filter is expected to fail, but it can be composed # by calling the various methods that end up in _append(). Imports API functions to this class. Removes imported API functions from this class. Return the number of predicates that have been added to this filter. Returns: The number of predicates that have been added to this filter. This does not count nested predicates. Append a predicate to this filter. These are implicitly ANDed. Args: new_filter: The filter to append to this one. Possible types are: 1) another fully constructed Filter, 2) a JSON representation of a filter, 3) an array of 1 or 2. Returns: A new filter that is the combination of both. # pylint: disable=protected-access Returns the opposite of this filter. Returns: The negated filter, which will match iff this filter doesn't. Filter on metadata. 
This is deprecated. Args: name: The property name to filter on. operator: The type of comparison. One of: "equals", "less_than", "greater_than", "contains", "begins_with", "ends_with", or any of these prefixed with "not_". value: The value to compare against. Returns: The new filter. # Check for negated filters. # Convert the operator to a function. Filter to metadata equal to the given value. Filter to metadata not equal to the given value. Filter to metadata less than the given value. Filter on metadata greater than or equal to the given value. Filter on metadata greater than the given value. Filter on metadata less than or equal to the given value. Filter on metadata containing the given string. Filter on metadata not containing the given string. Filter on metadata begining with the given string. Filter on metadata not begining with the given string. Filter on metadata ending with the given string. Filter on metadata not ending with the given string. Combine two or more filters using boolean AND. Combine two or more filters using boolean OR. Filter images by date. Args: start: The start date as a UTC datetime or ms since Unix epoch. opt_end: The end date as a UTC datetime or ms since Unix epoch. Returns: The modified filter. # Can't create half-open DateRanges. Hack around it. Filter on metadata contained in a list. Args: opt_leftField: A selector for the left operand. Should not be specified if leftValue is specified. opt_rightValue: The value of the right operand. Should not be specified if rightField is specified. opt_rightField: A selector for the right operand. Should not be specified if rightValue is specified. opt_leftValue: The value of the left operand. Should not be specified if leftField is specified. Returns: The constructed filter. # Implement this in terms of listContains, with the arguments switched. # In listContains the list is on the left side, while in inList it's on # the right. Filter on bounds. 
Items in the collection with a footprint that fails to intersect the bounds will be excluded when the collection is evaluated. Args: geometry: The geometry to filter to either as a GeoJSON geometry, or a FeatureCollection, from which a geometry will be extracted. opt_errorMargin: An optional error margin. If a number, interpreted as sphere surface meters. Returns: The modified filter. # Invoke geometry promotion then manually promote to a Feature.
| 3.077636
| 3
|
GTSRB_dataloader.py
|
hhhhh74/DUQ
| 0
|
6629156
|
"""
@author:yebin
@file:GTSRB_dataloader.py
@IDE:Pycharm
@time:2020/12/18 下午1:46
@function:加载经过resize到256*256的GTSRB数据
@example:
@tip:
"""
from torch.utils.data import DataLoader, Dataset
from skimage import io
from torchvision import transforms
import os
import torch
class yebin_data_Train(Dataset):
def __init__(self, root, transform=None):
self.root = root
self.transform = transform
self.images = os.listdir(self.root)
def __len__(self):
return len(self.images)
def __getitem__(self, index):
image_index = self.images[index]
img_path = os.path.join(self.root, image_index)
image = io.imread(img_path)
label = img_path.split('/')[-1].split('_')[0]
label = torch.tensor(int(label))
if self.transform:
image = self.transform(image)
return image, label
class yebin_data_Test(Dataset):
def __init__(self, root, transform=None):
self.root = root
self.transform = transform
self.images = os.listdir(self.root)
def __len__(self):
return len(self.images)
def __getitem__(self, index):
image_index = self.images[index]
img_path = os.path.join(self.root, image_index)
image = io.imread(img_path)
label = img_path.split('/')[-1].split('_')[0]
label = torch.tensor(int(label))
if self.transform:
image = self.transform(image)
return image, label
def get_GTSRB_train_dataloader(root, mean, std, batch_size, num_workers, shuffle):
transform_train = transforms.Compose([
transforms.ToPILImage(),
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
# transforms.RandomRotation(15),
transforms.ToTensor(),
transforms.Normalize(mean, std)
])
train_gtsrb = yebin_data_Train(root=root, transform=transform_train)
#kwargs = {"num_workers": 4, "pin_memory": True}
train_gtsrb_loader = DataLoader(train_gtsrb, shuffle=shuffle, num_workers=num_workers,batch_size=batch_size, pin_memory=True)
return train_gtsrb_loader
def get_GTSRB_test_dataloader(root, mean, std, batch_size, num_workers, shuffle):
transform_test = transforms.Compose([
transforms.ToPILImage(),
transforms.ToTensor(),
transforms.Normalize(mean, std)
])
test_gtsrb = yebin_data_Test(root=root, transform=transform_test)
#kwargs = {"num_workers": 4, "pin_memory": True}
test_gtsrb_loader = DataLoader(test_gtsrb, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size,pin_memory=True)
return test_gtsrb,test_gtsrb_loader
|
"""
@author:yebin
@file:GTSRB_dataloader.py
@IDE:Pycharm
@time:2020/12/18 下午1:46
@function:加载经过resize到256*256的GTSRB数据
@example:
@tip:
"""
from torch.utils.data import DataLoader, Dataset
from skimage import io
from torchvision import transforms
import os
import torch
class yebin_data_Train(Dataset):
def __init__(self, root, transform=None):
self.root = root
self.transform = transform
self.images = os.listdir(self.root)
def __len__(self):
return len(self.images)
def __getitem__(self, index):
image_index = self.images[index]
img_path = os.path.join(self.root, image_index)
image = io.imread(img_path)
label = img_path.split('/')[-1].split('_')[0]
label = torch.tensor(int(label))
if self.transform:
image = self.transform(image)
return image, label
class yebin_data_Test(Dataset):
def __init__(self, root, transform=None):
self.root = root
self.transform = transform
self.images = os.listdir(self.root)
def __len__(self):
return len(self.images)
def __getitem__(self, index):
image_index = self.images[index]
img_path = os.path.join(self.root, image_index)
image = io.imread(img_path)
label = img_path.split('/')[-1].split('_')[0]
label = torch.tensor(int(label))
if self.transform:
image = self.transform(image)
return image, label
def get_GTSRB_train_dataloader(root, mean, std, batch_size, num_workers, shuffle):
    """
    Build the DataLoader for the GTSRB training split with augmentation.

    :param root: directory containing the training images
    :param mean: per-channel mean used for normalization
    :param std: per-channel std used for normalization
    :param batch_size: samples per batch
    :param num_workers: number of DataLoader worker processes
    :param shuffle: whether to reshuffle each epoch
    :return: the training DataLoader
    """
    transform_train = transforms.Compose([
        transforms.ToPILImage(),
        # NOTE(review): crops to 32x32 although the module docstring says the
        # images were resized to 256x256 -- confirm the crop size is intended.
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        # transforms.RandomRotation(15),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])
    train_gtsrb = yebin_data_Train(root=root, transform=transform_train)
    #kwargs = {"num_workers": 4, "pin_memory": True}
    train_gtsrb_loader = DataLoader(train_gtsrb, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size, pin_memory=True)
    return train_gtsrb_loader
def get_GTSRB_test_dataloader(root, mean, std, batch_size, num_workers, shuffle):
    """
    Build the GTSRB test dataset and its DataLoader (no augmentation).

    :param root: directory containing the test images
    :param mean: per-channel mean used for normalization
    :param std: per-channel std used for normalization
    :param batch_size: samples per batch
    :param num_workers: number of DataLoader worker processes
    :param shuffle: whether to shuffle the test set
    :return: tuple ``(dataset, dataloader)`` -- callers unpack both; the
        train helper by contrast returns only the loader.
    """
    transform_test = transforms.Compose([
        transforms.ToPILImage(),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])
    test_gtsrb = yebin_data_Test(root=root, transform=transform_test)
    #kwargs = {"num_workers": 4, "pin_memory": True}
    test_gtsrb_loader = DataLoader(test_gtsrb, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size, pin_memory=True)
    return test_gtsrb, test_gtsrb_loader
|
en
| 0.331926
|
@author:yebin @file:GTSRB_dataloader.py @IDE:Pycharm @time:2020/12/18 下午1:46 @function:加载经过resize到256*256的GTSRB数据 @example: @tip: # transforms.RandomRotation(15), #kwargs = {"num_workers": 4, "pin_memory": True} #kwargs = {"num_workers": 4, "pin_memory": True}
| 2.436585
| 2
|
django_visual/ide/views.py
|
Michaluch/django-visual
| 27
|
6629157
|
# -*- coding: utf-8 -*-
import os
from os.path import join, isdir
import random
import sys
import subprocess
from sys import stdout, stdin, stderr
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.core.management.base import CommandError
from django.conf import settings, Settings
from create_project import (
copy_project_template,
copy_application_template
)
from open_project import (
project_context,
project_settings,
edit_installed_apps,
application_add_model,
application_edit_model
)
from run import run_manage
def index(request):
    """IDE welcome page: list existing projects and the available templates."""
    projects_home = settings.PROJECTS_HOME
    try:
        entries = os.listdir(projects_home)
    except OSError:
        # The projects directory may not exist yet -- create it and start empty.
        os.mkdir(projects_home)
        entries = []
    # Only sub-directories of PROJECTS_HOME count as projects.
    projects = [entry for entry in entries if isdir(join(projects_home, entry))]
    return render(request, 'index.html', {
        "projects": projects,
        "templates": settings.PROJECTS_TEMPLATES,
    })
def create_project(request):
    """
    Create new Django project.

    GET renders the creation form pre-filled with a random two-part title;
    POST copies the selected project template and redirects to the project.
    """
    names = settings.PROJECT_NAMES
    projects_home = settings.PROJECTS_HOME
    context = {
        "template": request.GET.get("template", "blog"),
        # Suggest a random "<name>_<name>" project title.
        "title": random.choice(names) + "_" + random.choice(names),
        "projects_home": projects_home,
        'error': ''
    }
    if request.method == "POST":
        template = request.POST.get("template")
        title = request.POST.get("title")
        try:
            copy_project_template(template, title)
        # NOTE: Python 2 ``except X, e`` syntax -- this module is not Python 3 compatible.
        except CommandError, e:
            # Re-render the form with the failure reason instead of erroring out.
            context['title'] = title
            context['error'] = str(e)
            return render(request, 'create_project.html', context)
        return redirect('open_project', project_id=title)
    return render(request, 'create_project.html', context)
def open_project(request, project_id):
    """
    Load project structure into IDE.

    :param project_id: directory name of the project under PROJECTS_HOME
    """
    project_home = join(settings.PROJECTS_HOME, project_id)
    context = project_context(project_id, project_home)
    context["project_id"] = project_id
    return render(request, 'open_project.html', context)
def create_application(request, project_id):
    """
    Creates new application for given project.

    POST param ``app_name``: name of the new Django app. The app template is
    copied into the project and the app is appended to INSTALLED_APPS.
    Returns "OK" on success, a usage hint for non-POST requests.
    """
    project_home = join(settings.PROJECTS_HOME, project_id)
    if request.method == "POST":
        app_name = request.POST.get("app_name")
        copy_application_template(project_home, app_name)
        # Register the freshly created app in the project's settings file.
        pr_settings = project_settings(project_id, project_home)
        apps = pr_settings.INSTALLED_APPS
        apps.append(app_name)
        edit_installed_apps(project_id, project_home, apps)
        return HttpResponse("OK")
    else:
        return HttpResponse("POST 'app_name' of new application to create")
def add_application(request, project_id):
    """
    Add an existing application to the project's INSTALLED_APPS.

    POST param ``app_name``: name of the application to register.
    Returns "OK" on success, a usage hint for non-POST requests.
    """
    project_home = join(settings.PROJECTS_HOME, project_id)
    if request.method == "POST":
        app_name = request.POST.get("app_name")
        pr_settings = project_settings(project_id, project_home)
        apps = pr_settings.INSTALLED_APPS
        # Guard against duplicates: appending the same app twice would write
        # a duplicated entry into the generated INSTALLED_APPS setting.
        if app_name not in apps:
            apps.append(app_name)
            edit_installed_apps(project_id, project_home, apps)
        return HttpResponse("OK")
    else:
        return HttpResponse("POST 'app_name' of new application to add")
def remove_application(request, project_id):
    """
    Remove an existing application from the project's INSTALLED_APPS.

    POST param ``app_name``: name of the application to remove.
    Returns "OK" (removal of an absent app is a no-op), a usage hint for
    non-POST requests.
    """
    project_home = join(settings.PROJECTS_HOME, project_id)
    if request.method == "POST":
        app_name = request.POST.get("app_name")
        pr_settings = project_settings(project_id, project_home)
        apps = pr_settings.INSTALLED_APPS
        # list.remove() raises ValueError (-> server error) when the app is
        # not installed; treat that case as an idempotent no-op instead.
        if app_name in apps:
            apps.remove(app_name)
            edit_installed_apps(project_id, project_home, apps)
        return HttpResponse("OK")
    else:
        return HttpResponse("POST 'app_name' of new application to remove")
def add_model(request, project_id):
    """
    Creates new model in application specified in POST data.

    On success redirects back to the project view so the new model shows up.
    """
    project_home = join(settings.PROJECTS_HOME, project_id)
    if request.method == "POST":
        application_add_model(project_id, project_home, request.POST)
        return redirect("open_project", project_id=project_id)
    # A view must always return a response; the original fell through to
    # an implicit None on non-POST requests. Mirror the sibling endpoints.
    return HttpResponse("POST model definition to add")
def open_file(request):
    """
    Retrieves file content into IDE to edit.

    GET param ``path``: path of the file to read; returns its raw bytes.

    NOTE(review): ``path`` is used verbatim, so any file readable by the
    server process can be fetched (path traversal). This should be
    restricted to PROJECTS_HOME -- confirm the IDE is only ever bound to
    localhost before shipping.
    """
    path = request.GET.get("path", "")
    if not path:
        return HttpResponse("")
    with open(path, 'r') as f:
        content = f.read()
    return HttpResponse(content, content_type='application/octet-stream')
def save_file(request):
    """
    Saves file in IDE editor.

    POST params ``path`` and ``content``: destination file and its new text.

    NOTE(review): ``path`` is used verbatim, so any file writable by the
    server process can be overwritten. Should be restricted to
    PROJECTS_HOME -- confirm the IDE is only exposed on localhost.
    """
    if request.method == "POST":
        path = request.POST.get("path", "")
        content = request.POST.get("content", "")
        with open(path, 'w') as f:
            f.write(content)
        return HttpResponse("File saved")
    return HttpResponse("POST 'path' and 'content' of file to save")
def run_project(request, project_id):
    """
    Run given project: ``manage.py runserver 8001``.

    POST starts the dev server and returns its pid; GET with a ``pid``
    query parameter returns the captured server log so the IDE can poll it.
    """
    project_home = join(settings.PROJECTS_HOME, project_id)
    # TODO: makemigrations && migrate
    if request.method == "POST":
        pid = run_manage(project_id, project_home)
        return HttpResponse(pid)
    pid = request.GET.get("pid", "")
    if pid:
        # Use a context manager so the log handle is closed on every poll
        # (the original leaked one file descriptor per request).
        with open(join(settings.TOP_DIR, 'project_run.log'), 'r') as fh:
            data = fh.read()
        return HttpResponse(data)
    # Always return a response; the original implicitly returned None here.
    return HttpResponse("")
def stop_project(request, project_id):
    """
    Kills running python with manage.py inside for project.

    POST param ``pid``: process id previously returned by run_project.
    Returns "OK" on success, the OS error text on failure, "" otherwise.
    """
    if request.method == "POST":
        pid = request.POST.get("pid", "")
        if pid:
            try:
                # SIGKILL (9): the dev server holds no state worth a graceful stop.
                os.kill(int(pid), 9)
                return HttpResponse("OK")
            # NOTE: Python 2 ``except X, e`` syntax -- not Python 3 compatible.
            except OSError, e:
                return HttpResponse(str(e))
    return HttpResponse("")
|
# -*- coding: utf-8 -*-
import os
from os.path import join, isdir
import random
import sys
import subprocess
from sys import stdout, stdin, stderr
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.core.management.base import CommandError
from django.conf import settings, Settings
from create_project import (
copy_project_template,
copy_application_template
)
from open_project import (
project_context,
project_settings,
edit_installed_apps,
application_add_model,
application_edit_model
)
from run import run_manage
def index(request):
"""
IDE welcome
Open or Create Project
"""
projects_home = settings.PROJECTS_HOME
projects = []
try:
nodes = os.listdir(projects_home)
except OSError as e:
# Projects dir may not exist, let create it
os.mkdir(projects_home)
nodes = []
for node in nodes:
if isdir(join(projects_home, node)):
projects.append(node)
context = {
"projects": projects,
"templates": settings.PROJECTS_TEMPLATES
}
return render(request, 'index.html', context)
def create_project(request):
"""
Create new Django project
"""
names = settings.PROJECT_NAMES
projects_home = settings.PROJECTS_HOME
context = {
"template": request.GET.get("template", "blog"),
"title": random.choice(names) + "_" + random.choice(names),
"projects_home": projects_home,
'error': ''
}
if request.method == "POST":
template = request.POST.get("template")
title = request.POST.get("title")
try:
copy_project_template(template, title)
except CommandError, e:
context['title'] = title
context['error'] = str(e)
return render(request, 'create_project.html', context)
return redirect('open_project', project_id=title)
return render(request, 'create_project.html', context)
def open_project(request, project_id):
"""
Load project structure into IDE.
"""
project_home = join(settings.PROJECTS_HOME, project_id)
context = project_context(project_id, project_home)
context["project_id"] = project_id
return render(request, 'open_project.html', context)
def create_application(request, project_id):
"""
Creates new application for given project
"""
project_home = join(settings.PROJECTS_HOME, project_id)
if request.method == "POST":
app_name = request.POST.get("app_name")
copy_application_template(project_home, app_name)
pr_settings = project_settings(project_id, project_home)
apps = pr_settings.INSTALLED_APPS
apps.append(app_name)
edit_installed_apps(project_id, project_home, apps)
return HttpResponse("OK")
else:
return HttpResponse("POST 'app_name' of new application to create")
def add_application(request, project_id):
"""
Add existing application to INSTALLED_APPS
"""
project_home = join(settings.PROJECTS_HOME, project_id)
if request.method == "POST":
app_name = request.POST.get("app_name")
pr_settings = project_settings(project_id, project_home)
apps = pr_settings.INSTALLED_APPS
apps.append(app_name)
edit_installed_apps(project_id, project_home, apps)
return HttpResponse("OK")
else:
return HttpResponse("POST 'app_name' of new application to add")
def remove_application(request, project_id):
"""
Remove existing application from INSTALLED_APPS
"""
project_home = join(settings.PROJECTS_HOME, project_id)
if request.method == "POST":
app_name = request.POST.get("app_name")
pr_settings = project_settings(project_id, project_home)
apps = pr_settings.INSTALLED_APPS
apps.remove(app_name)
edit_installed_apps(project_id, project_home, apps)
return HttpResponse("OK")
else:
return HttpResponse("POST 'app_name' of new application to remove")
def add_model(request, project_id):
"""
Creates new model in application specified in POST data
"""
project_home = join(settings.PROJECTS_HOME, project_id)
if request.method == "POST":
application_add_model(project_id, project_home, request.POST)
return redirect("open_project", project_id=project_id)
def open_file(request):
"""
Retrieves file content into IDE to edit.
"""
path = request.GET.get("path", "")
if not path:
return HttpResponse("")
with open(path, 'r') as f:
content = f.read()
return HttpResponse(content, content_type='application/octet-stream')
def save_file(request):
"""
Saves file in IDE editor.
"""
if request.method == "POST":
path = request.POST.get("path", "")
content = request.POST.get("content", "")
with open(path, 'w') as f:
f.write(content)
return HttpResponse("File saved")
return HttpResponse("POST 'path' and 'content' of file to save")
def run_project(request, project_id):
"""
Run given project manage.py runserver 8001
"""
project_home = join(settings.PROJECTS_HOME, project_id)
# TODO: makemigrations && migrate
if request.method == "POST":
pid = run_manage(project_id, project_home)
return HttpResponse(pid)
pid = request.GET.get("pid", "")
if pid:
fh = open(join(settings.TOP_DIR, 'project_run.log'), 'r')
data = fh.read()
return HttpResponse(data)
def stop_project(request, project_id):
"""
Kills running python with manage.py inside for project
"""
if request.method == "POST":
pid = request.POST.get("pid", "")
if pid:
try:
os.kill(int(pid), 9)
return HttpResponse("OK")
except OSError, e:
return HttpResponse(str(e))
return HttpResponse("")
|
en
| 0.710901
|
# -*- coding: utf-8 -*- IDE welcome Open or Create Project # Projects dir may not exist, let create it Create new Django project Load project structure into IDE. Creates new application for given project Add existing application to INSTALLED_APPS Remove existing application from INSTALLED_APPS Creates new model in application specified in POST data Retrieves file content into IDE to edit. Saves file in IDE editor. Run given project manage.py runserver 8001 # TODO: makemigrations && migrate Kills running python with manage.py inside for project
| 2.231724
| 2
|
examples/test_plug.py
|
moradel82/openhtf
| 0
|
6629158
|
<reponame>moradel82/openhtf
import openhtf as htf
# Import this output mechanism as it's the specific one we want to use.
from openhtf.output.callbacks import json_factory
from openhtf.plugs import user_input
# from openhtf.plugs.testPlug import TestPlug # After install so venv knows about the path
from plug_chamber import ChamberTe1007c # Doesnt need an install
# This is an import from plugs. In my case for this to work
# the path has to be define or install openhtf after the changes
# so that the plug can be part of plugs
# inside openhtf run: "python setup.py install"
# @htf.plug(tester=TestPlug)
# def test_plug(test, tester):
# messages = ["hola", "hi", "you're welcome"]
# to_person = ['name_' + str(val) for val in range(3)] # Generate names
# for msg, to_ in zip(messages, to_person):
# rsp_t = tester.sayHello(to_)
# tester.sendMessage(msg)
# This one since the plug is directly where I'm running
# the test if can be imported without any paths issues.
# I sugges creating plugs in the same location
# Test in the same location and after
# we moved them to the plugs for openHTF
@htf.plug(chamber=ChamberTe1007c)
def test_vdivider(test, chamber):
    """OpenHTF phase exercising the TE-1007C chamber plug."""
    messages = ["hola", "hi", "you're welcome"]
    to_person = ['name_' + str(val) for val in range(3)]  # Generate names
    for msg, to_ in zip(messages, to_person):
        # NOTE(review): greeting strings are passed to setTemperature /
        # setHumidity -- this looks copied from the hello-plug example;
        # confirm the chamber plug really accepts these values.
        rsp_t = chamber.setTemperature(to_)
        chamber.setHumidity(msg)
if __name__ == '__main__':
    # We instantiate our OpenHTF test with the phases we want to run as args.
    # Multiple phases would be passed as additional args, and additional
    # keyword arguments may be passed as well. See other examples for more
    # complex uses.
    test = htf.Test(test_vdivider)  # Single phase: the chamber exercise above
    test.execute(test_start=user_input.prompt_for_test_start())
|
import openhtf as htf
# Import this output mechanism as it's the specific one we want to use.
from openhtf.output.callbacks import json_factory
from openhtf.plugs import user_input
# from openhtf.plugs.testPlug import TestPlug # After install so venv knows about the path
from plug_chamber import ChamberTe1007c # Doesnt need an install
# This is an import from plugs. In my case for this to work
# the path has to be define or install openhtf after the changes
# so that the plug can be part of plugs
# inside openhtf run: "python setup.py install"
# @htf.plug(tester=TestPlug)
# def test_plug(test, tester):
# messages = ["hola", "hi", "you're welcome"]
# to_person = ['name_' + str(val) for val in range(3)] # Generate names
# for msg, to_ in zip(messages, to_person):
# rsp_t = tester.sayHello(to_)
# tester.sendMessage(msg)
# This one since the plug is directly where I'm running
# the test if can be imported without any paths issues.
# I sugges creating plugs in the same location
# Test in the same location and after
# we moved them to the plugs for openHTF
@htf.plug(chamber=ChamberTe1007c)
def test_vdivider(test, chamber):
messages = ["hola", "hi", "you're welcome"]
to_person = ['name_' + str(val) for val in range(3)] # Generate names
for msg, to_ in zip(messages, to_person):
rsp_t = chamber.setTemperature(to_)
chamber.setHumidity(msg)
if __name__ == '__main__':
# We instantiate our OpenHTF test with the phases we want to run as args.
# Multiple phases would be passed as additional args, and additional
# keyword arguments may be passed as well. See other examples for more
# complex uses.
test = htf.Test(test_vdivider) # Names of the tests that I want to
test.execute(test_start=user_input.prompt_for_test_start())
|
en
| 0.868015
|
# Import this output mechanism as it's the specific one we want to use. # from openhtf.plugs.testPlug import TestPlug # After install so venv knows about the path # Doesnt need an install # This is an import from plugs. In my case for this to work # the path has to be define or install openhtf after the changes # so that the plug can be part of plugs # inside openhtf run: "python setup.py install" # @htf.plug(tester=TestPlug) # def test_plug(test, tester): # messages = ["hola", "hi", "you're welcome"] # to_person = ['name_' + str(val) for val in range(3)] # Generate names # for msg, to_ in zip(messages, to_person): # rsp_t = tester.sayHello(to_) # tester.sendMessage(msg) # This one since the plug is directly where I'm running # the test if can be imported without any paths issues. # I sugges creating plugs in the same location # Test in the same location and after # we moved them to the plugs for openHTF # Generate names # We instantiate our OpenHTF test with the phases we want to run as args. # Multiple phases would be passed as additional args, and additional # keyword arguments may be passed as well. See other examples for more # complex uses. # Names of the tests that I want to
| 2.408834
| 2
|
provisional-st/static_data/tools/reformat_rsr.py
|
ldj01/espa-surface-temperature
| 1
|
6629159
|
# A simple tool to reformat the LUT's generated by the IDL code
import os
import sys
from argparse import ArgumentParser
def reformat_file(input_path, output_path):
    """Rewrite each "<value> <value>" line of *input_path* into *output_path*.

    Output format per line: first value as ``%5.2f``, second as ``%+11.9f``.
    Blank lines in the input are skipped (the original raised IndexError on
    a trailing newline or empty line).
    """
    with open(input_path, 'r') as in_fd:
        with open(output_path, 'w') as out_fd:
            for line in in_fd:
                data = line.strip().split()
                if not data:
                    # Tolerate blank lines instead of crashing on data[0].
                    continue
                out_fd.write('{0:5.2f} {1:+11.9f}\n'
                             .format(float(data[0]), float(data[1])))


if __name__ == '__main__':
    """Reformat the file to what is expected"""

    description = 'Reformat LUT files'
    parser = ArgumentParser(description=description)

    parser.add_argument('--input',
                        action='store',
                        dest='input',
                        required=True,
                        help='The filename for the input')

    parser.add_argument('--output',
                        action='store',
                        dest='output',
                        required=True,
                        help='The filename for the output')

    args = parser.parse_args()

    reformat_file(args.input, args.output)
|
# A simple tool to reformat the LUT's generated by the IDL code
import os
import sys
from argparse import ArgumentParser
if __name__ == '__main__':
"""Reformat the file to what is expected"""
description = 'Reformat LUT files'
parser = ArgumentParser(description=description)
parser.add_argument('--input',
action='store',
dest='input',
required=True,
help='The filename for the input')
parser.add_argument('--output',
action='store',
dest='output',
required=True,
help='The filename for the output')
args = parser.parse_args()
with open(args.input, 'r') as in_fd:
with open(args.output, 'w') as out_fd:
for line in in_fd:
data = line.strip().split()
out_fd.write('{0:5.2f} {1:+11.9f}\n'
.format(float(data[0]), float(data[1])))
|
en
| 0.910584
|
# A simple tool to reformat the LUT's generated by the IDL code Reformat the file to what is expected
| 2.84522
| 3
|
third_party/bazel/tensorrt/repo.bzl
|
JamesTheZ/BladeDISC
| 1
|
6629160
|
load("//bazel:common.bzl", "files_exist")
_TENSORRT_INSTALL_PATH = "TENSORRT_INSTALL_PATH"
def _cc_import_myelin():
    # Returns the BUILD-file snippet declaring the Myelin static archives
    # that some TensorRT distributions ship. Spliced into trt.BUILD.tpl by
    # _impl only when all four .a files are present under lib/.
    return """
cc_import(
    name = "myelin_compiler_static",
    static_library = "lib/libmyelin_compiler_static.a",
)

cc_import(
    name = "myelin_executor_static",
    static_library = "lib/libmyelin_executor_static.a",
)

cc_import(
    name = "myelin_pattern_library_static",
    static_library = "lib/libmyelin_pattern_library_static.a",
)

cc_import(
    name = "myelin_pattern_runtime_static",
    static_library = "lib/libmyelin_pattern_runtime_static.a",
)

cc_library(
    name = "myelin_static",
    deps = [
        ":myelin_compiler_static",
        ":myelin_executor_static",
        ":myelin_pattern_library_static",
        ":myelin_pattern_runtime_static",
    ]
)
"""
def warn(msg):
    # Print *msg* wrapped in ANSI red escape codes so it stands out in logs.
    print("{red}{msg}{nc}".format(red = "\033[0;31m", msg = msg, nc = "\033[0m"))
def _impl(repo_ctx):
    # Repository-rule implementation: symlink a locally installed TensorRT
    # into this external repo and instantiate the BUILD/bzl templates.
    tensorrt_path = repo_ctx.os.environ.get(_TENSORRT_INSTALL_PATH, None)
    if tensorrt_path == None:
        # Fall back to a conventional default location when the env var is unset.
        warn("Please set the customize tensorrt library path via env var: {}".format(_TENSORRT_INSTALL_PATH))
        tensorrt_path = "/usr/local/TensorRT/"
    repo_ctx.symlink(tensorrt_path + "/include", "include")
    repo_ctx.symlink(tensorrt_path + "/lib", "lib")
    # Myelin static archives only exist in some TensorRT versions; enable the
    # extra cc_import rules only when all four are present.
    if_has_myelin = all(files_exist(
        repo_ctx,
        [
            "lib/libmyelin_compiler_static.a",
            "lib/libmyelin_executor_static.a",
            "lib/libmyelin_pattern_library_static.a",
            "lib/libmyelin_pattern_runtime_static.a",
        ],
    ))
    # Substitute the Myelin rule text (or nothing) into the BUILD template.
    repo_ctx.template("BUILD", Label("//bazel/tensorrt:trt.BUILD.tpl"), {
        "%{myelin_static_rule}": _cc_import_myelin() if if_has_myelin else "",
    })
    # Expose the detection result to downstream .bzl logic.
    repo_ctx.template("build_defs.bzl", Label("//bazel/tensorrt:build_defs.bzl.tpl"), {
        "%{IF_HAS_MYELIN}": "True" if if_has_myelin else "False",
    })
# Repository rule wiring a locally installed TensorRT into the workspace.
# `local = True` because the result depends on files outside the workspace;
# listing the env var in `environ` re-runs the rule when it changes.
tensorrt_configure = repository_rule(
    implementation = _impl,
    local = True,
    environ = [_TENSORRT_INSTALL_PATH],
)
|
load("//bazel:common.bzl", "files_exist")
_TENSORRT_INSTALL_PATH = "TENSORRT_INSTALL_PATH"
def _cc_import_myelin():
return """
cc_import(
name = "myelin_compiler_static",
static_library = "lib/libmyelin_compiler_static.a",
)
cc_import(
name = "myelin_executor_static",
static_library = "lib/libmyelin_executor_static.a",
)
cc_import(
name = "myelin_pattern_library_static",
static_library = "lib/libmyelin_pattern_library_static.a",
)
cc_import(
name = "myelin_pattern_runtime_static",
static_library = "lib/libmyelin_pattern_runtime_static.a",
)
cc_library(
name = "myelin_static",
deps = [
":myelin_compiler_static",
":myelin_executor_static",
":myelin_pattern_library_static",
":myelin_pattern_runtime_static",
]
)
"""
def warn(msg):
print("{red}{msg}{nc}".format(red = "\033[0;31m", msg = msg, nc = "\033[0m"))
def _impl(repo_ctx):
tensorrt_path = repo_ctx.os.environ.get(_TENSORRT_INSTALL_PATH, None)
if tensorrt_path == None:
warn("Please set the customize tensorrt library path via env var: {}".format(_TENSORRT_INSTALL_PATH))
tensorrt_path = "/usr/local/TensorRT/"
repo_ctx.symlink(tensorrt_path + "/include", "include")
repo_ctx.symlink(tensorrt_path + "/lib", "lib")
if_has_myelin = all(files_exist(
repo_ctx,
[
"lib/libmyelin_compiler_static.a",
"lib/libmyelin_executor_static.a",
"lib/libmyelin_pattern_library_static.a",
"lib/libmyelin_pattern_runtime_static.a",
],
))
repo_ctx.template("BUILD", Label("//bazel/tensorrt:trt.BUILD.tpl"), {
"%{myelin_static_rule}": _cc_import_myelin() if if_has_myelin else "",
})
repo_ctx.template("build_defs.bzl", Label("//bazel/tensorrt:build_defs.bzl.tpl"), {
"%{IF_HAS_MYELIN}": "True" if if_has_myelin else "False",
})
tensorrt_configure = repository_rule(
implementation = _impl,
local = True,
environ = [_TENSORRT_INSTALL_PATH],
)
|
en
| 0.667017
|
cc_import( name = "myelin_compiler_static", static_library = "lib/libmyelin_compiler_static.a", ) cc_import( name = "myelin_executor_static", static_library = "lib/libmyelin_executor_static.a", ) cc_import( name = "myelin_pattern_library_static", static_library = "lib/libmyelin_pattern_library_static.a", ) cc_import( name = "myelin_pattern_runtime_static", static_library = "lib/libmyelin_pattern_runtime_static.a", ) cc_library( name = "myelin_static", deps = [ ":myelin_compiler_static", ":myelin_executor_static", ":myelin_pattern_library_static", ":myelin_pattern_runtime_static", ] )
| 1.994777
| 2
|
challenges/prep-kit/sorting/mark_toys.py
|
Mrsteveson/Review
| 0
|
6629161
|
<reponame>Mrsteveson/Review
def maximumToys(prices, k):
    """
    Return the maximum number of toys purchasable with budget ``k``.

    Greedy strategy: buying the cheapest toys first maximises the count.

    :param prices: iterable of non-negative toy prices (left unmodified --
        the original sorted the caller's list in place)
    :param k: total budget
    :return: largest number of toys whose summed price is <= k
    """
    count = 0
    total = 0
    for price in sorted(prices):
        total += price
        if total > k:
            # Prices are sorted ascending, so no later toy can fit either.
            break
        count += 1
    return count
|
def maximumToys(prices, k):
    """
    Return the maximum number of toys purchasable with budget ``k``.

    Greedy strategy: buying the cheapest toys first maximises the count.

    :param prices: iterable of non-negative toy prices (left unmodified --
        the original sorted the caller's list in place)
    :param k: total budget
    :return: largest number of toys whose summed price is <= k
    """
    count = 0
    total = 0
    for price in sorted(prices):
        total += price
        if total > k:
            # Prices are sorted ascending, so no later toy can fit either.
            break
        count += 1
    return count
|
none
| 1
| 3.2837
| 3
|
|
gym_dockauv/objects/shape.py
|
Erikx3/gym_dockauv
| 0
|
6629162
|
import numpy as np
from abc import ABC, abstractmethod
from functools import cached_property
from typing import List
class Shape(ABC):
    """
    Base class for any obstacle shape; always stores the center position.
    """

    def __init__(self, position: np.ndarray):
        # Store the center as a numpy array (accepts any array-like input).
        self.position = np.array(position)

    @abstractmethod
    def get_plot_variables(self) -> List[List[np.ndarray]]:
        """
        Return the plot variables for matplotlib's axes.plot_surface().

        :return: list of lists of arrays for plotting (one inner list holds
            the x, y, z coordinate arrays of one surface)
        """
        pass
class Sphere(Shape):
    """A sphere defined by its center position and radius."""

    def __init__(self, position: np.ndarray, radius: float):
        # Let the base class normalize and store the center position.
        super().__init__(position)
        self.radius = radius

    def get_plot_variables(self) -> List[List[np.ndarray]]:
        xs, ys, zs = self.get_plot_shape(self.radius)
        # Shift the origin-centered mesh to this sphere's center.
        return [[xs + self.position[0],
                 ys + self.position[1],
                 zs + self.position[2]]]

    @staticmethod
    def get_plot_shape(radius: float, scale: float = 1, sweep1: int = 20, sweep2: int = 20):
        """
        Build a (sweep1, sweep2) surface mesh of a sphere at the origin.

        Also used by Capsule to create half spheres (scale=0.5), hence static.

        :param radius: sphere radius
        :param scale: fraction of the polar sweep; 1 -> full sphere, 0.5 -> hemisphere
        :param sweep1: mesh resolution along the polar angle
        :param sweep2: mesh resolution along the azimuth
        :return: x, y, z coordinate arrays for the plotting function
        """
        polar, azimuth = np.mgrid[0:scale * np.pi:sweep1 * 1j, 0:2 * np.pi:sweep2 * 1j]
        xs = radius * np.sin(polar) * np.cos(azimuth)
        ys = radius * np.sin(polar) * np.sin(azimuth)
        zs = radius * np.cos(polar)
        return xs, ys, zs
class Spheres:
    """
    Aggregate view over a list of Sphere objects.

    Stacks all positions and radii into arrays so vectorized helpers can
    process every sphere at once; keeps the original objects accessible.
    This class could be extended with add/update features, but they are not
    needed here.
    """

    def __init__(self, spheres: List[Sphere]):
        n = len(spheres)
        self.position = np.zeros((n, 3))
        self.radius = np.zeros(n)
        self.objs = []
        for idx, sph in enumerate(spheres):
            self.position[idx, :] = sph.position
            self.radius[idx] = sph.radius
            self.objs.append(sph)

    def __call__(self) -> List[Sphere]:
        """Calling the instance returns the underlying list of spheres."""
        return self.objs
class Capsule(Shape):
    """
    Represents a Capsule; position is the center of the cylinder.

    .. note:
        For the end caps a full-sphere mesh is still used, as long as there
        is no method for plotting only the necessary half sphere.
    """

    def __init__(self, position: np.ndarray, radius: float, vec_top: np.ndarray):
        """
        :param position: Position of center of capsule
        :param radius: radius of capsule
        :param vec_top: line endpoint of axis of capsule (not to the very end, until sphere planar area)
        """
        super().__init__(position)  # Call inherited init functions and then add to it
        self.radius = radius
        self.vec_top = vec_top
        # Bottom endpoint is the mirror of vec_top through the center.
        self.vec_bot = self.position - (self.vec_top - self.position)

    def get_plot_variables(self) -> List[List[np.ndarray]]:
        # One surface for the cylinder plus one for each end-cap sphere.
        x_c, y_c, z_c = self.get_plot_shape_cyl
        return [[x_c, y_c, z_c], *self.get_plot_shape_sph]

    @cached_property
    def get_plot_shape_sph(self):
        # Meshes for the two end caps. cached_property: computed once, so the
        # capsule is assumed immutable after construction.
        # NOTE: This only works when capsule is aligned with z axis
        x_c1, y_c1, z_c1 = Sphere.get_plot_shape(self.radius, scale=0.5)
        # Top cap: flip the hemisphere (-z) and shift to vec_top.
        x_c2, y_c2, z_c2 = [x_c1 + self.vec_top[0], y_c1 + self.vec_top[1], -z_c1 + self.vec_top[2]]
        # Bottom cap: shift the hemisphere to vec_bot.
        x_c1, y_c1, z_c1 = [x_c1 + self.vec_bot[0], y_c1 + self.vec_bot[1], z_c1 + self.vec_bot[2]]
        return [[x_c1, y_c1, z_c1], [x_c2, y_c2, z_c2]]

    @cached_property
    def get_plot_shape_cyl(self):
        """
        Mesh for the cylindrical body between vec_bot and vec_top.

        Adapted from:
        https://stackoverflow.com/questions/39822480/plotting-a-solid-cylinder-centered-on-a-plane-in-matplotlib
        """
        # vector in direction of axis
        v = self.vec_top - self.vec_bot
        # find magnitude of vector
        mag = np.linalg.norm(v)
        # unit vector in direction of axis
        v = v / mag
        # make some vector not in the same direction as v
        not_v = np.array([1, 0, 0])
        if (v == not_v).all():
            not_v = np.array([0, 1, 0])
        # make vector perpendicular to v
        n1 = np.cross(v, not_v)
        # normalize n1
        n1 /= np.linalg.norm(n1)
        # make unit vector perpendicular to v and n1
        n2 = np.cross(v, n1)
        # surface ranges over t from 0 to length of axis and 0 to 2*pi
        t = np.linspace(0, mag, 2)
        theta = np.linspace(0, 2 * np.pi, 20)
        rsample = np.linspace(0, self.radius, 2)
        # use meshgrid to make 2d arrays
        t, theta2 = np.meshgrid(t, theta)
        # rsample, theta = np.meshgrid(rsample, theta)
        # generate coordinates for surface
        # "Tube"
        x_c, y_c, z_c = [
            self.vec_bot[i] + v[i] * t + self.radius * np.sin(theta2) * n1[i] + self.radius * np.cos(theta2) * n2[i] for
            i in [0, 1, 2]]
        return x_c, y_c, z_c
def collision_sphere_sphere(pos1: np.ndarray, rad1: float, pos2: np.ndarray, rad2: float) -> bool:
    """
    Check whether two spheres overlap (touching counts as a collision).

    :param pos1: (3,) array for position of first object
    :param rad1: radius of first object
    :param pos2: (3,) array for position of second object
    :param rad2: radius of second object
    :return: True when the center distance does not exceed the radii sum
    """
    center_dist = np.linalg.norm(pos1 - pos2)
    return center_dist <= rad1 + rad2
def collision_sphere_spheres(pos1: np.ndarray, rad1: float, pos2: np.ndarray, rad2: np.ndarray) -> bool:
    """
    Check whether one sphere collides with any sphere in a batch.

    :param pos1: (3,) array for position of first object
    :param rad1: radius of first object
    :param pos2: (n, 3) array with the positions of all other spheres
    :param rad2: (n,) array with the radii of all other spheres
    :return: True if at least one pairwise collision occurs
    """
    center_dists = np.linalg.norm(pos2 - pos1[None, :], axis=1)
    return np.any(center_dists <= rad1 + rad2)
def collision_capsule_sphere(cap1: np.ndarray, cap2: np.ndarray, cap_rad: float,
                             sph_pos: np.ndarray, sph_rad: float) -> bool:
    """
    Determining whether a capsule collides with a sphere.

    :param cap1: (3,) array for the position of one of the capsule ends
    :param cap2: (3,) array for the position of the other capsule end
    :param cap_rad: radius of capsule
    :param sph_pos: (3,) array for position of sphere
    :param sph_rad: radius of sphere
    :return: returns true for collision
    """
    # Closest distance between sphere center and capsule axis.
    # NOTE(review): dist_line_point is defined elsewhere in this module;
    # correctness here assumes it clamps to the segment ends rather than
    # measuring against the infinite line -- confirm.
    dist = dist_line_point(sph_pos, cap1, cap2)
    # Check for collision
    return dist <= cap_rad + sph_rad
def intersec_dist_line_sphere(l1: np.ndarray, ld: np.ndarray, center: np.ndarray, rad: float):
    """
    Signed distance along a ray to its intersection with a sphere.

    From: https://iquilezles.org/articles/intersectors/

    :param l1: array (3,) for line starting point
    :param ld: array (3,) for the line direction from the starting point (does not need to be unit vector)
    :param center: array (3,) for the center of the sphere
    :param rad: radius of the sphere
    :return: intersection distance with the smallest magnitude (may be
        negative, i.e. behind the starting point); -np.inf when the ray misses
    """
    unit_dir = ld / np.linalg.norm(ld)
    offset = l1 - center  # type: np.ndarray
    half_b = np.dot(offset, unit_dir)
    c_term = np.dot(offset, offset) - rad * rad
    disc = half_b * half_b - c_term
    if disc < 0.0:
        # Negative discriminant: the line never touches the sphere.
        return -np.inf
    root = np.sqrt(disc)
    # Of the two quadratic roots pick the one with smallest magnitude.
    return min([-half_b + root, -half_b - root], key=abs)
def intersec_dist_lines_spheres_vectorized(l1: np.ndarray, ld: np.ndarray, center: np.ndarray, rad: np.ndarray):
    """
    Adapted from: https://iquilezles.org/articles/intersectors/

    This functions calculates the minimum distance of all intersection between a number of rays and multiple spheres.
    nl is the number of rays, ns the number of spheres for the dimensions below in the description.

    :param l1: array (nl, 3) for line starting points
    :param ld: array (nl, 3) for the line directions from the starting points (does not need to be unit vector)
    :param center: array (ns, 3) for the center of the spheres
    :param rad: array (ns,) for the radius of the spheres
    :return: array (nl,) with the shortest intersection distance in the direction of each ray, otherwise result is
        something negative (as no criteria for intersect "behind" the starting point is not well defined)
    """
    # array(3,) between each start point and center
    oc = l1[:, None] - center  # array(nl, ns, 3)
    rd = ld / np.linalg.norm(ld, axis=1)[:, None]  # unit directions, array(nl, 3)
    # (nl, ns, 3) . (3, nl) -> (nl, ns, nl); the fancy index keeps each ray
    # paired with its own direction (the nl/nl diagonal) -> (nl, ns)
    b = np.dot(oc, rd.T)[range(rd.shape[0]), :, range(rd.shape[0])]  # np.diagonal(np.dot(oc, rd.T), axis1=1, axis2=2)
    # Per (ray, sphere) constant term of the quadratic.
    c = np.linalg.norm(oc, axis=2)**2 - rad**2
    h = b * b - c  # discriminant per (ray, sphere)
    h[h < 0.0] = -np.inf  # no intersection at these points
    mask = h >= 0.0
    h[mask] = np.sqrt(h[mask])
    res = np.minimum(-b + h, -b - h)  # This would not work if starting point is within sphere
    # Only return the closest positive distance, otherwise it is just a random negative value (of 1st intersec)
    return res[np.arange(res.shape[0]), np.where(res > 0, res, np.inf).argmin(axis=1)]
def intersec_dist_line_capsule(l1: np.ndarray, ld: np.ndarray, cap1: np.ndarray, cap2: np.ndarray,
                               cap_rad: float) -> float:
    """
    return closest distance from starting point to intersection of capsule, otherwise returns -np.inf if no intersection
    is found. Intersection point can then be found by multiplying unit vector in direction of line by this distance.
    .. note::
        This solution ALWAYS finds the first intersection (if there is) in the direction of the ray vector. This
        means, it does not matter where the starting point is, it finds the first intersection in direction of the ray
        vector and can thus also return negative values
    Solution found here:
    https://iquilezles.org/articles/intersectors/
    :param l1: array (3,) for line starting point
    :param ld: array (3,) for the line direction from the starting point (does not need to be unit vector)
    :param cap1: array (3,) for capsule start
    :param cap2: array (3,) for capsule end
    :param cap_rad: capsule radius
    :return: distance from line starting point to intersection, -np.inf if no intersection at all
    """
    ba = cap2 - cap1  # capsule axis vector
    oa = l1 - cap1  # ray origin relative to capsule start
    # direction of vector as unit vector
    rd = ld / np.linalg.norm(ld)
    # Dot products reused by the quadratic for the infinite cylinder around the axis.
    baba = np.dot(ba, ba)
    bard = np.dot(ba, rd)
    baoa = np.dot(ba, oa)
    rdoa = np.dot(rd, oa)
    oaoa = np.dot(oa, oa)
    a = baba - bard * bard  # NOTE(review): a == 0 when ray is parallel to the axis; division below then misbehaves - confirm callers avoid this
    b = baba * rdoa - baoa * bard
    c = baba * oaoa - baoa * baoa - cap_rad * cap_rad * baba
    h = b * b - a * c  # discriminant; negative => ray misses the infinite cylinder
    if h >= 0.0:
        t = (-b - np.sqrt(h)) / a  # nearer intersection parameter with the cylinder
        y = baoa + t * bard  # hit point projected onto the axis, scaled by |ba|^2
        # body: hit lies between the two cap planes (0 < y < |ba|^2)
        if 0.0 < y < baba:
            return t
        # caps: test the sphere cap on whichever side the cylinder hit fell outside of
        oc = oa if y <= 0.0 else l1 - cap2
        b = np.dot(rd, oc)
        c = np.dot(oc, oc) - cap_rad * cap_rad
        h2 = b * b - c  # discriminant of the ray/cap-sphere quadratic
        if h2 > 0.0:
            return -b - np.sqrt(h2)
    return -np.inf
def intersec_dist_line_capsule_vectorized(l1: np.ndarray, ld: np.ndarray, cap1: np.ndarray, cap2: np.ndarray,
                                          cap_rad: float, default: float = -np.inf) -> np.ndarray:
    """
    Return the closest distance for multiple lines defined as in l1 and ld and find the shortest distances for ONE
    capsule
    :param l1: array (n,3) for lines starting point
    :param ld: array (n,3) for the lines direction from the starting point (does not need to be unit vector)
    :param cap1: array (3,) for capsule start
    :param cap2: array (3,) for capsule end
    :param cap_rad: capsule radius
    :param default: default number if no intersection is found
    :return: array(n,) with the distances calculated
    """
    ba = (cap2 - cap1)  # capsule axis vector
    oa = l1 - cap1  # ray origins relative to capsule start, (n, 3)
    # direction of vector as unit vector
    rd = ld / np.linalg.norm(ld, axis=1)[:, None]
    # Per-ray dot products for the quadratic against the infinite cylinder.
    baba = np.dot(ba, ba)
    bard = np.dot(rd, ba)
    baoa = np.dot(oa, ba)
    rdoa = np.diag(np.dot(rd, oa.T))
    oaoa = np.diag(np.dot(oa, oa.T))
    a = baba - bard * bard
    b = baba * rdoa - baoa * bard
    c = baba * oaoa - baoa * baoa - cap_rad * cap_rad * baba
    h = b * b - a * c  # per-ray discriminant; negative => misses the infinite cylinder
    res = np.zeros(l1.shape[0])
    # Vectorize conditional statements
    mask_h = h >= 0
    t = np.zeros(h.shape[0])
    t[~mask_h] = -np.inf
    t[mask_h] = (-b[mask_h] - np.sqrt(h[mask_h])) / a[mask_h]
    y = baoa + t * bard  # axis projection of each cylinder hit, scaled by |ba|^2
    # body: hits that land between the two cap planes
    mask_body = (h >= 0) & (y > 0) & (y < baba)
    res[mask_body] = t[mask_body]
    # caps: pick the cap sphere on the side each ray's hit fell outside of
    # NOTE(review): at y == 0 exactly both masks apply and the second assignment wins - confirm intended
    oc = np.zeros(l1.shape)
    oc[y <= 0.0] = oa[y <= 0.0]
    oc[y >= 0.0] = (l1 - cap2)[y >= 0.0]
    b = np.diag(np.dot(rd, oc.T))
    c = np.diag(np.dot(oc, oc.T)) - cap_rad * cap_rad
    h2 = b * b - c  # discriminant of the ray/cap-sphere quadratic
    mask_caps = (h >= 0) & (h2 > 0.0) & ~mask_body
    res[mask_caps] = (-b[mask_caps] - np.sqrt(h2[mask_caps]))  # Double indexing to avoid runtime warning with sqrt
    # No intersection or behind:
    res[(h <= 0) | (res == 0)] = default
    return res
def dist_line_point(po: np.ndarray, l1: np.ndarray, l2: np.ndarray) -> float:
    """
    Shortest distance between the line segment l1-l2 and the point po.
    Based on: https://stackoverflow.com/questions/56463412/distance-from-a-point-to-a-line-segment-in-3d-python
    :param po: array (3,) for the point position
    :param l1: array (3,) for start of line
    :param l2: array (3,) for end of line
    :return: shortest distance between line segment and point
    """
    segment = l2 - l1
    tangent = np.divide(segment, np.linalg.norm(segment))  # unit tangent along the segment
    # Signed overshoot beyond either endpoint, measured along the tangent.
    beyond_start = np.dot(l1 - po, tangent)
    beyond_end = np.dot(po - l2, tangent)
    # Zero when the projection falls inside the segment (clamped).
    parallel_part = max(beyond_start, beyond_end, 0)
    # Distance from the point to the infinite line.
    perpendicular_part = np.linalg.norm(np.cross(po - l1, tangent))
    return np.hypot(parallel_part, perpendicular_part)
def vec_line_point(po: np.ndarray, l1: np.ndarray, l2: np.ndarray) -> np.ndarray:
    """
    Offset between the point and its projection onto the infinite line through l1 and l2.
    NOTE(review): the returned vector is ``projection - po``, i.e. it points from the
    point towards the line; the previous docstring claimed the opposite direction -
    confirm against callers before relying on the sign.
    :param po: array (3,) for the point position
    :param l1: array (3,) for start of line
    :param l2: array (3,) for end of line
    :return: array(3,) from the point to its projection on the line
    """
    axis = (l2 - l1) / np.linalg.norm(l2 - l1)  # unit vector along the line
    to_point = po - l1
    along = np.dot(to_point, axis)  # projection distance from l1
    projection = l1 + along * axis  # foot of the perpendicular on the line
    return projection - po
|
import numpy as np
from abc import ABC, abstractmethod
from functools import cached_property
from typing import List
class Shape(ABC):
    """
    Abstract base for all geometric shapes; every shape stores the coordinates
    of its center position.
    """
    def __init__(self, position: np.ndarray):
        # Copy into an ndarray so plain lists/tuples are accepted too.
        self.position = np.array(position)
    @abstractmethod
    def get_plot_variables(self) -> List[List[np.ndarray]]:
        """
        Produce the coordinate arrays consumed by matplotlib's axes.surface_plot().
        :return: list of [x, y, z] array triples, one inner list per surface
        """
class Sphere(Shape):
    """
    A sphere defined by its center position and radius.
    """
    def __init__(self, position: np.ndarray, radius: float):
        super().__init__(position)  # base class stores the center position
        self.radius = radius
    def get_plot_variables(self) -> List[List[np.ndarray]]:
        """Return the sphere's surface mesh translated to its center."""
        mesh_x, mesh_y, mesh_z = self.get_plot_shape(self.radius)
        return [[self.position[0] + mesh_x,
                 self.position[1] + mesh_y,
                 self.position[2] + mesh_z]]
    @staticmethod
    def get_plot_shape(radius: float, scale: float = 1, sweep1: int = 20, sweep2: int = 20):
        """
        Build a (half-)sphere mesh centered at the origin. Static so Capsule can
        reuse it for its end caps.
        :param radius: radius of sphere
        :param scale: [0, 1] fraction of the polar sweep (0.5 gives a half sphere)
        :param sweep1: mesh resolution along the polar angle
        :param sweep2: mesh resolution along the azimuthal angle
        :return: x, y, z coordinate arrays for the plotting function
        """
        u, v = np.mgrid[0:scale * np.pi:sweep1 * 1j, 0:2 * np.pi:sweep2 * 1j]
        return (radius * np.sin(u) * np.cos(v),
                radius * np.sin(u) * np.sin(v),
                radius * np.cos(u))
class Spheres:
    """
    Aggregates a list of Sphere objects into dense arrays.
    Packing all centers into one (n, 3) array and all radii into one (n,) array
    lets the vectorized collision/intersection helpers process the whole
    collection at once. Could later grow update/add features, but those are not
    needed here.
    """
    def __init__(self, spheres: List[Sphere]):
        count = len(spheres)
        self.position = np.zeros((count, 3))
        self.radius = np.zeros(count)
        self.objs = list(spheres)  # keep the original objects alongside the arrays
        for idx, sphere in enumerate(spheres):
            self.position[idx, :] = sphere.position
            self.radius[idx] = sphere.radius
    def __call__(self) -> List[Sphere]:
        """
        Calling the collection returns the underlying spheres.
        :return: Return the list of spheres
        """
        return self.objs
class Capsule(Shape):
    """
    Represents a Capsule, height is the total height, position is the center of the cylinder
    .. note:
        so far the half sphere we use a full sphere yet as long as there is no method for just plotting the
        necessary half sphere
    """
    def __init__(self, position: np.ndarray, radius: float, vec_top: np.ndarray):
        """
        :param position: Position of center of capsule
        :param radius: radius of capsule
        :param vec_top: line endpoint of axis of capsule (not to the very end, until sphere planar area)
        """
        super().__init__(position)  # Call inherited init functions and then add to it
        self.radius = radius
        self.vec_top = vec_top
        # Mirror vec_top through the center to get the opposite axis endpoint.
        self.vec_bot = self.position - (self.vec_top - self.position)
    def get_plot_variables(self) -> List[List[np.ndarray]]:
        """Return plot arrays for the cylinder body plus both end half-spheres."""
        x_c, y_c, z_c = self.get_plot_shape_cyl
        return [[x_c, y_c, z_c], *self.get_plot_shape_sph]
    @cached_property
    def get_plot_shape_sph(self):
        """Coordinate arrays for the two half-sphere end caps (computed once, cached)."""
        # NOTE: This only works when capsule is aligned with z axis
        x_c1, y_c1, z_c1 = Sphere.get_plot_shape(self.radius, scale=0.5)
        # Top cap: mirror the half sphere in z and shift it to the top endpoint.
        x_c2, y_c2, z_c2 = [x_c1 + self.vec_top[0], y_c1 + self.vec_top[1], -z_c1 + self.vec_top[2]]
        # Bottom cap: shift the half sphere to the bottom endpoint.
        x_c1, y_c1, z_c1 = [x_c1 + self.vec_bot[0], y_c1 + self.vec_bot[1], z_c1 + self.vec_bot[2]]
        return [[x_c1, y_c1, z_c1], [x_c2, y_c2, z_c2]]
    @cached_property
    def get_plot_shape_cyl(self):
        """
        Coordinate arrays for the open cylinder between the two axis endpoints
        (computed once, cached).
        Adapted from:
        https://stackoverflow.com/questions/39822480/plotting-a-solid-cylinder-centered-on-a-plane-in-matplotlib
        """
        # vector in direction of axis
        v = self.vec_top - self.vec_bot
        # find magnitude of vector
        mag = np.linalg.norm(v)
        # unit vector in direction of axis
        v = v / mag
        # make some vector not in the same direction as v
        not_v = np.array([1, 0, 0])
        if (v == not_v).all():
            not_v = np.array([0, 1, 0])
        # make vector perpendicular to v
        n1 = np.cross(v, not_v)
        # normalize n1
        n1 /= np.linalg.norm(n1)
        # make unit vector perpendicular to v and n1
        n2 = np.cross(v, n1)
        # surface ranges over t from 0 to length of axis and 0 to 2*pi
        t = np.linspace(0, mag, 2)
        theta = np.linspace(0, 2 * np.pi, 20)
        rsample = np.linspace(0, self.radius, 2)  # NOTE(review): unused leftover from the original recipe
        # use meshgrid to make 2d arrays
        t, theta2 = np.meshgrid(t, theta)
        # rsample, theta = np.meshgrid(rsample, theta)
        # generate coordinates for surface
        # "Tube": axis point + radial offset in the n1/n2 plane, per coordinate axis
        x_c, y_c, z_c = [
            self.vec_bot[i] + v[i] * t + self.radius * np.sin(theta2) * n1[i] + self.radius * np.cos(theta2) * n2[i] for
            i in [0, 1, 2]]
        return x_c, y_c, z_c
def collision_sphere_sphere(pos1: np.ndarray, rad1: float, pos2: np.ndarray, rad2: float) -> bool:
    """
    Check whether two spheres touch or overlap.
    :param pos1: (3,) array for position of first object
    :param rad1: radius of first object
    :param pos2: (3,) array for position of second object
    :param rad2: radius of second object
    :return: returns true for collision
    """
    # Spheres collide when their centers are no farther apart than the sum of radii.
    center_gap = np.linalg.norm(pos1 - pos2)
    return center_gap <= rad1 + rad2
def collision_sphere_spheres(pos1: np.ndarray, rad1: float, pos2: np.ndarray, rad2: np.ndarray) -> bool:
    """
    Check whether one sphere touches or overlaps any of multiple spheres.
    :param pos1: (3,) array for position of first object
    :param rad1: radius of first object
    :param pos2: (n,3) array for position of all other spheres
    :param rad2: radius of all other spheres
    :return: returns true if collision
    """
    # Broadcast (n,3) - (3,) to get each center offset, then compare per-pair reach.
    gaps = np.linalg.norm(pos2 - pos1, axis=1)
    reach = rad2 + rad1
    return np.any(gaps <= reach)
def collision_capsule_sphere(cap1: np.ndarray, cap2: np.ndarray, cap_rad: float,
                             sph_pos: np.ndarray, sph_rad: float) -> bool:
    """
    Check whether a capsule touches or overlaps a sphere.
    :param cap1: (3,) array for the position of one of the capsule ends
    :param cap2: (3,) array for the position of the other capsule end
    :param cap_rad: radius of cylinder
    :param sph_pos: (3,) array for position of sphere
    :param sph_rad: radius of sphere
    :return: returns true for collision
    """
    # The capsule surface is everything within cap_rad of its axis segment,
    # so the test reduces to a point/segment distance check.
    axis_distance = dist_line_point(sph_pos, cap1, cap2)
    return axis_distance <= cap_rad + sph_rad
def intersec_dist_line_sphere(l1: np.ndarray, ld: np.ndarray, center: np.ndarray, rad: float):
    """
    Signed distance along the ray from l1 to its intersection with a sphere;
    -np.inf when the ray's line misses the sphere entirely. Of the two hits the
    one with the smallest magnitude (closest to the start point) is returned.
    From: https://iquilezles.org/articles/intersectors/
    :param l1: array (3,) for line starting point
    :param ld: array (3,) for the line direction from the starting point (does not need to be unit vector)
    :param center: array(3,) for the center of the sphere
    :param rad: radius of the sphere
    """
    unit_dir = ld / np.linalg.norm(ld)
    offset = l1 - center  # start point relative to sphere center
    half_b = np.dot(offset, unit_dir)
    c_term = np.dot(offset, offset) - rad * rad
    discriminant = half_b * half_b - c_term
    if discriminant < 0.0:
        return -np.inf  # line misses the sphere
    root = np.sqrt(discriminant)
    far = -half_b + root
    near = -half_b - root
    # Report the hit closest to the start; on a tie prefer the far one to
    # match min([-b + h, -b - h], key=abs) of the original formulation.
    return far if abs(far) <= abs(near) else near
def intersec_dist_lines_spheres_vectorized(l1: np.ndarray, ld: np.ndarray, center: np.ndarray, rad: np.ndarray):
    """
    Adapted from: https://iquilezles.org/articles/intersectors/
    This functions calculates the minimum distance of all intersection between a number of rays and multiple spheres
    nl is the number of rays, ns the number of sphere for the dimensions below in the description
    :param l1: array (nl, 3) for line starting points
    :param ld: array (nl, 3) for the line directions from the starting points (does not need to be unit vector)
    :param center: array(ns, 3) for the center of the spheres
    :param rad: array(ns,) for the radius of the spheres
    :return: array(nl,) with the shortest intersection distance in the direction of each ray, otherwise result is
    something negative (as no criteria for intersect "behind" the starting point is not well defined)
    """
    # Offset of every ray start from every sphere center.
    oc = l1[:, None] - center  # array(nl, ns, 3)
    rd = ld / np.linalg.norm(ld, axis=1)[:, None]  # array(nl, 3), each row a unit direction
    # Half-b coefficient of the ray/sphere quadratic, per (ray, sphere) pair:
    # (nl, ns, 3) . (3, nl) -> (nl, ns, nl); then taking diagonal -> (nl, ns, 1)
    b = np.dot(oc, rd.T)[range(rd.shape[0]), :, range(rd.shape[0])] #np.diagonal(np.dot(oc, rd.T), axis1=1, axis2=2)
    # c coefficient |oc|^2 - rad^2, per (ray, sphere) pair:
    # (nl, ns, 3) . (3, ns, nl) -> (nl, ns, nl); then taking diagonal -> (nl, ns, 1)
    c = np.linalg.norm(oc, axis=2)**2 - rad**2
    h = b * b - c  # (nl, ns) discriminant per ray/sphere pair
    h[h < 0.0] = -np.inf  # no intersection at these points
    mask = h >= 0.0
    h[mask] = np.sqrt(h[mask])
    res = np.minimum(-b + h, -b - h)  # This would not work if starting point is within sphere
    # Only return the closest positive distance, otherwise it is just a random negative value (of 1st intersec)
    return res[np.arange(res.shape[0]), np.where(res > 0, res, np.inf).argmin(axis=1)]
def intersec_dist_line_capsule(l1: np.ndarray, ld: np.ndarray, cap1: np.ndarray, cap2: np.ndarray,
                               cap_rad: float) -> float:
    """
    return closest distance from starting point to intersection of capsule, otherwise returns -np.inf if no intersection
    is found. Intersection point can then be found by multiplying unit vector in direction of line by this distance.
    .. note::
        This solution ALWAYS finds the first intersection (if there is) in the direction of the ray vector. This
        means, it does not matter where the starting point is, it finds the first intersection in direction of the ray
        vector and can thus also return negative values
    Solution found here:
    https://iquilezles.org/articles/intersectors/
    :param l1: array (3,) for line starting point
    :param ld: array (3,) for the line direction from the starting point (does not need to be unit vector)
    :param cap1: array (3,) for capsule start
    :param cap2: array (3,) for capsule end
    :param cap_rad: capsule radius
    :return: distance from line starting point to intersection, -np.inf if no intersection at all
    """
    ba = cap2 - cap1  # capsule axis vector
    oa = l1 - cap1  # ray origin relative to capsule start
    # direction of vector as unit vector
    rd = ld / np.linalg.norm(ld)
    # Dot products reused by the quadratic for the infinite cylinder around the axis.
    baba = np.dot(ba, ba)
    bard = np.dot(ba, rd)
    baoa = np.dot(ba, oa)
    rdoa = np.dot(rd, oa)
    oaoa = np.dot(oa, oa)
    a = baba - bard * bard  # NOTE(review): a == 0 when ray is parallel to the axis; division below then misbehaves - confirm callers avoid this
    b = baba * rdoa - baoa * bard
    c = baba * oaoa - baoa * baoa - cap_rad * cap_rad * baba
    h = b * b - a * c  # discriminant; negative => ray misses the infinite cylinder
    if h >= 0.0:
        t = (-b - np.sqrt(h)) / a  # nearer intersection parameter with the cylinder
        y = baoa + t * bard  # hit point projected onto the axis, scaled by |ba|^2
        # body: hit lies between the two cap planes (0 < y < |ba|^2)
        if 0.0 < y < baba:
            return t
        # caps: test the sphere cap on whichever side the cylinder hit fell outside of
        oc = oa if y <= 0.0 else l1 - cap2
        b = np.dot(rd, oc)
        c = np.dot(oc, oc) - cap_rad * cap_rad
        h2 = b * b - c  # discriminant of the ray/cap-sphere quadratic
        if h2 > 0.0:
            return -b - np.sqrt(h2)
    return -np.inf
def intersec_dist_line_capsule_vectorized(l1: np.ndarray, ld: np.ndarray, cap1: np.ndarray, cap2: np.ndarray,
                                          cap_rad: float, default: float = -np.inf) -> np.ndarray:
    """
    Return the closest distance for multiple lines defined as in l1 and ld and find the shortest distances for ONE
    capsule
    :param l1: array (n,3) for lines starting point
    :param ld: array (n,3) for the lines direction from the starting point (does not need to be unit vector)
    :param cap1: array (3,) for capsule start
    :param cap2: array (3,) for capsule end
    :param cap_rad: capsule radius
    :param default: default number if no intersection is found
    :return: array(n,) with the distances calculated
    """
    ba = (cap2 - cap1)  # capsule axis vector
    oa = l1 - cap1  # ray origins relative to capsule start, (n, 3)
    # direction of vector as unit vector
    rd = ld / np.linalg.norm(ld, axis=1)[:, None]
    # Per-ray dot products for the quadratic against the infinite cylinder.
    baba = np.dot(ba, ba)
    bard = np.dot(rd, ba)
    baoa = np.dot(oa, ba)
    rdoa = np.diag(np.dot(rd, oa.T))
    oaoa = np.diag(np.dot(oa, oa.T))
    a = baba - bard * bard
    b = baba * rdoa - baoa * bard
    c = baba * oaoa - baoa * baoa - cap_rad * cap_rad * baba
    h = b * b - a * c  # per-ray discriminant; negative => misses the infinite cylinder
    res = np.zeros(l1.shape[0])
    # Vectorize conditional statements
    mask_h = h >= 0
    t = np.zeros(h.shape[0])
    t[~mask_h] = -np.inf
    t[mask_h] = (-b[mask_h] - np.sqrt(h[mask_h])) / a[mask_h]
    y = baoa + t * bard  # axis projection of each cylinder hit, scaled by |ba|^2
    # body: hits that land between the two cap planes
    mask_body = (h >= 0) & (y > 0) & (y < baba)
    res[mask_body] = t[mask_body]
    # caps: pick the cap sphere on the side each ray's hit fell outside of
    # NOTE(review): at y == 0 exactly both masks apply and the second assignment wins - confirm intended
    oc = np.zeros(l1.shape)
    oc[y <= 0.0] = oa[y <= 0.0]
    oc[y >= 0.0] = (l1 - cap2)[y >= 0.0]
    b = np.diag(np.dot(rd, oc.T))
    c = np.diag(np.dot(oc, oc.T)) - cap_rad * cap_rad
    h2 = b * b - c  # discriminant of the ray/cap-sphere quadratic
    mask_caps = (h >= 0) & (h2 > 0.0) & ~mask_body
    res[mask_caps] = (-b[mask_caps] - np.sqrt(h2[mask_caps]))  # Double indexing to avoid runtime warning with sqrt
    # No intersection or behind:
    res[(h <= 0) | (res == 0)] = default
    return res
def dist_line_point(po: np.ndarray, l1: np.ndarray, l2: np.ndarray) -> float:
    """
    Shortest distance between the line segment l1-l2 and the point po.
    Based on: https://stackoverflow.com/questions/56463412/distance-from-a-point-to-a-line-segment-in-3d-python
    :param po: array (3,) for the point position
    :param l1: array (3,) for start of line
    :param l2: array (3,) for end of line
    :return: shortest distance between line segment and point
    """
    segment = l2 - l1
    tangent = np.divide(segment, np.linalg.norm(segment))  # unit tangent along the segment
    # Signed overshoot beyond either endpoint, measured along the tangent.
    beyond_start = np.dot(l1 - po, tangent)
    beyond_end = np.dot(po - l2, tangent)
    # Zero when the projection falls inside the segment (clamped).
    parallel_part = max(beyond_start, beyond_end, 0)
    # Distance from the point to the infinite line.
    perpendicular_part = np.linalg.norm(np.cross(po - l1, tangent))
    return np.hypot(parallel_part, perpendicular_part)
def vec_line_point(po: np.ndarray, l1: np.ndarray, l2: np.ndarray) -> np.ndarray:
    """
    Offset between the point and its projection onto the infinite line through l1 and l2.
    NOTE(review): the returned vector is ``projection - po``, i.e. it points from the
    point towards the line; the previous docstring claimed the opposite direction -
    confirm against callers before relying on the sign.
    :param po: array (3,) for the point position
    :param l1: array (3,) for start of line
    :param l2: array (3,) for end of line
    :return: array(3,) from the point to its projection on the line
    """
    axis = (l2 - l1) / np.linalg.norm(l2 - l1)  # unit vector along the line
    to_point = po - l1
    along = np.dot(to_point, axis)  # projection distance from l1
    projection = l1 + along * axis  # foot of the perpendicular on the line
    return projection - po
|
en
| 0.774628
|
This is a base class for any shape, should always contain center coordinates of position. Function that returns the plot variables for the matplotlib axes.surface_plot() function :return: return list of list of arrays for plotting (one inner list contains plotting arrays) Represents a sphere # Call inherited init functions and then add to it Also used by capsule to create half spheres, therefor static method :param radius: radius of sphere :param scale: [0, 1] range for circle :param sweep1: first sweep of mesh :param sweep2: second sweep of mesh :return: x, y, z coordinates for plotting function Helper class to access and store data from all spheres Most important is to access all the positions and radius as a big array for vectorized functions This class can be enhanced with update or adding features, but are not needed in this case When this class is called as a function, return the spheres :return: Return the list of spheres Represents a Capsule, height is the total height, position is the center of the cylinder .. 
note: so far the half sphere we use a full sphere yet as long as there is no method for just plotting the necessary half sphere :param position: Position of center of capsule :param radius: radius of capsule :param vec_top: line endpoint of axis of capsule (not to the very end, until sphere planar area) # Call inherited init functions and then add to it # NOTE: This only works when capsule is aligned with z axis Adapted from: https://stackoverflow.com/questions/39822480/plotting-a-solid-cylinder-centered-on-a-plane-in-matplotlib # vector in direction of axis # find magnitude of vector # unit vector in direction of axis # make some vector not in the same direction as v # make vector perpendicular to v # normalize n1 # make unit vector perpendicular to v and n1 # surface ranges over t from 0 to length of axis and 0 to 2*pi # use meshgrid to make 2d arrays # rsample, theta = np.meshgrid(rsample, theta) # generate coordinates for surface # "Tube" Determining whether two sphere objects collide :param pos1: (3,) array for position of first object :param rad1: radius of first object :param pos2: (3,) array for position of second object :param rad2: radius of second object :return: returns true for collision Determining whether one sphere 1 collides with any of multiple spheres :param pos1: (3,) array for position of first object :param rad1: radius of first object :param pos2: (n,3) array for position of all other spheres :param rad2: radius of all other spheres :return: returns true if collision Determining whether a cylinder collides with a sphere :param cap1: (3,) array for the position of one of the capsule ends :param cap2: (3,) array for the position of the other capsule end :param cap_rad: radius of cylinder :param sph_pos: (3,) array for position of sphere :param sph_rad: radius of sphere :return: returns true for collision # Closest distance between sphere center and capsule line # Check for collision From: https://iquilezles.org/articles/intersectors/ :param l1: 
array (3,) for line starting point :param ld: array (3,) for the line direction from the starting point (does not need to be unit vector) :param center: array(3,) for the center of the sphere :param rad: radius of the sphere # type: np.ndarray # will be float # will be float # float # no intersection Adapted from: https://iquilezles.org/articles/intersectors/ This functions calculates the minimum distance of all intersection between a number of rays and multiple spheres nl is the number of rays, ns the number of sphere for the dimensions below in the description :param l1: array (nl, 3) for line starting points :param ld: array (nl, 3) for the line directions from the starting points (does not need to be unit vector) :param center: array(ns, 3) for the center of the spheres :param rad: array(ns,) for the radius of the spheres :return: array(nl,) with the shortest intersection distance in the direction of each ray, otherwise result is something negative (as no criteria for intersect "behind" the starting point is not well defined) # array(3,) between each start point and center # array(nl, ns, 3) # array(nl, 3) # (nl, ns, 3) . (3, nl) -> (nl, ns, nl); then taking diagonal -> (nl, ns, 1) #np.diagonal(np.dot(oc, rd.T), axis1=1, axis2=2) # (nl, ns, 3) . (3, ns, nl) -> (nl, ns, nl); then taking diagonal -> (nl, ns, 1) # float # no intersection at these points # This would not work if starting point is within sphere # Only return the closest positive distance, otherwise it is just a random negative value (of 1st intersec) return closest distance from starting point to intersection of capsule, otherwise returns -np.inf if no intersection is found. Intersection point can then be found by multiplying unit vector in direction of line by this distance. .. note:: This solution ALWAYS finds the first intersection (if there is) in the direction of the ray vector. 
This means, it does not matter where the starting point is, it finds the first intersection in direction of the ray vector and can thus also return negative values Solution found here: https://iquilezles.org/articles/intersectors/ :param l1: array (3,) for line starting point :param ld: array (3,) for the line direction from the starting point (does not need to be unit vector) :param cap1: array (3,) for capsule start :param cap2: array (3,) for capsule end :param cap_rad: capsule radius :return: distance from line starting point to intersection, -np.inf if no intersection at all # direction of vector as unit vector # body # caps Return the closest distance for multiple lines defined as in l1 and ld and find the shortest distances for ONE capsule :param l1: array (n,3) for lines starting point :param ld: array (n,3) for the lines direction from the starting point (does not need to be unit vector) :param cap1: array (3,) for capsule start :param cap2: array (3,) for capsule end :param cap_rad: capsule radius :param default: default number if no intersection is found :return: array(n,) with the distances calculated # direction of vector as unit vector # Vectorize conditional statements # body # caps # Double indexing to avoid runtime warning with sqrt # No intersection or behind: Function to calculate the closest distance between a line segment and a point From: https://stackoverflow.com/questions/56463412/distance-from-a-point-to-a-line-segment-in-3d-python :param po: array (3,) for the point position :param l1: array (3,) for start of line :param l2: array (3,) for end of line :return: shortest distance between line and point # normalized tangent vector # signed parallel distance components # clamped parallel distance # perpendicular distance component This function returns the vector pointing from the line towards the point :param po: array (3,) for the point position :param l1: array (3,) for start of line :param l2: array (3,) for end of line :return: array(3,) 
pointing from line to point # Unit vector for line # Projection distance # Projected point on line
| 3.639998
| 4
|
src/screen.py
|
pjimenezmateo/chip8
| 0
|
6629163
|
import pyglet
class Screen(pyglet.window.Window):
    """A pyglet window rendering the 64x32 CHIP-8 display.
    Each CHIP-8 pixel is drawn as a tile_size x tile_size white quad with a
    black outline; the pixel on/off state lives in display_buffer.
    """
    display_buffer = None  # 2D list indexed [x][y]; truthy => pixel lit (see clear_buffer)
    screen_width = 64  # CHIP-8 horizontal resolution in pixels
    screen_height = 32  # CHIP-8 vertical resolution in pixels
    tile_size = 10  # side length in window pixels of one CHIP-8 pixel
    clock = None  # NOTE(review): never assigned in this class - possibly unused
    def __init__(self, width, height):
        super(Screen, self).__init__(width=width, height=height, visible=True, vsync=False)
        self.clear_buffer()
        # NOTE(review): set_fps_limit exists only in older pyglet releases - confirm pinned version
        pyglet.clock.set_fps_limit(30)
    def on_draw(self):
        """Clear the window and redraw every lit pixel from display_buffer."""
        self.clear()
        pixels_to_draw = pyglet.graphics.Batch()
        borders_to_draw = pyglet.graphics.Batch()
        for x in range(self.screen_width):
            for y in range(self.screen_height):
                if self.display_buffer[x][y]:
                    x_position = x * self.tile_size
                    # Flip vertically: buffer row 0 is the top row, GL origin is bottom-left.
                    y_position = ((self.screen_height*self.tile_size) - self.tile_size) - y * self.tile_size
                    # Quad corners for the filled pixel.
                    pixel_list = ('v2f', [
                        x_position + 0, y_position + 0,
                        x_position + self.tile_size, y_position + 0,
                        x_position + self.tile_size, y_position + self.tile_size,
                        x_position + 0, y_position + self.tile_size
                    ])
                    # Four edges as endpoint pairs for the pixel outline.
                    border_list = ('v2f', [
                        x_position + 0, y_position + 0,
                        x_position + self.tile_size, y_position + 0,
                        x_position + self.tile_size, y_position + 0,
                        x_position + self.tile_size, y_position + self.tile_size,
                        x_position + self.tile_size, y_position + self.tile_size,
                        x_position + 0, y_position + self.tile_size,
                        x_position + 0, y_position + self.tile_size,
                        x_position + 0, y_position + 0
                    ])
                    pixels_to_draw.add(4, pyglet.gl.GL_QUADS, None, pixel_list)
                    borders_to_draw.add(8, pyglet.gl.GL_LINES, None, border_list)
        # White fill for lit pixels first, then black outlines on top.
        pyglet.gl.glColor3f(1, 1, 1)
        pixels_to_draw.draw()
        pyglet.gl.glColor3f(0, 0, 0)
        borders_to_draw.draw()
    def clear_buffer(self):
        # Reset every pixel to off (0).
        self.display_buffer = [[0 for y in range(self.screen_height)] for x in range(self.screen_width)]
    def get_pixel(self, x, y):
        # Return the stored state of pixel (x, y).
        return self.display_buffer[x][y]
    def set_pixel(self, x, y, val):
        # Store the state of pixel (x, y).
        self.display_buffer[x][y] = val
    def render_once(self):
        # Tick the clock and run one manual draw/flip cycle (no pyglet.app loop).
        pyglet.clock.tick()
        self.switch_to()
        self.dispatch_events()
        self.dispatch_event('on_draw')
        self.flip()
    def check_keys(self):
        # Pump window events (e.g. keyboard input) without triggering a redraw.
        self.switch_to()
        self.dispatch_events()
        self.flip()
|
import pyglet
class Screen(pyglet.window.Window):
    """A pyglet window rendering the 64x32 CHIP-8 display.
    Each CHIP-8 pixel is drawn as a tile_size x tile_size white quad with a
    black outline; the pixel on/off state lives in display_buffer.
    """
    display_buffer = None  # 2D list indexed [x][y]; truthy => pixel lit (see clear_buffer)
    screen_width = 64  # CHIP-8 horizontal resolution in pixels
    screen_height = 32  # CHIP-8 vertical resolution in pixels
    tile_size = 10  # side length in window pixels of one CHIP-8 pixel
    clock = None  # NOTE(review): never assigned in this class - possibly unused
    def __init__(self, width, height):
        super(Screen, self).__init__(width=width, height=height, visible=True, vsync=False)
        self.clear_buffer()
        # NOTE(review): set_fps_limit exists only in older pyglet releases - confirm pinned version
        pyglet.clock.set_fps_limit(30)
    def on_draw(self):
        """Clear the window and redraw every lit pixel from display_buffer."""
        self.clear()
        pixels_to_draw = pyglet.graphics.Batch()
        borders_to_draw = pyglet.graphics.Batch()
        for x in range(self.screen_width):
            for y in range(self.screen_height):
                if self.display_buffer[x][y]:
                    x_position = x * self.tile_size
                    # Flip vertically: buffer row 0 is the top row, GL origin is bottom-left.
                    y_position = ((self.screen_height*self.tile_size) - self.tile_size) - y * self.tile_size
                    # Quad corners for the filled pixel.
                    pixel_list = ('v2f', [
                        x_position + 0, y_position + 0,
                        x_position + self.tile_size, y_position + 0,
                        x_position + self.tile_size, y_position + self.tile_size,
                        x_position + 0, y_position + self.tile_size
                    ])
                    # Four edges as endpoint pairs for the pixel outline.
                    border_list = ('v2f', [
                        x_position + 0, y_position + 0,
                        x_position + self.tile_size, y_position + 0,
                        x_position + self.tile_size, y_position + 0,
                        x_position + self.tile_size, y_position + self.tile_size,
                        x_position + self.tile_size, y_position + self.tile_size,
                        x_position + 0, y_position + self.tile_size,
                        x_position + 0, y_position + self.tile_size,
                        x_position + 0, y_position + 0
                    ])
                    pixels_to_draw.add(4, pyglet.gl.GL_QUADS, None, pixel_list)
                    borders_to_draw.add(8, pyglet.gl.GL_LINES, None, border_list)
        # White fill for lit pixels first, then black outlines on top.
        pyglet.gl.glColor3f(1, 1, 1)
        pixels_to_draw.draw()
        pyglet.gl.glColor3f(0, 0, 0)
        borders_to_draw.draw()
    def clear_buffer(self):
        # Reset every pixel to off (0).
        self.display_buffer = [[0 for y in range(self.screen_height)] for x in range(self.screen_width)]
    def get_pixel(self, x, y):
        # Return the stored state of pixel (x, y).
        return self.display_buffer[x][y]
    def set_pixel(self, x, y, val):
        # Store the state of pixel (x, y).
        self.display_buffer[x][y] = val
    def render_once(self):
        # Tick the clock and run one manual draw/flip cycle (no pyglet.app loop).
        pyglet.clock.tick()
        self.switch_to()
        self.dispatch_events()
        self.dispatch_event('on_draw')
        self.flip()
    def check_keys(self):
        # Pump window events (e.g. keyboard input) without triggering a redraw.
        self.switch_to()
        self.dispatch_events()
        self.flip()
|
none
| 1
| 2.835627
| 3
|
|
python/jsbeautifier/core/inputscanner.py
|
bmewburn/js-beautify
| 56
|
6629164
|
# The MIT License (MIT)
#
# Copyright (c) 2007-2018 <NAME>, <NAME>, and contributors.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
class InputScanner:
    """Character/regex cursor over an input string, used by the beautifier tokenizers."""
    def __init__(self, input_string):
        # six supplies string_types for Python 2/3 compatibility (used in get_regexp).
        self.__six = __import__("six")
        if input_string is None:
            input_string = ''
        self.__input = input_string
        self.__input_length = len(self.__input)
        self.__position = 0  # index of the next character to read
    def restart(self):
        # Rewind the cursor to the start of the input.
        self.__position = 0
    def back(self):
        # Step the cursor back one character (no-op at position 0).
        if self.__position > 0:
            self.__position -= 1
    def hasNext(self):
        # True while unread characters remain.
        return self.__position < self.__input_length
    def next(self):
        # Return the next character and advance, or None at end of input.
        val = None
        if self.hasNext():
            val = self.__input[self.__position]
            self.__position += 1
        return val
    def peek(self, index=0):
        # Return the character at the given offset from the cursor without
        # consuming it; None when the offset falls outside the input.
        val = None
        index += self.__position
        if index >= 0 and index < self.__input_length:
            val = self.__input[index]
        return val
    def test(self, pattern, index=0):
        # True if the compiled regex matches at the given offset from the cursor.
        index += self.__position
        return index >= 0 and index < self.__input_length and bool(
            pattern.match(self.__input, index))
    def testChar(self, pattern, index=0):
        # test one character regex match
        val = self.peek(index)
        return val is not None and bool(pattern.match(val))
    def match(self, pattern):
        # Try to match the regex at the cursor; on success consume the matched
        # text and return the match object, otherwise leave the cursor untouched.
        pattern_match = None
        if self.hasNext():
            pattern_match = pattern.match(self.__input, self.__position)
            if bool(pattern_match):
                self.__position = pattern_match.end(0)
        return pattern_match
    def read(self, starting_pattern, until_pattern=None, until_after=False):
        # Consume text that begins with starting_pattern (if given) and then
        # runs until until_pattern; until_after also consumes the terminator.
        val = ''
        pattern_match = None
        if bool(starting_pattern):
            pattern_match = self.match(starting_pattern)
            if bool(pattern_match):
                val = pattern_match.group(0)
        if bool(until_pattern) and \
                (bool(pattern_match) or not bool(starting_pattern)):
            val += self.readUntil(until_pattern, until_after)
        return val
    def readUntil(self, pattern, include_match=False):
        # Consume and return everything up to the next occurrence of pattern
        # (or to end of input); include_match also consumes the match itself.
        val = ''
        pattern_match = None
        match_index = self.__position
        if self.hasNext():
            pattern_match = pattern.search(self.__input, self.__position)
            if bool(pattern_match):
                if include_match:
                    match_index = pattern_match.end(0)
                else:
                    match_index = pattern_match.start(0)
            else:
                match_index = self.__input_length
            val = self.__input[self.__position:match_index]
            self.__position = match_index
        return val
    def readUntilAfter(self, pattern):
        # Like readUntil, but the terminating match is consumed and returned too.
        return self.readUntil(pattern, True)
    def get_regexp(self, pattern, match_from=False):
        # Build a compiled regex from a string or an existing compiled pattern.
        # NOTE(review): match_from is currently unused here - confirm against the
        # JavaScript implementation this module mirrors.
        result = None
        # strings are converted to regexp
        if isinstance(pattern, self.__six.string_types) and pattern != '':
            result = re.compile(pattern)
        elif pattern is not None:
            result = re.compile(pattern.pattern)
        return result
    # css beautifier legacy helpers
    def peekUntilAfter(self, pattern):
        # readUntilAfter without moving the cursor.
        start = self.__position
        val = self.readUntilAfter(pattern)
        self.__position = start
        return val
    def lookBack(self, testVal):
        # True if the lowercased text immediately before the previously read
        # character equals testVal.
        start = self.__position - 1
        return start >= len(testVal) and \
            self.__input[start - len(testVal):start].lower() == testVal
|
# The MIT License (MIT)
#
# Copyright (c) 2007-2018 <NAME>, <NAME>, and contributors.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
class InputScanner:
    """Cursor-based scanner over an input string (js-beautify port).

    Exposes non-consuming probes (``peek``/``test``) and consuming reads
    (``next``/``match``/``read*``) over a single advancing position.
    """

    def __init__(self, input_string):
        """Initialise with *input_string* (``None`` becomes empty)."""
        self.__six = __import__("six")
        text = '' if input_string is None else input_string
        self.__input = text
        self.__input_length = len(text)
        self.__position = 0

    def restart(self):
        """Rewind the cursor to the start of the input."""
        self.__position = 0

    def back(self):
        """Move the cursor one character back, never before the start."""
        self.__position = max(0, self.__position - 1)

    def hasNext(self):
        """True while there is unread input left."""
        return self.__position < self.__input_length

    def next(self):
        """Consume and return one character; None when exhausted."""
        if not self.hasNext():
            return None
        ch = self.__input[self.__position]
        self.__position += 1
        return ch

    def peek(self, index=0):
        """Look at the character *index* away from the cursor (no consume)."""
        at = self.__position + index
        if 0 <= at < self.__input_length:
            return self.__input[at]
        return None

    def test(self, pattern, index=0):
        """Non-consuming regex test at an offset from the cursor."""
        at = self.__position + index
        return 0 <= at < self.__input_length and bool(
            pattern.match(self.__input, at))

    def testChar(self, pattern, index=0):
        """Non-consuming single-character regex test at an offset."""
        ch = self.peek(index)
        return ch is not None and bool(pattern.match(ch))

    def match(self, pattern):
        """Match *pattern* at the cursor; consume the match on success."""
        if not self.hasNext():
            return None
        found = pattern.match(self.__input, self.__position)
        if found:
            self.__position = found.end(0)
        return found

    def read(self, starting_pattern, until_pattern=None, until_after=False):
        """Consume text led by *starting_pattern*, optionally continuing
        up to (or, with *until_after*, through) *until_pattern*."""
        result = ''
        start_match = None
        if starting_pattern:
            start_match = self.match(starting_pattern)
            if start_match:
                result = start_match.group(0)
        if until_pattern and (start_match or not starting_pattern):
            result += self.readUntil(until_pattern, until_after)
        return result

    def readUntil(self, pattern, include_match=False):
        """Consume up to the next *pattern* (or EOF); optionally include it."""
        stop = self.__position
        if self.hasNext():
            found = pattern.search(self.__input, self.__position)
            if found:
                stop = found.end(0) if include_match else found.start(0)
            else:
                stop = self.__input_length
        piece = self.__input[self.__position:stop]
        self.__position = stop
        return piece

    def readUntilAfter(self, pattern):
        """Consume up to and including the next *pattern*."""
        return self.readUntil(pattern, True)

    def get_regexp(self, pattern, match_from=False):
        """Compile *pattern* when it is a non-empty string; re-compile a
        pattern object's source; None stays None."""
        if pattern is None:
            return None
        if isinstance(pattern, self.__six.string_types) and pattern != '':
            return re.compile(pattern)
        return re.compile(pattern.pattern)

    # css beautifier legacy helpers

    def peekUntilAfter(self, pattern):
        """Like readUntilAfter, but the cursor is restored afterwards."""
        saved = self.__position
        result = self.readUntilAfter(pattern)
        self.__position = saved
        return result

    def lookBack(self, testVal):
        """True if *testVal* directly precedes the last consumed character."""
        end = self.__position - 1
        if end < len(testVal):
            return False
        return self.__input[end - len(testVal):end].lower() == testVal
|
en
| 0.765782
|
# The MIT License (MIT) # # Copyright (c) 2007-2018 <NAME>, <NAME>, and contributors. # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # test one character regex match # strings are converted to regexp # css beautifier legacy helpers
| 2.213665
| 2
|
aiobungie/error.py
|
coxir/aiobungie
| 0
|
6629165
|
<gh_stars>0
# MIT License
#
# Copyright (c) 2020 - Present nxtlo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""aiobungie Exceptions."""
from __future__ import annotations
from typing import Sequence, final

__all__: Sequence[str] = [
    "PlayerNotFound",
    "HashError",
    "ActivityNotFound",
    "CharacterTypeError",
    "JsonError",
    "ClanNotFound",
    "CharacterNotFound",
    "NotFound",
    "HTTPException",
    "UserNotFound",
]


@final
class PlayerNotFound(Exception):
    """Raised when an `aiobungie.objects.Player` cannot be found."""


@final
class HashError(Exception):
    """Raised when `aiobungie.objects.Activity.hash` is used for a mode that is not a raid."""


@final
class ActivityNotFound(Exception):
    """Raised when an `aiobungie.objects.Activity` cannot be found."""


@final
class CharacterTypeError(Exception):
    """Raised on an invalid character type."""


@final
class JsonError(Exception):
    """Raised when an HTTP request does not return a JSON response."""


@final
class CharacterNotFound(Exception):
    """Raised when an `aiobungie.objects.Character` cannot be found."""


@final
class HTTPException(Exception):
    """Raised for `aiobungie.http.HTTPClient` request errors."""


@final
class ClanNotFound(Exception):
    """Raised when an `aiobungie.objects.Clan` cannot be found."""


@final
class NotFound(Exception):
    """Raised when an unknown resource cannot be found."""


@final
class UserNotFound(Exception):
    """Raised when an `aiobungie.objects.User` cannot be found."""
|
# MIT License
#
# Copyright (c) 2020 - Present nxtlo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""aiobungie Exceptions."""
from __future__ import annotations
__all__: Sequence[str] = [
    "PlayerNotFound",
    "HashError",
    "ActivityNotFound",
    "CharacterTypeError",
    "JsonError",
    "ClanNotFound",
    "CharacterNotFound",
    "NotFound",
    "HTTPException",
    "UserNotFound",
]
# NOTE: this import comes after `__all__`'s annotation uses `Sequence`;
# that is only safe because of the module's `from __future__ import
# annotations`, which makes the annotation lazy.
from typing import final, Sequence
@final
class PlayerNotFound(Exception):
    """Raised when an `aiobungie.objects.Player` is not found."""
@final
class HashError(Exception):
    """Raised when `aiobungie.objects.Activity.hash` is used for modes that are not raids."""
@final
class ActivityNotFound(Exception):
    """Raised when an `aiobungie.objects.Activity` is not found."""
@final
class CharacterTypeError(Exception):
    """Raised on a character type error."""
@final
class JsonError(Exception):
    """Raised when an HTTP request did not return a JSON response."""
@final
class CharacterNotFound(Exception):
    """Raised when an `aiobungie.objects.Character` is not found."""
@final
class HTTPException(Exception):
    """Raised for `aiobungie.http.HTTPClient` request errors."""
@final
class ClanNotFound(Exception):
    """Raised when an `aiobungie.objects.Clan` is not found."""
@final
class NotFound(Exception):
    """Raised when an unknown request resource was not found."""
@final
class UserNotFound(Exception):
    """Raised when an `aiobungie.objects.User` is not found."""
|
en
| 0.753836
|
# MIT License # # Copyright (c) 2020 - Present nxtlo # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. aiobungie Exceptions. Raised when a `aiobungie.objects.Player` is not found. Raised when `aiobungie.objects.Activity.hash` used for modes that are not raids. Raised when a `aiobungie.objects.Activity` not found. Raised on a character type error. Raised when an HTTP request did not return a json response. Raised when a `aiobungie.objects.Character` not found. Exception for handling `aiobungie.http.HTTPClient` requests errors. Raised when a `aiobungie.objects.Clan` not found. Raised when an unknown request was not found. Raised when a `aiobungie.objects.User` not found.
| 1.889235
| 2
|
src/pykeen/triples/instances.py
|
Rodrigo-A-Pereira/pykeen
| 0
|
6629166
|
<reponame>Rodrigo-A-Pereira/pykeen
# -*- coding: utf-8 -*-
"""Implementation of basic instance factory which creates just instances based on standard KG triples."""
from abc import ABC
from dataclasses import dataclass
from typing import Generic, Mapping, Optional, Tuple, TypeVar
import numpy as np
import scipy.sparse
import torch
from torch.utils import data
from ..typing import MappedTriples
from ..utils import fix_dataclass_init_docs
__all__ = [
'Instances',
'SLCWAInstances',
'LCWAInstances',
'MultimodalInstances',
'MultimodalSLCWAInstances',
'MultimodalLCWAInstances',
]
BatchType = TypeVar("BatchType")
LCWASampleType = Tuple[MappedTriples, torch.FloatTensor]
LCWABatchType = Tuple[MappedTriples, torch.FloatTensor]
SLCWASampleType = TypeVar('SLCWASampleType', bound=MappedTriples)
SLCWABatchType = Tuple[MappedTriples, MappedTriples, Optional[torch.BoolTensor]]
@fix_dataclass_init_docs
@dataclass
class Instances(data.Dataset, Generic[BatchType], ABC):
    """Triples and mappings to their indices.

    Abstract base for training-instance datasets; subclasses implement
    the PyTorch ``Dataset`` protocol (``__len__``/``__getitem__``) and
    the ``from_triples`` factory.
    """
    def __len__(self):  # noqa:D401
        """The number of instances."""
        raise NotImplementedError
    def __getitem__(self, item: int) -> BatchType:  # noqa: D105
        raise NotImplementedError
    @classmethod
    def from_triples(cls, mapped_triples: MappedTriples, num_entities: int) -> 'Instances':
        """Create instances from mapped triples.
        :param mapped_triples: shape: (num_triples, 3)
            The ID-based triples.
        :param num_entities:
            The number of entities.
        :return:
            The instances.
        """
        raise NotImplementedError
@fix_dataclass_init_docs
@dataclass
class SLCWAInstances(Instances[MappedTriples]):
    """Triples and mappings to their indices for sLCWA."""
    #: The mapped triples, shape: (num_triples, 3)
    mapped_triples: MappedTriples
    def __len__(self):  # noqa: D105
        # One instance per positive triple.
        return self.mapped_triples.shape[0]
    def __getitem__(self, item: int) -> MappedTriples:  # noqa: D105
        # A sample is a single ID-based (head, relation, tail) row.
        return self.mapped_triples[item]
    @classmethod
    def from_triples(cls, mapped_triples: MappedTriples, num_entities: int) -> Instances:  # noqa:D102
        # num_entities is unused here; kept for a uniform factory signature.
        return cls(mapped_triples=mapped_triples)
@fix_dataclass_init_docs
@dataclass
class LCWAInstances(Instances[LCWABatchType]):
    """Triples and mappings to their indices for LCWA."""

    #: The unique pairs
    pairs: np.ndarray

    #: The compressed triples in CSR format
    compressed: scipy.sparse.csr_matrix

    @classmethod
    def from_triples(cls, mapped_triples: MappedTriples, num_entities: int) -> Instances:
        """
        Create LCWA instances from triples.

        :param mapped_triples: shape: (num_triples, 3)
            The ID-based triples.
        :param num_entities:
            The number of entities.

        :return:
            The instances.
        """
        triples = mapped_triples.numpy()
        # Collapse to unique (head, relation) pairs; `inverse` maps each
        # triple back to its pair's row.
        hr_pairs, inverse = np.unique(triples[:, :2], return_inverse=True, axis=0)
        tail_ids = triples[:, 2]
        ones = np.ones(triples.shape[0], dtype=np.float32)
        # Sparse multi-hot tail labels: one row per unique (h, r) pair;
        # CSR so that __getitem__ can slice rows quickly.
        labels = scipy.sparse.coo_matrix(
            (ones, (inverse, tail_ids)),
            shape=(hr_pairs.shape[0], num_entities),
        ).tocsr()
        return cls(pairs=hr_pairs, compressed=labels)

    def __len__(self) -> int:  # noqa: D105
        return self.pairs.shape[0]

    def __getitem__(self, item: int) -> LCWABatchType:  # noqa: D105
        dense_row = np.asarray(self.compressed[item, :].todense())[0, :]
        return self.pairs[item], dense_row
@fix_dataclass_init_docs
@dataclass
class MultimodalInstances(Instances):
    """Triples and mappings to their indices as well as multimodal data."""
    # TODO (upstream note): do we need these?
    #: Literal arrays keyed by literal name
    numeric_literals: Mapping[str, np.ndarray]
    #: Mapping from literal name to its column index
    literals_to_id: Mapping[str, int]
@fix_dataclass_init_docs
@dataclass
class MultimodalSLCWAInstances(SLCWAInstances, MultimodalInstances):
    """Triples and mappings to their indices as well as multimodal data for sLCWA."""
@fix_dataclass_init_docs
@dataclass
class MultimodalLCWAInstances(LCWAInstances, MultimodalInstances):
    """Triples and mappings to their indices as well as multimodal data for LCWA."""
|
# -*- coding: utf-8 -*-
"""Implementation of basic instance factory which creates just instances based on standard KG triples."""
from abc import ABC
from dataclasses import dataclass
from typing import Generic, Mapping, Optional, Tuple, TypeVar
import numpy as np
import scipy.sparse
import torch
from torch.utils import data
from ..typing import MappedTriples
from ..utils import fix_dataclass_init_docs
__all__ = [
'Instances',
'SLCWAInstances',
'LCWAInstances',
'MultimodalInstances',
'MultimodalSLCWAInstances',
'MultimodalLCWAInstances',
]
BatchType = TypeVar("BatchType")
LCWASampleType = Tuple[MappedTriples, torch.FloatTensor]
LCWABatchType = Tuple[MappedTriples, torch.FloatTensor]
SLCWASampleType = TypeVar('SLCWASampleType', bound=MappedTriples)
SLCWABatchType = Tuple[MappedTriples, MappedTriples, Optional[torch.BoolTensor]]
@fix_dataclass_init_docs
@dataclass
class Instances(data.Dataset, Generic[BatchType], ABC):
    """Triples and mappings to their indices.

    Abstract base: subclasses provide ``__len__``/``__getitem__`` (the
    PyTorch ``Dataset`` protocol) and the ``from_triples`` factory.
    """
    def __len__(self):  # noqa:D401
        """The number of instances."""
        raise NotImplementedError
    def __getitem__(self, item: int) -> BatchType:  # noqa: D105
        raise NotImplementedError
    @classmethod
    def from_triples(cls, mapped_triples: MappedTriples, num_entities: int) -> 'Instances':
        """Create instances from mapped triples.
        :param mapped_triples: shape: (num_triples, 3)
            The ID-based triples.
        :param num_entities:
            The number of entities.
        :return:
            The instances.
        """
        raise NotImplementedError
@fix_dataclass_init_docs
@dataclass
class SLCWAInstances(Instances[MappedTriples]):
    """Triples and mappings to their indices for sLCWA."""
    #: The mapped triples, shape: (num_triples, 3)
    mapped_triples: MappedTriples
    def __len__(self):  # noqa: D105
        # One instance per positive triple.
        return self.mapped_triples.shape[0]
    def __getitem__(self, item: int) -> MappedTriples:  # noqa: D105
        # A sample is a single ID-based (head, relation, tail) row.
        return self.mapped_triples[item]
    @classmethod
    def from_triples(cls, mapped_triples: MappedTriples, num_entities: int) -> Instances:  # noqa:D102
        # num_entities is unused here; kept for a uniform factory signature.
        return cls(mapped_triples=mapped_triples)
@fix_dataclass_init_docs
@dataclass
class LCWAInstances(Instances[LCWABatchType]):
    """Triples and mappings to their indices for LCWA."""
    #: The unique pairs
    pairs: np.ndarray
    #: The compressed triples in CSR format
    compressed: scipy.sparse.csr_matrix
    @classmethod
    def from_triples(cls, mapped_triples: MappedTriples, num_entities: int) -> Instances:
        """
        Create LCWA instances from triples.
        :param mapped_triples: shape: (num_triples, 3)
            The ID-based triples.
        :param num_entities:
            The number of entities.
        :return:
            The instances.
        """
        mapped_triples = mapped_triples.numpy()
        # Collapse to unique (head, relation) pairs; the inverse index maps
        # each triple back to its pair's row.
        unique_hr, pair_idx_to_triple_idx = np.unique(mapped_triples[:, :2], return_inverse=True, axis=0)
        num_pairs = unique_hr.shape[0]
        tails = mapped_triples[:, 2]
        # Sparse multi-hot tail labels: one row per unique (h, r) pair.
        compressed = scipy.sparse.coo_matrix(
            (np.ones(mapped_triples.shape[0], dtype=np.float32), (pair_idx_to_triple_idx, tails)),
            shape=(num_pairs, num_entities),
        )
        # convert to csr for fast row slicing
        compressed = compressed.tocsr()
        return cls(pairs=unique_hr, compressed=compressed)
    def __len__(self) -> int:  # noqa: D105
        return self.pairs.shape[0]
    def __getitem__(self, item: int) -> LCWABatchType:  # noqa: D105
        # Densify one label row: returns (pair, dense tail-label vector).
        return self.pairs[item], np.asarray(self.compressed[item, :].todense())[0, :]
@fix_dataclass_init_docs
@dataclass
class MultimodalInstances(Instances):
    """Triples and mappings to their indices as well as multimodal data."""
    # TODO (upstream note): do we need these?
    #: Literal arrays keyed by literal name
    numeric_literals: Mapping[str, np.ndarray]
    #: Mapping from literal name to its column index
    literals_to_id: Mapping[str, int]
@fix_dataclass_init_docs
@dataclass
class MultimodalSLCWAInstances(SLCWAInstances, MultimodalInstances):
    """Triples and mappings to their indices as well as multimodal data for sLCWA."""
@fix_dataclass_init_docs
@dataclass
class MultimodalLCWAInstances(LCWAInstances, MultimodalInstances):
    """Triples and mappings to their indices as well as multimodal data for LCWA."""
|
en
| 0.893596
|
# -*- coding: utf-8 -*- Implementation of basic instance factory which creates just instances based on standard KG triples. Triples and mappings to their indices. # noqa:D401 The number of instances. # noqa: D105 Create instances from mapped triples. :param mapped_triples: shape: (num_triples, 3) The ID-based triples. :param num_entities: The number of entities. :return: The instances. Triples and mappings to their indices for sLCWA. #: The mapped triples, shape: (num_triples, 3) # noqa: D105 # noqa: D105 # noqa:D102 Triples and mappings to their indices for LCWA. #: The unique pairs #: The compressed triples in CSR format Create LCWA instances from triples. :param mapped_triples: shape: (num_triples, 3) The ID-based triples. :param num_entities: The number of entities. :return: The instances. # convert to csr for fast row slicing # noqa: D105 # noqa: D105 Triples and mappings to their indices as well as multimodal data. #: TODO: do we need these? Triples and mappings to their indices as well as multimodal data for sLCWA. Triples and mappings to their indices as well as multimodal data for LCWA.
| 2.540572
| 3
|
hoodApp/urls.py
|
umunadine/Hood
| 0
|
6629167
|
<reponame>umunadine/Hood<gh_stars>0
from django.conf.urls import url
from django.conf import settings
from . import views
from django.conf.urls.static import static
# NOTE: Django resolves these patterns top-down, so the order below is
# behavior-relevant (e.g. 'accounts/profile/' before broader matches).
urlpatterns = [
    url(r'^$',views.home,name='home'),
    url(r'^accounts/profile/', views.my_profile, name='my_profile'),
    url(r'^index',views.index,name='index'),
    url(r'^join/(\d+)', views.join, name='join'),
    url(r'^register/',views.register, name='register'),
    url(r'^my_profile',views.my_profile,name = 'my_profile'),
    url(r'^leave',views.leave,name = 'leave'),
    url(r'^search/', views.search_results, name='search_results'),
    url(r'^business/(\d+)', views.business, name='business'),
    url(r'^newbusiness/',views.newbusiness, name='newbusiness'),
]
# Serve user-uploaded media through Django itself only in DEBUG mode.
if settings.DEBUG:
    urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
|
from django.conf.urls import url
from django.conf import settings
from . import views
from django.conf.urls.static import static
# NOTE: Django resolves these patterns top-down, so the order below is
# behavior-relevant (e.g. 'accounts/profile/' before broader matches).
urlpatterns = [
    url(r'^$',views.home,name='home'),
    url(r'^accounts/profile/', views.my_profile, name='my_profile'),
    url(r'^index',views.index,name='index'),
    url(r'^join/(\d+)', views.join, name='join'),
    url(r'^register/',views.register, name='register'),
    url(r'^my_profile',views.my_profile,name = 'my_profile'),
    url(r'^leave',views.leave,name = 'leave'),
    url(r'^search/', views.search_results, name='search_results'),
    url(r'^business/(\d+)', views.business, name='business'),
    url(r'^newbusiness/',views.newbusiness, name='newbusiness'),
]
# Serve user-uploaded media through Django itself only in DEBUG mode.
if settings.DEBUG:
    urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
|
none
| 1
| 1.868757
| 2
|
|
controllers/dvi.py
|
himansu1997/eden
| 4
|
6629168
|
# -*- coding: utf-8 -*-
""" Disaster Victim Identification, Controllers """
# Bail out with a 404 when the DVI module is disabled in deployment settings.
if not settings.has_module(c):
    raise HTTP(404, body="Module disabled: %s" % c)
# -----------------------------------------------------------------------------
def s3_menu_postp():
    """Post-process the module menu: add shortcuts for the most recently
    viewed body and person records under an "Open recent" entry.

    Mutates ``response.menu_options`` in place (web2py controller helper).
    """
    # @todo: rewrite this for new framework
    menu_selected = []
    body_id = s3base.s3_get_last_record_id("dvi_body")
    if body_id:
        body = s3db.dvi_body
        query = (body.id == body_id)
        record = db(query).select(body.id, body.pe_label,
                                  limitby=(0, 1)).first()
        if record:
            label = record.pe_label
            # NOTE(review): assumes menu_options[-3] is this module's menu
            # entry — confirm against the menu layout.
            response.menu_options[-3][-1].append(
                [T("Candidate Matches for Body %(label)s") % dict(label=label),
                 False, URL(f="person",
                            vars=dict(match=record.id))]
            )
            menu_selected.append(
                ["%s: %s" % (T("Body"), label),
                 False, URL(f="body", args=[record.id])]
            )
    person_id = s3base.s3_get_last_record_id("pr_person")
    if person_id:
        person = s3db.pr_person
        query = (person.id == person_id)
        record = db(query).select(person.id, limitby=(0, 1)).first()
        if record:
            name = s3db.pr_person_id().represent(record.id)
            menu_selected.append(
                ["%s: %s" % (T("Person"), name),
                 False, URL(f="person", args=[record.id])]
            )
    if menu_selected:
        menu_selected = [T("Open recent"), True, None, menu_selected]
        response.menu_options.append(menu_selected)
# -----------------------------------------------------------------------------
def index():
    """ Module's Home Page

    Counts undeleted bodies grouped by identification status and hands
    the totals to the view as a JSON-encoded (label, count) list.
    """
    module_name = settings.modules[c].get("name_nice", T("Disaster Victim Identification"))
    btable = s3db.dvi_body
    itable = s3db.dvi_identification
    # Left join so bodies without an identification record count as None.
    body_count = btable.id.count()
    rows = db(btable.deleted == False).select(
        body_count,
        itable.status,
        left=itable.on(itable.pe_id == btable.pe_id),
        groupby=itable.status,
    )
    numbers = {None: 0}
    for row in rows:
        numbers[row[itable.status]] = row[body_count]
    total = sum(numbers.values())
    dvi_id_status = dict(s3db.dvi_id_status)
    dvi_id_status[None] = T("unidentified")
    statistics = [
        (str(label), numbers.get(status) or 0)
        for status, label in dvi_id_status.items()
    ]
    response.title = module_name
    return {"module_name": module_name,
            "total": total,
            "status": json.dumps(statistics)}
# -----------------------------------------------------------------------------
def recreq():
    """ Recovery Requests List

    REST controller for dvi_recreq; defaults the requester to the
    logged-in person.
    """
    table = s3db.dvi_recreq
    table.person_id.default = auth.s3_logged_in_person()

    def prep(r):
        # On interactive create forms (no record yet), hide the workflow
        # fields that only become meaningful once recovery is underway.
        if r.interactive and not r.record:
            table.status.readable = table.status.writable = False
            table.bodies_recovered.readable = table.bodies_recovered.writable = False
        return True

    s3.prep = prep
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def morgue():
    """ Morgue Registry

    REST controller for morgues, with a resource header giving tabs for
    the morgue details and its registered bodies.
    """
    morgue_tabs = [(T("Morgue Details"), ""),
                   (T("Bodies"), "body"),
                   ]
    rheader = S3ResourceHeader([[(T("Morgue"), "name")]
                                ], tabs=morgue_tabs)
    # Pre-processor
    def prep(r):
        # Function to call for all Site Instance Types
        from s3db.org import org_site_prep
        org_site_prep(r)
        return True
    s3.prep = prep
    return s3_rest_controller(rheader = rheader)
# -----------------------------------------------------------------------------
def body():
    """ Dead Bodies Registry

    REST controller for dvi_body with the full DVI workflow tabs
    (recovery through identification).
    """
    gender_opts = s3db.pr_gender_opts
    # Relabel gender code 1 from "other/unknown" to "unknown" for this module.
    gender_opts[1] = T("unknown")
    ntable = s3db.pr_note
    # Note status is managed internally; hide it from forms and lists.
    ntable.status.readable = False
    ntable.status.writable = False
    dvi_tabs = [(T("Recovery"), ""),
                (T("Checklist"), "checklist"),
                (T("Images"), "image"),
                (T("Physical Description"), "physical_description"),
                (T("Effects Inventory"), "effects"),
                (T("Journal"), "note"),
                (T("Identification"), "identification"),
                ]
    rheader = S3ResourceHeader([[(T("ID Tag Number"), "pe_label")],
                                ["gender"],
                                ["age_group"],
                                ],
                               tabs=dvi_tabs)
    return s3_rest_controller(rheader=rheader)
# -----------------------------------------------------------------------------
def person():
    """ Missing Persons Registry (Match Finder)

    REST controller over pr_person, restricted to persons reported
    missing; with a ``?match=<body_id>`` var the list is filtered to
    candidate matches for that body (see dvi_match_query).
    """
    table = s3db.pr_person
    s3.crud_strings["pr_person"].update(
        title_display = T("Missing Person Details"),
        title_list = T("Missing Persons"),
        label_list_button = T("List Missing Persons"),
        msg_list_empty = T("No Persons found"),
        msg_no_match = T("No Persons currently reported missing"))
    s3db.configure("pr_group_membership",
                   list_fields = ["id",
                                  "group_id",
                                  "group_head",
                                  "comments"
                                  ],
                   )
    # Read-only registry: records are created via the missing report flow.
    s3db.configure("pr_person",
                   deletable = False,
                   editable = False,
                   listadd = False,
                   list_fields = ["id",
                                  "first_name",
                                  "middle_name",
                                  "last_name",
                                  "picture",
                                  "gender",
                                  "age_group"
                                  ],
                   )
    def prep(r):
        # Only on the unfiltered list view (no record, method or component):
        # apply the candidate-match filter when a body id was passed.
        if not r.id and not r.method and not r.component:
            body_id = r.get_vars.get("match", None)
            # NOTE(review): this lookup uses db.dvi_body.id while
            # dvi_match_query() queries btable.body_id — one of the two
            # field names is likely wrong; confirm the dvi_body schema.
            body = db(db.dvi_body.id == body_id).select(db.dvi_body.pe_label,
                                                        limitby = (0, 1)
                                                        ).first()
            label = body and body.pe_label or "#%s" % body_id
            if body_id:
                query = dvi_match_query(body_id)
                r.resource.add_filter(query)
                s3.crud_strings["pr_person"].update(
                    #subtitle_list = T("Candidate Matches for Body %s" % label),
                    msg_no_match = T("No matching records found"))
        return True
    s3.prep = prep
    # @ToDo: Add to crud_fields
    # NOTE: the `field` name is unused; the effective statement is the
    # chained assignment of the default.
    field = s3db.pr_person_details.missing.default = True
    table.age_group.readable = True
    table.age_group.writable = True
    # Show only missing persons in list views
    if len(request.args) == 0:
        from s3 import FS
        s3.filter = (FS("person_details.missing") == True)
    mpr_tabs = [(T("Missing Report"), "missing_report"),
                (T("Person Details"), None),
                (T("Physical Description"), "physical_description"),
                (T("Images"), "image"),
                (T("Identity"), "identity"),
                (T("Address"), "address"),
                (T("Contact Data"), "contact"),
                (T("Journal"), "note"),
                ]
    rheader = lambda r: s3db.pr_rheader(r, tabs=mpr_tabs)
    return s3_rest_controller("pr", "person",
                              main = "first_name",
                              extra = "last_name",
                              rheader = rheader,
                              )
# -------------------------------------------------------------------------
def dvi_match_query(body_id):
    """
    Get a query for candidate matches between the missing
    persons registry and a dead body

    @param body_id: the dvi_body record ID
    """
    ptable = s3db.pr_person
    pdtable = s3db.pr_person_details
    ntable = s3db.pr_note
    btable = s3db.dvi_body
    # Base query: persons reported missing with an open "last seen"
    # note (status 1)
    query = ((ptable.deleted == False) & \
             (pdtable.person_id == ptable.id) & \
             (pdtable.missing == True) & \
             (ntable.pe_id == ptable.pe_id) & \
             (ntable.status == 1))
    # Bug fix: look the body up by its record id ("id"), matching the
    # lookup in person() (db.dvi_body.id) — "body_id" is not the key.
    body = db(btable.id == body_id).select(btable.date_of_recovery,
                                           btable.age_group,
                                           btable.gender,
                                           limitby = (0, 1)
                                           ).first()
    if not body:
        # Unknown body: return the unnarrowed missing-persons query
        return query
    # last seen should be before date of recovery
    if body.date_of_recovery:
        q = ((ntable.timestmp <= body.date_of_recovery) | \
             (ntable.timestmp == None))
        query &= q
    # age group should match (code 1 = "unknown" matches anything)
    if body.age_group and body.age_group != 1:
        q = ((ptable.age_group == None) | \
             (ptable.age_group == 1) | \
             (ptable.age_group == body.age_group))
        query &= q
    # gender should match (code 1 = "unknown" matches anything)
    if body.gender and body.gender != 1:
        q = ((ptable.gender == None) | \
             (ptable.gender == 1) | \
             (ptable.gender == body.gender))
        query &= q
    return query
# -----------------------------------------------------------------------------
def tooltip():
    """ Ajax Tooltips

    Selects the per-field tooltip view based on the ``formfield``
    request variable; the response payload itself is empty.
    """
    field_name = request.vars.get("formfield", None)
    if not field_name:
        return {}
    response.view = "pr/ajaxtips/%s.html" % field_name
    return {}
# END =========================================================================
|
# -*- coding: utf-8 -*-
""" Disaster Victim Identification, Controllers """
# Bail out with a 404 when the DVI module is disabled in deployment settings.
if not settings.has_module(c):
    raise HTTP(404, body="Module disabled: %s" % c)
# -----------------------------------------------------------------------------
def s3_menu_postp():
    """Post-process the module menu: add shortcuts for the most recently
    viewed body and person records under an "Open recent" entry.

    Mutates ``response.menu_options`` in place (web2py controller helper).
    """
    # @todo: rewrite this for new framework
    menu_selected = []
    body_id = s3base.s3_get_last_record_id("dvi_body")
    if body_id:
        body = s3db.dvi_body
        query = (body.id == body_id)
        record = db(query).select(body.id, body.pe_label,
                                  limitby=(0, 1)).first()
        if record:
            label = record.pe_label
            # NOTE(review): assumes menu_options[-3] is this module's menu
            # entry — confirm against the menu layout.
            response.menu_options[-3][-1].append(
                [T("Candidate Matches for Body %(label)s") % dict(label=label),
                 False, URL(f="person",
                            vars=dict(match=record.id))]
            )
            menu_selected.append(
                ["%s: %s" % (T("Body"), label),
                 False, URL(f="body", args=[record.id])]
            )
    person_id = s3base.s3_get_last_record_id("pr_person")
    if person_id:
        person = s3db.pr_person
        query = (person.id == person_id)
        record = db(query).select(person.id, limitby=(0, 1)).first()
        if record:
            name = s3db.pr_person_id().represent(record.id)
            menu_selected.append(
                ["%s: %s" % (T("Person"), name),
                 False, URL(f="person", args=[record.id])]
            )
    if menu_selected:
        menu_selected = [T("Open recent"), True, None, menu_selected]
        response.menu_options.append(menu_selected)
# -----------------------------------------------------------------------------
def index():
    """ Module's Home Page

    Counts undeleted bodies grouped by identification status and hands
    the totals to the view as a JSON-encoded (label, count) list.
    """
    module_name = settings.modules[c].get("name_nice", T("Disaster Victim Identification"))
    btable = s3db.dvi_body
    itable = s3db.dvi_identification
    query = (btable.deleted == False)
    # Left join so bodies without an identification record count as None.
    left = itable.on(itable.pe_id == btable.pe_id)
    body_count = btable.id.count()
    rows = db(query).select(body_count,
                            itable.status,
                            left=left,
                            groupby=itable.status)
    numbers = {None: 0}
    for row in rows:
        numbers[row[itable.status]] = row[body_count]
    total = sum(numbers.values())
    dvi_id_status = dict(s3db.dvi_id_status)
    # None (no identification record) is shown as "unidentified".
    dvi_id_status[None] = T("unidentified")
    statistics = []
    for status in dvi_id_status:
        count = numbers.get(status) or 0
        statistics.append((str(dvi_id_status[status]), count))
    response.title = module_name
    return dict(module_name=module_name,
                total=total,
                status=json.dumps(statistics))
# -----------------------------------------------------------------------------
def recreq():
    """ Recovery Requests List """
    table = s3db.dvi_recreq
    # Default the reporting person to whoever is logged in
    table.person_id.default = auth.s3_logged_in_person()
    def prep(r):
        # On interactive create (no record yet) hide the outcome fields,
        # which only make sense once the recovery has happened
        if r.interactive and not r.record:
            table.status.readable = False
            table.status.writable = False
            table.bodies_recovered.readable = False
            table.bodies_recovered.writable = False
        return True
    s3.prep = prep
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def morgue():
    """ Morgue Registry: REST controller with a details/bodies tab header """
    morgue_tabs = [(T("Morgue Details"), ""),
                   (T("Bodies"), "body"),
                   ]
    rheader = S3ResourceHeader([[(T("Morgue"), "name")]
                                ], tabs=morgue_tabs)
    # Pre-processor
    def prep(r):
        # Function to call for all Site Instance Types
        from s3db.org import org_site_prep
        org_site_prep(r)
        return True
    s3.prep = prep
    return s3_rest_controller(rheader = rheader)
# -----------------------------------------------------------------------------
def body():
    """ Dead Bodies Registry """
    gender_opts = s3db.pr_gender_opts
    # Relabel gender code 1 as "unknown" for unidentified remains
    gender_opts[1] = T("unknown")
    ntable = s3db.pr_note
    # Hide the note status field in this registry
    ntable.status.readable = False
    ntable.status.writable = False
    dvi_tabs = [(T("Recovery"), ""),
                (T("Checklist"), "checklist"),
                (T("Images"), "image"),
                (T("Physical Description"), "physical_description"),
                (T("Effects Inventory"), "effects"),
                (T("Journal"), "note"),
                (T("Identification"), "identification"),
                ]
    rheader = S3ResourceHeader([[(T("ID Tag Number"), "pe_label")],
                                ["gender"],
                                ["age_group"],
                                ],
                               tabs=dvi_tabs)
    return s3_rest_controller(rheader=rheader)
# -----------------------------------------------------------------------------
def person():
    """ Missing Persons Registry (Match Finder) """
    table = s3db.pr_person
    s3.crud_strings["pr_person"].update(
        title_display = T("Missing Person Details"),
        title_list = T("Missing Persons"),
        label_list_button = T("List Missing Persons"),
        msg_list_empty = T("No Persons found"),
        msg_no_match = T("No Persons currently reported missing"))
    s3db.configure("pr_group_membership",
                   list_fields = ["id",
                                  "group_id",
                                  "group_head",
                                  "comments"
                                  ],
                   )
    # Person records here are read-only: matching only, no editing
    s3db.configure("pr_person",
                   deletable = False,
                   editable = False,
                   listadd = False,
                   list_fields = ["id",
                                  "first_name",
                                  "middle_name",
                                  "last_name",
                                  "picture",
                                  "gender",
                                  "age_group"
                                  ],
                   )
    def prep(r):
        # On the plain list view, restrict to candidate matches when a
        # ?match=<body_id> query parameter is given
        if not r.id and not r.method and not r.component:
            body_id = r.get_vars.get("match", None)
            body = db(db.dvi_body.id == body_id).select(db.dvi_body.pe_label,
                                                        limitby = (0, 1)
                                                        ).first()
            # NOTE(review): "label" is only referenced by the commented-out
            # subtitle below -- apparently leftover
            label = body and body.pe_label or "#%s" % body_id
            if body_id:
                query = dvi_match_query(body_id)
                r.resource.add_filter(query)
                s3.crud_strings["pr_person"].update(
                    #subtitle_list = T("Candidate Matches for Body %s" % label),
                    msg_no_match = T("No matching records found"))
        return True
    s3.prep = prep
    # @ToDo: Add to crud_fields
    field = s3db.pr_person_details.missing.default = True
    table.age_group.readable = True
    table.age_group.writable = True
    # Show only missing persons in list views
    if len(request.args) == 0:
        from s3 import FS
        s3.filter = (FS("person_details.missing") == True)
    mpr_tabs = [(T("Missing Report"), "missing_report"),
                (T("Person Details"), None),
                (T("Physical Description"), "physical_description"),
                (T("Images"), "image"),
                (T("Identity"), "identity"),
                (T("Address"), "address"),
                (T("Contact Data"), "contact"),
                (T("Journal"), "note"),
                ]
    rheader = lambda r: s3db.pr_rheader(r, tabs=mpr_tabs)
    return s3_rest_controller("pr", "person",
                              main = "first_name",
                              extra = "last_name",
                              rheader = rheader,
                              )
# -------------------------------------------------------------------------
def dvi_match_query(body_id):
    """
        Get a query for candidate matches between the missing
        persons registry and a dead body

        @param body_id: the dvi_body record ID
    """
    ptable = s3db.pr_person
    pdtable = s3db.pr_person_details
    ntable = s3db.pr_note
    btable = s3db.dvi_body
    # Base query: missing persons that have a "last seen" note (status == 1)
    query = ((ptable.deleted == False) & \
             (pdtable.person_id == ptable.id) & \
             (pdtable.missing == True) & \
             (ntable.pe_id == ptable.pe_id) & \
             (ntable.status == 1))
    # FIX: look the body up by its record ID ("id"); the original filtered
    # on a non-existent "body_id" field, so the body was never found and
    # the date/age/gender restrictions below were never applied.
    body = db(btable.id == body_id).select(btable.date_of_recovery,
                                           btable.age_group,
                                           btable.gender,
                                           limitby = (0, 1)
                                           ).first()
    if not body:
        # Unknown body: fall back to the unrestricted base query
        return query
    # last seen should be before date of recovery
    if body.date_of_recovery:
        q = ((ntable.timestmp <= body.date_of_recovery) | \
             (ntable.timestmp == None))
        query &= q
    # age group should match (group 1 = unknown matches anything)
    if body.age_group and body.age_group != 1:
        q = ((ptable.age_group == None) | \
             (ptable.age_group == 1) | \
             (ptable.age_group == body.age_group))
        query &= q
    # gender should match (gender 1 = unknown matches anything)
    if body.gender and body.gender != 1:
        q = ((ptable.gender == None) | \
             (ptable.gender == 1) | \
             (ptable.gender == body.gender))
        query &= q
    return query
# -----------------------------------------------------------------------------
def tooltip():
    """Serve the Ajax tooltip view for the requested form field."""
    field_name = request.vars.get("formfield", None)
    if field_name:
        # Point the response at the per-field tooltip template
        response.view = "pr/ajaxtips/%s.html" % field_name
    return {}
# END =========================================================================
|
en
| 0.381836
|
# -*- coding: utf-8 -*- Disaster Victim Identification, Controllers # ----------------------------------------------------------------------------- # @todo: rewrite this for new framework # ----------------------------------------------------------------------------- Module's Home Page # ----------------------------------------------------------------------------- Recovery Requests List # ----------------------------------------------------------------------------- Morgue Registry # Pre-processor # Function to call for all Site Instance Types # ----------------------------------------------------------------------------- Dead Bodies Registry # ----------------------------------------------------------------------------- Missing Persons Registry (Match Finder) #subtitle_list = T("Candidate Matches for Body %s" % label), # @ToDo: Add to crud_fields # Show only missing persons in list views # ------------------------------------------------------------------------- Get a query for candidate matches between the missing persons registry and a dead body @param body_id: the dvi_body record ID # last seen should be before date of recovery # age group should match # gender should match # ----------------------------------------------------------------------------- Ajax Tooltips # END =========================================================================
| 2.10786
| 2
|
Solutions/pyodbcplaystocksTask7.py
|
DuongDo-Intersystems/node-odbc
| 0
|
6629169
|
<filename>Solutions/pyodbcplaystocksTask7.py
"""
PURPOSE: Simulate adding stocks to your stock portfolio and see how you would have done.
NOTES: When running the application,
1. Choose option 1 to view top 10 stocks.
2. Choose option 3 to add 2 or 3 stocks to your portfolio (using names from top 10 and 2016-08-12).
3. Choose option 6 using date 2017-08-10 to view your % Gain or Loss after a year.
"""
from datetime import datetime
import pyodbc
# Find top 10 stocks on a particular date
def find_top_on_date(connection, date):
    """Print the ten stocks with the highest closing price on *date*
    (a 'YYYY-MM-DD' string), one tab-separated line per stock."""
    cursor = connection.cursor()
    query = ("SELECT distinct top 10 transdate,name,stockclose,stockopen,high,low,volume FROM Demo.Stock "
             "WHERE transdate = ? ORDER BY stockclose desc")
    print("Date\t\tName\tOpening Price\tDaily High\tDaily Low\tClosing Price\tVolume")
    for record in cursor.execute(query, datetime.strptime(date, "%Y-%m-%d")):
        for field in record:
            print("{}\t".format(field), end='')
        print("")
# Create portfolio table
def create_portfolio_table(connection):
    """Create the Demo.Portfolio table; report failures instead of raising."""
    cursor = connection.cursor()
    ddl = ("CREATE TABLE Demo.Portfolio(Name varchar(50) unique, PurchaseDate date, "
           "PurchasePrice numeric(10,4), Shares int, DateTimeUpdated datetime)")
    try:
        cursor.execute(ddl)
        print("Created Demo.Portfolio table successfully.")
        connection.commit()
    except Exception as error:
        # Most commonly: the table already exists
        print("Error creating portfolio: " + str(error))
# Add item to portfolio
def add_portfolio_item(connection, name, purchase_date, price, shares):
    """Insert one holding into Demo.Portfolio; report errors instead of raising."""
    try:
        insert_sql = "INSERT INTO Demo.Portfolio (name,PurchaseDate,PurchasePrice,Shares,DateTimeUpdated) VALUES (?,?,?,?,?)"
        connection.cursor().execute(
            insert_sql,
            name.encode('utf-8'),
            datetime.strptime(purchase_date, "%Y-%m-%d"),
            float(price),
            float(shares),
            datetime.now(),
        )
        print("Added new line item for stock: {}.".format(name))
        connection.commit()
    except Exception as error:
        # e.g. duplicate name (unique constraint) or malformed date/number
        print("Error adding to portfolio: " + str(error))
# Update item in portfolio
def update_portfolio_item(connection, name, purchase_date, price, shares):
    """Overwrite the stored purchase details for *name* in Demo.Portfolio."""
    cursor = connection.cursor()
    cursor.execute(
        "UPDATE Demo.Portfolio SET purchaseDate = ?, purchasePrice= ?, shares = ?, DateTimeUpdated= ? WHERE name= ?",
        datetime.strptime(purchase_date, "%Y-%m-%d"),
        float(price),
        float(shares),
        datetime.now(),
        name.encode('utf-8'),
    )
    # rowcount tells us whether the WHERE clause matched any holding
    if cursor.rowcount > 0:
        print("Updated {} successfully.".format(name))
    else:
        print("{} does not exist.".format(name))
    connection.commit()
# Delete item from portfolio
def delete_portfolio_table(connection, name):
    """Remove the holding called *name* from Demo.Portfolio."""
    cursor = connection.cursor()
    cursor.execute("DELETE FROM Demo.Portfolio WHERE name = ?", name.encode('utf-8'))
    # rowcount > 0 means the DELETE matched an existing holding
    if cursor.rowcount > 0:
        print("Deleted {} successfully.".format(name))
    else:
        print("{} does not exist.".format(name))
    connection.commit()
# View your portfolio to know % Gain or Loss
def view_portfolio_table(connection, trans_date):
    """Show each holding's value on *trans_date* together with its % change
    and absolute gain/loss versus the purchase price."""
    join_sql = ("SELECT pf.name, pf.purchaseprice, pf.purchaseDate, st.stockclose, pf.shares, pf.DateTimeUpdated "
                "FROM Demo.Portfolio as pf JOIN Demo.Stock as st on st.name = pf.name WHERE st.Transdate = ?")
    cursor = connection.cursor()
    rows = cursor.execute(join_sql, datetime.strptime(trans_date, "%Y-%m-%d"))
    print("Name\tPurchase Date\tPurchase Price\tStock Close\tShares\tDatetime Updated\t\t% Change\tGain or Loss")
    for name, purchase_price, purchase_date, stock_close, shares, updated in rows:
        percent_change = (stock_close - purchase_price) / purchase_price * 100
        # Gain/loss is the change in total position value, rounded to cents
        gain_or_loss = round(stock_close * shares - purchase_price * shares, 2)
        print("{}\t{}\t{}\t\t{}\t{}\t{}\t{}\t{}"
              .format(name, purchase_date, purchase_price, stock_close,
                      shares, updated, percent_change, gain_or_loss))
# Task 2: View top 10 stocks for selected date
# Note: Choose 2016/08/12 for date
def task_view_top10_stock(connection):
    """Prompt for a date (YYYY-MM-DD) and print that day's top-10 stocks."""
    date = input("On which date? (YYYY-MM-DD) ")
    find_top_on_date(connection, date)
# Task 3: Create Portfolio Table
def task_create_portfolio(connection):
    """Create the Demo.Portfolio table."""
    print("Create portfolio")
    create_portfolio_table(connection)
# Task 4: Add item to Portfolio table
# Note: Choose stock name using list of stocks generated by Task 2
def task_add_to_portfolio(connection):
    """Prompt for one holding's details and insert it into the portfolio."""
    print("Add to portfolio")
    name = input("Name: ")
    date = input("Date: ")
    price = input("Price: ")
    shares = input("Number of shares: ")
    add_portfolio_item(connection, name, date, price, shares)
# Task 5: Update item in Portfolio table
def task_update_portfolio(connection):
    """Prompt for new details of an existing holding and update it."""
    print("Update portfolio")
    name = input("Which stock you are going to update: ")
    date = input("New Date: ")
    price = input("New Price: ")
    shares = input("New Number of shares: ")
    update_portfolio_item(connection, name, date, price, shares)
# Task 6: Delete item from Portfolio table
def task_delete_portfolio(connection):
    """Prompt for a stock name and delete it from the portfolio."""
    print("Delete from portfolio")
    name = input("Which stock you want to delete? ")
    delete_portfolio_table(connection, name)
# Task 7: View your Portfolio to see how much you gain/loss
# Note: Choose option 3 to add 2 or 3 stocks to your portfolio (using names from top 10 and 2016-08-12 as date);
# then choose option 6 using date 2017-08-10 to view your % Gain or Loss after a year.
def task_view_porfolio(connection):
    """Prompt for a sell date and show the portfolio's gain/loss on it."""
    print("View portfolio")
    trans_date = input("Selling on which date? (YYYY-MM-DD) ")
    view_portfolio_table(connection, trans_date)
# Execute task based on user input
def execute_selection(selection, connection):
    """Dispatch a numeric menu choice (1-6) to the matching task handler.
    Unknown selections are silently ignored, as before."""
    handlers = {
        1: task_view_top10_stock,
        2: task_create_portfolio,
        3: task_add_to_portfolio,
        4: task_update_portfolio,
        5: task_delete_portfolio,
        6: task_view_porfolio,
    }
    handler = handlers.get(selection)
    if handler is not None:
        handler(connection)
# Get connection details from config file
def get_connection_info(file_name):
    """Parse ``key: value`` connection settings from *file_name*.

    All whitespace is stripped from each line, so ``port : 1972`` becomes
    ``port:1972``. Returns a dict mapping parameter names to string values.
    """
    # Initial empty dictionary to store connection details
    connections = {}
    # Open config file to get connection info
    with open(file_name) as config_file:
        for line in config_file:
            # remove all white space (space, tab, new line)
            line = ''.join(line.split())
            # FIX: tolerate blank lines (previously a ValueError)
            if not line:
                continue
            # FIX: split on the first ':' only, so values containing a
            # colon (e.g. URLs like http://host:port) are kept intact
            connection_param, connection_value = line.split(":", 1)
            connections[connection_param] = connection_value
    return connections
def run():
    """Connect to InterSystems IRIS via ODBC and run the interactive menu."""
    # Retrieve connection information from configuration file
    connection_detail = get_connection_info("connection.config")
    ip = connection_detail["ip"]
    port = int(connection_detail["port"])
    namespace = connection_detail["namespace"]
    username = connection_detail["username"]
    password = connection_detail["password"]
    driver = "{InterSystems ODBC}"
    # Create connection to InterSystems IRIS
    connection_string = 'DRIVER={};SERVER={};PORT={};DATABASE={};UID={};PWD={}' \
        .format(driver, ip, port, namespace, username, password)
    connection = pyodbc.connect(connection_string)
    print("Connected to InterSystems IRIS")
    # Starting interactive prompt
    while True:
        print("1. View top 10")
        print("2. Create Portfolio table")
        print("3. Add to Portfolio")
        print("4. Update Portfolio")
        print("5. Delete from Portfolio")
        print("6. View Portfolio")
        print("7. Quit")
        # NOTE(review): int() raises ValueError on non-numeric input, so the
        # menu crashes instead of re-prompting -- consider try/except here.
        selection = int(input("What would you like to do? "))
        if selection == 7:
            break
        elif selection not in range(1, 8):
            print("Invalid option. Try again!")
            continue
        execute_selection(selection, connection)
# Script entry point: launch the interactive menu loop.
if __name__ == '__main__':
    run()
|
<filename>Solutions/pyodbcplaystocksTask7.py
"""
PURPOSE: Simulate adding stocks to your stock portfolio and see how you would have done.
NOTES: When running the application,
1. Choose option 1 to view top 10 stocks.
2. Choose option 3 to add 2 or 3 stocks to your portfolio (using names from top 10 and 2016-08-12).
3. Choose option 6 using date 2017-08-10 to view your % Gain or Loss after a year.
"""
from datetime import datetime
import pyodbc
# Find top 10 stocks on a particular date
def find_top_on_date(connection, date):
cursor = connection.cursor()
sql = "SELECT distinct top 10 transdate,name,stockclose,stockopen,high,low,volume FROM Demo.Stock " \
"WHERE transdate = ? ORDER BY stockclose desc"
print("Date\t\tName\tOpening Price\tDaily High\tDaily Low\tClosing Price\tVolume")
rows = cursor.execute(sql, datetime.strptime(date, "%Y-%m-%d"))
for row in rows:
for item in row:
print("{}\t".format(item), end='')
print("")
# Create portfolio table
def create_portfolio_table(connection):
cursor = connection.cursor()
create_table = "CREATE TABLE Demo.Portfolio(Name varchar(50) unique, PurchaseDate date, " \
"PurchasePrice numeric(10,4), Shares int, DateTimeUpdated datetime)"
try:
cursor.execute(create_table)
print("Created Demo.Portfolio table successfully.")
connection.commit()
except Exception as e:
print("Error creating portfolio: " + str(e))
# Add item to portfolio
def add_portfolio_item(connection, name, purchase_date, price, shares):
try:
sql = "INSERT INTO Demo.Portfolio (name,PurchaseDate,PurchasePrice,Shares,DateTimeUpdated) VALUES (?,?,?,?,?)"
stock_name = name.encode('utf-8')
cursor = connection.cursor()
purchase_date = datetime.strptime(purchase_date, "%Y-%m-%d")
current_time = datetime.now()
cursor.execute(sql, stock_name, purchase_date, float(price), float(shares), current_time)
print("Added new line item for stock: {}.".format(name))
connection.commit()
except Exception as e:
print("Error adding to portfolio: " + str(e))
# Update item in portfolio
def update_portfolio_item(connection, name, purchase_date, price, shares):
sql = "UPDATE Demo.Portfolio SET purchaseDate = ?, purchasePrice= ?, shares = ?, DateTimeUpdated= ? WHERE name= ?"
cursor = connection.cursor()
stock_name = name.encode('utf-8')
purchase_date = datetime.strptime(purchase_date, "%Y-%m-%d")
current_time = datetime.now()
cursor.execute(sql, purchase_date, float(price), float(shares), current_time, stock_name)
if cursor.rowcount > 0:
print("Updated {} successfully.".format(name))
else:
print("{} does not exist.".format(name))
connection.commit()
# Delete item from portfolio
def delete_portfolio_table(connection, name):
sql = "DELETE FROM Demo.Portfolio WHERE name = ?"
cursor = connection.cursor()
stock_name = name.encode('utf-8')
cursor.execute(sql, stock_name)
if cursor.rowcount > 0:
print("Deleted {} successfully.".format(name))
else:
print("{} does not exist.".format(name))
connection.commit()
# View your portfolio to know % Gain or Loss
def view_portfolio_table(connection, trans_date):
sql = "SELECT pf.name, pf.purchaseprice, pf.purchaseDate, st.stockclose, pf.shares, pf.DateTimeUpdated " \
"FROM Demo.Portfolio as pf JOIN Demo.Stock as st on st.name = pf.name WHERE st.Transdate = ?"
cursor = connection.cursor()
rows = cursor.execute(sql, datetime.strptime(trans_date, "%Y-%m-%d"))
print("Name\tPurchase Date\tPurchase Price\tStock Close\tShares\tDatetime Updated\t\t% Change\tGain or Loss")
for row in rows:
name = row[0]
purchase_price = row[1]
purchase_date = row[2]
stock_close = row[3]
shares = row[4]
current_time = row[5]
percent_change = (stock_close - purchase_price) / purchase_price * 100
start_value = purchase_price * shares
end_value = stock_close * shares
gain_or_loss = round(end_value - start_value, 2)
print("{}\t{}\t{}\t\t{}\t{}\t{}\t{}\t{}"
.format(name, purchase_date, purchase_price, stock_close,
shares, current_time, percent_change, gain_or_loss))
# Task 2: View top 10 stocks for selected date
# Note: Choose 2016/08/12 for date
def task_view_top10_stock(connection):
date = input("On which date? (YYYY-MM-DD) ")
find_top_on_date(connection, date)
# Task 3: Create Portfolio Table
def task_create_portfolio(connection):
print("Create portfolio")
create_portfolio_table(connection)
# Task 4: Add item to Portfolio table
# Note: Choose stock name using list of stocks generated by Task 2
def task_add_to_portfolio(connection):
print("Add to portfolio")
name = input("Name: ")
date = input("Date: ")
price = input("Price: ")
shares = input("Number of shares: ")
add_portfolio_item(connection, name, date, price, shares)
# Task 5: Update item in Portfolio table
def task_update_portfolio(connection):
print("Update portfolio")
name = input("Which stock you are going to update: ")
date = input("New Date: ")
price = input("New Price: ")
shares = input("New Number of shares: ")
update_portfolio_item(connection, name, date, price, shares)
# Task 6: Delete item from Portfolio table
def task_delete_portfolio(connection):
print("Delete from portfolio")
name = input("Which stock you want to delete? ")
delete_portfolio_table(connection, name)
# Task 7: View your Portfolio to see how much you gain/loss
# Note: Choose option 3 to add 2 or 3 stocks to your portfolio (using names from top 10 and 2016-08-12 as date);
# then choose option 6 using date 2017-08-10 to view your % Gain or Loss after a year.
def task_view_porfolio(connection):
print("View portfolio")
trans_date = input("Selling on which date? (YYYY-MM-DD) ")
view_portfolio_table(connection, trans_date)
# Execute task based on user input
def execute_selection(selection, connection):
if selection == 1:
task_view_top10_stock(connection)
elif selection == 2:
task_create_portfolio(connection)
elif selection == 3:
task_add_to_portfolio(connection)
elif selection == 4:
task_update_portfolio(connection)
elif selection == 5:
task_delete_portfolio(connection)
elif selection == 6:
task_view_porfolio(connection)
# Get connection details from config file
def get_connection_info(file_name):
# Initial empty dictionary to store connection details
connections = {}
# Open config file to get connection info
with open(file_name) as f:
lines = f.readlines()
for line in lines:
# remove all white space (space, tab, new line)
line = ''.join(line.split())
# get connection info
connection_param, connection_value = line.split(":")
connections[connection_param] = connection_value
return connections
def run():
# Retrieve connection information from configuration file
connection_detail = get_connection_info("connection.config")
ip = connection_detail["ip"]
port = int(connection_detail["port"])
namespace = connection_detail["namespace"]
username = connection_detail["username"]
password = connection_detail["password"]
driver = "{InterSystems ODBC}"
# Create connection to InterSystems IRIS
connection_string = 'DRIVER={};SERVER={};PORT={};DATABASE={};UID={};PWD={}' \
.format(driver, ip, port, namespace, username, password)
connection = pyodbc.connect(connection_string)
print("Connected to InterSystems IRIS")
# Starting interactive prompt
while True:
print("1. View top 10")
print("2. Create Portfolio table")
print("3. Add to Portfolio")
print("4. Update Portfolio")
print("5. Delete from Portfolio")
print("6. View Portfolio")
print("7. Quit")
selection = int(input("What would you like to do? "))
if selection == 7:
break
elif selection not in range(1, 8):
print("Invalid option. Try again!")
continue
execute_selection(selection, connection)
if __name__ == '__main__':
run()
|
en
| 0.768998
|
PURPOSE: Simulate adding stocks to your stock portfolio and see how you would have done. NOTES: When running the application, 1. Choose option 1 to view top 10 stocks. 2. Choose option 3 to add 2 or 3 stocks to your portfolio (using names from top 10 and 2016-08-12). 3. Choose option 6 using date 2017-08-10 to view your % Gain or Loss after a year. # Find top 10 stocks on a particular date # Create portfolio table # Add item to portfolio # Update item in portfolio # Delete item from portfolio # View your portfolio to know % Gain or Loss # Task 2: View top 10 stocks for selected date # Note: Choose 2016/08/12 for date # Task 3: Create Portfolio Table # Task 4: Add item to Portfolio table # Note: Choose stock name using list of stocks generated by Task 2 # Task 5: Update item in Portfolio table # Task 6: Delete item from Portfolio table # Task 7: View your Portfolio to see how much you gain/loss # Note: Choose option 3 to add 2 or 3 stocks to your portfolio (using names from top 10 and 2016-08-12 as date); # then choose option 6 using date 2017-08-10 to view your % Gain or Loss after a year. # Execute task based on user input # Get connection details from config file # Initial empty dictionary to store connection details # Open config file to get connection info # remove all white space (space, tab, new line) # get connection info # Retrieve connection information from configuration file # Create connection to InterSystems IRIS # Starting interactive prompt
| 3.623361
| 4
|
main.py
|
cottongin/twr-discord-bot
| 1
|
6629170
|
<reponame>cottongin/twr-discord-bot<gh_stars>1-10
import discord
from discord.ext import commands
import os, sys, traceback
from dotenv import load_dotenv
load_dotenv()
"""Based on https://gist.github.com/EvieePy/d78c061a4798ae81be9825468fe146be"""
def get_prefix(bot, message):
    """Resolve command prefixes: a direct bot mention, '!', or '.'."""
    prefix_check = commands.when_mentioned_or('!', '.')
    return prefix_check(bot, message)
# Cog extensions loaded at startup; each must expose a setup() entry point.
initial_extensions = ['plugins.chaintracker',
                      'plugins.faction',
                      'plugins.misc',
                      'plugins.owner',
                      'plugins.weather']
bot = commands.Bot(command_prefix=get_prefix, description='TWR Discord bot (maintained by cottongin)')
if __name__ == '__main__':
    # Load every extension; log and continue on failure so one broken cog
    # does not prevent the bot from starting.
    for extension in initial_extensions:
        try:
            bot.load_extension(extension)
        except Exception as e:
            print(f'Failed to load extension {extension}.', file=sys.stderr)
            traceback.print_exc()
@bot.event
async def on_ready():
    """Log the bot's identity and set its playing status once connected."""
    print(f'\n\nLogged in as: {bot.user.name} - {bot.user.id}\nVersion: {discord.__version__}\n')
    activity = discord.Game(name='TORN CITY')
    await bot.change_presence(activity=activity)
    print(f'Successfully logged in and booted...!')
# The token comes from the environment (.env); never hard-code or print it.
token = os.getenv("BOT_SECRET")
bot.run(token, bot=True, reconnect=True)
|
import discord
from discord.ext import commands
import os, sys, traceback
from dotenv import load_dotenv
load_dotenv()
"""Based on https://gist.github.com/EvieePy/d78c061a4798ae81be9825468fe146be"""
def get_prefix(bot, message):
prefixes = ['!', '.']
return commands.when_mentioned_or(*prefixes)(bot, message)
initial_extensions = ['plugins.chaintracker',
'plugins.faction',
'plugins.misc',
'plugins.owner',
'plugins.weather']
bot = commands.Bot(command_prefix=get_prefix, description='TWR Discord bot (maintained by cottongin)')
if __name__ == '__main__':
for extension in initial_extensions:
try:
bot.load_extension(extension)
except Exception as e:
print(f'Failed to load extension {extension}.', file=sys.stderr)
traceback.print_exc()
@bot.event
async def on_ready():
print(f'\n\nLogged in as: {bot.user.name} - {bot.user.id}\nVersion: {discord.__version__}\n')
activity = discord.Game(name='TORN CITY')
await bot.change_presence(activity=activity)
print(f'Successfully logged in and booted...!')
token = os.getenv("BOT_SECRET")
#print(token)
bot.run(token, bot=True, reconnect=True)
|
en
| 0.63211
|
Based on https://gist.github.com/EvieePy/d78c061a4798ae81be9825468fe146be #print(token)
| 2.348495
| 2
|
ternary_operator.py
|
NathanKr/python-playground
| 0
|
6629171
|
# Statement form of the conditional expression:
# msg = 'flag is True' if flag else 'flag is False'
flag = False
if flag:
    msg = 'flag is True'
else:
    msg = 'flag is False'
print(msg)
|
flag = False
msg = 'flag is True' if flag else 'flag is False'
print(msg)
|
none
| 1
| 2.504992
| 3
|
|
src/model/verify_vsrl.py
|
tengyu-liu/Part-GPNN
| 0
|
6629172
|
import datetime
import os
import pickle
import random
import sys
import time
import numpy as np
from config import flags
from dataloader_parallel import DataLoader
import metadata
import metrics
import vsrl_eval
vcoco_root = '/home/tengyu/Data/mscoco/v-coco'
def get_vcocoeval(imageset):
    """Build a VCOCOeval instance for the given split name (e.g. 'train'),
    pointing at the annotation, instance, and split-ID files under vcoco_root."""
    return vsrl_eval.VCOCOeval(os.path.join(vcoco_root, 'data/vcoco/vcoco_{}.json'.format(imageset)),
                               os.path.join(vcoco_root, 'data/instances_vcoco_all_2014.json'),
                               os.path.join(vcoco_root, 'data/splits/vcoco_{}.ids'.format(imageset)))
def vcoco_evaluation(vcocoeval, imageset, all_results, name, method):
    """Dump *all_results* to a detections pickle and run V-COCO evaluation.

    @param vcocoeval: a VCOCOeval instance for the split
    @param imageset: split name used in the pickle file name
    @param all_results: list of per-image detection results
    @param name: experiment name (subdirectory under ./eval)
    @param method: aggregation label ('SUM'/'MAX'/'MEAN'), used in the file name
    """
    print('\n%s: ' % method, end='')
    det_file = os.path.join(os.path.dirname(__file__), 'eval', name,
                            '%s_detections[%s].pkl' % (imageset, method))
    # FIX: ensure the output directory exists instead of failing on a
    # fresh checkout, and close the pickle file deterministically instead
    # of leaking the handle (the original relied on GC to flush/close it).
    os.makedirs(os.path.dirname(det_file), exist_ok=True)
    with open(det_file, 'wb') as f:
        pickle.dump(all_results, f)
    vcocoeval._do_eval(det_file, ovr_thresh=0.5)
    print()
train_vcocoeval = get_vcocoeval('train')
# val_vcocoeval = get_vcocoeval('val')
# test_vcocoeval = get_vcocoeval('test')
# Fixed seeds so the data loader's shuffling is reproducible across runs
random.seed(0)
np.random.seed(0)
# NOTE(review): loaded but never referenced below -- apparently leftover
obj_action_pair = pickle.load(open(os.path.join(os.path.dirname(__file__), 'data', 'obj_action_pairs.pkl'), 'rb'))
train_loader = DataLoader('train', flags.node_num, negative_suppression=flags.negative_suppression, n_jobs=flags.n_jobs, part_weight=flags.part_weight)
# val_loader = DataLoader('val', flags.node_num, negative_suppression=flags.negative_suppression, n_jobs=flags.n_jobs, part_weight=flags.part_weight)
# test_loader = DataLoader('test', flags.node_num, negative_suppression=flags.negative_suppression, n_jobs=flags.n_jobs, part_weight=flags.part_weight)
train_loader.shuffle()
train_loader.prefetch()
# NOTE(review): item / total_data_time / total_tf_time are never updated or
# read below -- apparently leftovers from a timing loop.
item = 0
total_data_time = 0
total_tf_time = 0
all_results_sum = []
all_results_mean = []
all_results_max = []
# Drain the training loader, accumulating per-image detection results
while True:
    t0 = time.time()
    res = train_loader.fetch()
    if res is None:
        break
    node_features, edge_features, adj_mat, gt_action_labels, gt_action_roles, gt_strength_level, \
    part_human_ids, human_boxes, pairwise_label_mask, batch_node_num, fns, \
    obj_nums, part_nums, obj_boxes, obj_classes, img_ids = res
    all_results_sum, all_results_max, all_results_mean = metrics.append_results(
        all_results_sum, all_results_max, all_results_mean, human_boxes,
        part_human_ids, gt_action_labels, gt_action_roles, obj_nums, obj_boxes, obj_classes, img_ids)
# Evaluate the summed aggregation; MAX/MEAN variants kept disabled below
vcoco_evaluation(train_vcocoeval, 'train', all_results_sum, flags.name, 'SUM')
# vcoco_evaluation(train_vcocoeval, 'train', all_results_max, flags.name, 'MAX')
# vcoco_evaluation(train_vcocoeval, 'train', all_results_mean, flags.name, 'MEAN')
|
import datetime
import os
import pickle
import random
import sys
import time
import numpy as np
from config import flags
from dataloader_parallel import DataLoader
import metadata
import metrics
import vsrl_eval
vcoco_root = '/home/tengyu/Data/mscoco/v-coco'
def get_vcocoeval(imageset):
return vsrl_eval.VCOCOeval(os.path.join(vcoco_root, 'data/vcoco/vcoco_{}.json'.format(imageset)),
os.path.join(vcoco_root, 'data/instances_vcoco_all_2014.json'),
os.path.join(vcoco_root, 'data/splits/vcoco_{}.ids'.format(imageset)))
def vcoco_evaluation(vcocoeval, imageset, all_results, name, method):
print('\n%s: '%method, end='')
det_file = os.path.join(os.path.dirname(__file__), 'eval', name, '%s_detections[%s].pkl'%(imageset, method))
pickle.dump(all_results, open(det_file, 'wb'))
vcocoeval._do_eval(det_file, ovr_thresh=0.5)
print()
train_vcocoeval = get_vcocoeval('train')
# val_vcocoeval = get_vcocoeval('val')
# test_vcocoeval = get_vcocoeval('test')
random.seed(0)
np.random.seed(0)
obj_action_pair = pickle.load(open(os.path.join(os.path.dirname(__file__), 'data', 'obj_action_pairs.pkl'), 'rb'))
train_loader = DataLoader('train', flags.node_num, negative_suppression=flags.negative_suppression, n_jobs=flags.n_jobs, part_weight=flags.part_weight)
# val_loader = DataLoader('val', flags.node_num, negative_suppression=flags.negative_suppression, n_jobs=flags.n_jobs, part_weight=flags.part_weight)
# test_loader = DataLoader('test', flags.node_num, negative_suppression=flags.negative_suppression, n_jobs=flags.n_jobs, part_weight=flags.part_weight)
train_loader.shuffle()
train_loader.prefetch()
item = 0
total_data_time = 0
total_tf_time = 0
all_results_sum = []
all_results_mean = []
all_results_max = []
while True:
t0 = time.time()
res = train_loader.fetch()
if res is None:
break
node_features, edge_features, adj_mat, gt_action_labels, gt_action_roles, gt_strength_level, \
part_human_ids, human_boxes, pairwise_label_mask, batch_node_num, fns, \
obj_nums, part_nums, obj_boxes, obj_classes, img_ids = res
all_results_sum, all_results_max, all_results_mean = metrics.append_results(
all_results_sum, all_results_max, all_results_mean, human_boxes,
part_human_ids, gt_action_labels, gt_action_roles, obj_nums, obj_boxes, obj_classes, img_ids)
vcoco_evaluation(train_vcocoeval, 'train', all_results_sum, flags.name, 'SUM')
# vcoco_evaluation(train_vcocoeval, 'train', all_results_max, flags.name, 'MAX')
# vcoco_evaluation(train_vcocoeval, 'train', all_results_mean, flags.name, 'MEAN')
|
en
| 0.169527
|
# val_vcocoeval = get_vcocoeval('val') # test_vcocoeval = get_vcocoeval('test') # val_loader = DataLoader('val', flags.node_num, negative_suppression=flags.negative_suppression, n_jobs=flags.n_jobs, part_weight=flags.part_weight) # test_loader = DataLoader('test', flags.node_num, negative_suppression=flags.negative_suppression, n_jobs=flags.n_jobs, part_weight=flags.part_weight) # vcoco_evaluation(train_vcocoeval, 'train', all_results_max, flags.name, 'MAX') # vcoco_evaluation(train_vcocoeval, 'train', all_results_mean, flags.name, 'MEAN')
| 1.85557
| 2
|
__init__.py
|
muhareb/rasa-chat
| 0
|
6629173
|
# Mycroft skill that acts as an interface between a Rasa chatbot and a user,
# allowing continuous voice dialog between the two
# Much thanks to Jamesmf for the code base and <NAME> for the technical advice
from adapt.intent import IntentBuilder
from mycroft.skills.core import MycroftSkill, intent_handler, intent_file_handler
from mycroft.messagebus.message import Message
from mycroft.util.log import LOG
import requests
from threading import Event
class ChatWithRasaSkill(MycroftSkill):
    def __init__(self):
        # Register the skill with Mycroft under its canonical name.
        super(ChatWithRasaSkill, self).__init__(name="ChatWithRasaSkill")
    def initialize(self):
        """Set up conversation state and the Rasa REST endpoint address."""
        # Set the address of your Rasa's REST endpoint
        self.conversation_active = False
        # Incremented per conversation so Rasa keeps separate dialog state
        self.convoID = 1
        #self.RASA_API = "http://localhost:5005/webhooks/rest/webhook"
        self.RASA_API = "http://asrajeh.ddns.net:5005/webhooks/rest/webhook"
        # Pending Rasa replies not yet delivered to the user
        self.messages = []
def query_rasa(self, prompt=None):
event=Event()
if self.conversation_active == False:
return
if prompt is None and len(self.messages) > 0:
prompt = self.messages[-1]
# Speak message to user and save the response
msg = self.get_response(prompt, num_retries=0)
i=0
notFound=True
if msg is None:
while (notFound and (i<2)):
msg = self.get_response(dialog='..', num_retries=0)
if msg is not None:
break
i+=1
# If user doesn't respond, quietly stop, allowing user to resume later
if msg is None:
return
# Else reset messages
self.messages = []
# Send post requests to said endpoint using the below format.
# "sender" is used to keep track of dialog streams for different users
data = requests.post(
self.RASA_API, json = {"message" : msg, "sender" : "user{}".format(self.convoID)}
)
#print(data)
#self.speak_dialog(data.json())
# A JSON Array Object is returned: each element has a user field along
# with a text, image, or other resource field signifying the output
# print(json.dumps(data.json(), indent=2))
for next_response in data.json():
if "text" in next_response:
self.messages.append(next_response["text"])
# Output all but one of the Rasa dialogs
if len(self.messages) > 1:
for rasa_message in self.messages[:-1]:
print(rasa_message)
# Kills code when Rasa stop responding
if len(self.messages) == 0:
self.messages = ["no response from rasa"]
return
# Use the last dialog from Rasa to prompt for next input from the user
prompt = self.messages[-1]
# If user doesn't respond, quietly stop, allowing user to resume later
if msg == 'يعطيك العافيه':
# Speak message to user and save the response
self.get_response(prompt, num_retries=0)
return
# Allows a stream of user inputs by re-calling query_rasa recursively
# It will only stop when either user or Rasa stops providing data
return self.query_rasa(prompt)
@intent_file_handler("Chatwithrasa.intent")
def handle_talk_to_rasa_intent(self, message):
print('hi')
self.convoID += 1
self.conversation_active = True
prompt = "مرحبا بك في التشات بوت الصحي"
self.query_rasa(prompt)
# Resume chat activator that would resume the conversation thread of a chat
@intent_file_handler("Resume.intent")
def handle_resume_chat(self, message):
self.conversation_active = True
self.query_rasa()
def stop(self):
self.conversation_active = False
def create_skill():
    """Mycroft loader entry point: instantiate and return the skill."""
    return ChatWithRasaSkill()
|
# Mycroft skill that acts as an interface between a Rasa chatbot and a user,
# allowing continuous voice dialog between the two
# Much thanks to Jamesmf for the code base and <NAME> for the technical advice
from adapt.intent import IntentBuilder
from mycroft.skills.core import MycroftSkill, intent_handler, intent_file_handler
from mycroft.messagebus.message import Message
from mycroft.util.log import LOG
import requests
from threading import Event
class ChatWithRasaSkill(MycroftSkill):
def __init__(self):
super(ChatWithRasaSkill, self).__init__(name="ChatWithRasaSkill")
def initialize(self):
# Set the address of your Rasa's REST endpoint
self.conversation_active = False
self.convoID = 1
#self.RASA_API = "http://localhost:5005/webhooks/rest/webhook"
self.RASA_API = "http://asrajeh.ddns.net:5005/webhooks/rest/webhook"
self.messages = []
def query_rasa(self, prompt=None):
event=Event()
if self.conversation_active == False:
return
if prompt is None and len(self.messages) > 0:
prompt = self.messages[-1]
# Speak message to user and save the response
msg = self.get_response(prompt, num_retries=0)
i=0
notFound=True
if msg is None:
while (notFound and (i<2)):
msg = self.get_response(dialog='..', num_retries=0)
if msg is not None:
break
i+=1
# If user doesn't respond, quietly stop, allowing user to resume later
if msg is None:
return
# Else reset messages
self.messages = []
# Send post requests to said endpoint using the below format.
# "sender" is used to keep track of dialog streams for different users
data = requests.post(
self.RASA_API, json = {"message" : msg, "sender" : "user{}".format(self.convoID)}
)
#print(data)
#self.speak_dialog(data.json())
# A JSON Array Object is returned: each element has a user field along
# with a text, image, or other resource field signifying the output
# print(json.dumps(data.json(), indent=2))
for next_response in data.json():
if "text" in next_response:
self.messages.append(next_response["text"])
# Output all but one of the Rasa dialogs
if len(self.messages) > 1:
for rasa_message in self.messages[:-1]:
print(rasa_message)
# Kills code when Rasa stop responding
if len(self.messages) == 0:
self.messages = ["no response from rasa"]
return
# Use the last dialog from Rasa to prompt for next input from the user
prompt = self.messages[-1]
# If user doesn't respond, quietly stop, allowing user to resume later
if msg == 'يعطيك العافيه':
# Speak message to user and save the response
self.get_response(prompt, num_retries=0)
return
# Allows a stream of user inputs by re-calling query_rasa recursively
# It will only stop when either user or Rasa stops providing data
return self.query_rasa(prompt)
@intent_file_handler("Chatwithrasa.intent")
def handle_talk_to_rasa_intent(self, message):
print('hi')
self.convoID += 1
self.conversation_active = True
prompt = "مرحبا بك في التشات بوت الصحي"
self.query_rasa(prompt)
# Resume chat activator that would resume the conversation thread of a chat
@intent_file_handler("Resume.intent")
def handle_resume_chat(self, message):
self.conversation_active = True
self.query_rasa()
def stop(self):
self.conversation_active = False
def create_skill():
return ChatWithRasaSkill()
|
en
| 0.865468
|
# Mycroft skill that acts as an interface between a Rasa chatbot and a user, # allowing continuous voice dialog between the two # Much thanks to Jamesmf for the code base and <NAME> for the technical advice # Set the address of your Rasa's REST endpoint #self.RASA_API = "http://localhost:5005/webhooks/rest/webhook" # Speak message to user and save the response # If user doesn't respond, quietly stop, allowing user to resume later # Else reset messages # Send post requests to said endpoint using the below format. # "sender" is used to keep track of dialog streams for different users #print(data) #self.speak_dialog(data.json()) # A JSON Array Object is returned: each element has a user field along # with a text, image, or other resource field signifying the output # print(json.dumps(data.json(), indent=2)) # Output all but one of the Rasa dialogs # Kills code when Rasa stop responding # Use the last dialog from Rasa to prompt for next input from the user # If user doesn't respond, quietly stop, allowing user to resume later # Speak message to user and save the response # Allows a stream of user inputs by re-calling query_rasa recursively # It will only stop when either user or Rasa stops providing data # Resume chat activator that would resume the conversation thread of a chat
| 2.793968
| 3
|
ToPCLI/Todoist.py
|
NAndLib/todoist-plugable-cli
| 6
|
6629174
|
<reponame>NAndLib/todoist-plugable-cli<filename>ToPCLI/Todoist.py
from config import token, cache_dir
from contextlib import contextmanager
import sys
import todoist
# Todoist API color code (30-49) for each human-readable color name.
COLORS_TO_CODE = {
    'BERRY_RED' : 30,
    'RED' : 31,
    'ORANGE' : 32,
    'YELLOW' : 33,
    'OLIVE_GREEN' : 34,
    'LIME_GREEN' : 35,
    'GREEN' : 36,
    'MINT_GREEN' : 37,
    'TEAL' : 38,
    'SKY_BLUE' : 39,
    'LIGHT_BLUE' : 40,
    'BLUE' : 41,
    'GRAPE' : 42,
    'VIOLET' : 43,
    'LAVENDER' : 44,
    'MAGENTA' : 45,
    'SALMON' : 46,
    'CHARCOAL' : 47,
    'GREY' : 48,
    'TAUPE' : 49,
}

# Inverse table derived mechanically from COLORS_TO_CODE so the two
# hand-maintained copies can never drift out of sync.
CODE_TO_COLORS = {code: name for name, code in COLORS_TO_CODE.items()}

# Todoist priority labels: p1 is the most urgent (API level 4), p4 the least.
PRIORITY_TO_LEVEL = {
    'p1' : 4,
    'p2' : 3,
    'p3' : 2,
    'p4' : 1
}

# Inverse table derived from PRIORITY_TO_LEVEL.
LEVEL_TO_PRIORITY = {level: label for label, level in PRIORITY_TO_LEVEL.items()}
class Todoist(object):
    """Thin wrapper around the Todoist sync API with an optional batch mode.

    While batch mode is on, sync()/commit() become no-ops so many operations
    can be queued locally and flushed in a single commit when batch mode is
    switched back off.
    """

    def __init__(self, batch_mode=False):
        # `token` and `cache_dir` come from the local config module.
        self.api = todoist.api.TodoistAPI(token, cache=cache_dir)
        self._batch_mode = batch_mode
        self.sync()

    def sync(self):
        """
        Only sync if batch_mode is False.
        """
        if self._batch_mode:
            return
        self.api.sync()

    def commit(self):
        """
        Only commit if batch_mode is False.
        """
        if self._batch_mode:
            return
        self.api.commit()

    def _api_func(func):
        """Decorator: run an API call; on any failure print the error to
        stderr and exit with status 1 (this is a CLI tool)."""
        def run_func(self, *args, **kwargs):
            try:
                result = func(self, *args, **kwargs)
            except Exception as e:
                print("Failed to run command.", file=sys.stderr)
                print(e)
                sys.exit(1)
            return result
        return run_func

    def _get_cmd(self, type, command):
        """
        Gets the correct command for the specified type.

        E.g. _get_cmd('items', 'add') -> self.api.items.add
        """
        cmd_type = getattr(self.api, type)
        return getattr(cmd_type, command)

    def batch_mode_is(self, state):
        """
        Commit queued changes before turning batch_mode off; then set it.
        """
        if not state:
            self.api.commit()
        self._batch_mode = state

    @contextmanager
    def batch_mode(self):
        """Context-manager form: queue operations inside, commit on exit."""
        self.batch_mode_is(True)
        yield
        self.batch_mode_is(False)

    @_api_func
    def get_state(self, item=None, *ids):
        """
        Returns the current API state.
        - item: if given, return only the state entry for this key.
        - ids: accepted for backward compatibility; currently unused
          (TODO confirm with callers whether id filtering is still wanted).

        BUG FIX: the original tested the undefined name `properties`,
        raising NameError whenever `item` was falsy (including the default).
        """
        if item is None:
            return self.api.state
        return self.api.state[item]

    @_api_func
    def get(self, type, id):
        """
        Gets "type" by id, returns None if type is not found.
        Only searches the local state if in batch mode.
        """
        get_by_id = self._get_cmd(type, 'get_by_id')
        return get_by_id(id, only_local=(self._batch_mode))

    @_api_func
    def do(self, type, action, id, **kwargs):
        """
        Perform the "type" "action" on the given id with the given args.
        - type: api action types, can be projects, items, labels, quick, etc.
        - action: the action to perform, can be complete, update, archive,
          etc.
        - id: the id of the object.
        - kwargs: arguments for the action to run.
        """
        cmd = self._get_cmd(type, action)
        return cmd(id, **kwargs)

    @_api_func
    def add(self, type, content, **kwargs):
        """
        Add/Create a "type" object with "content".
        - type: the type of object in Todoist. Types can be: projects, items,
          notes, project_notes, labels, and reminders.
        - content: the content of the object to be created.
        - **kwargs: appropriate keyword args for the type.
        """
        return self._get_cmd(type, 'add')(content, **kwargs)
|
from config import token, cache_dir
from contextlib import contextmanager
import sys
import todoist
CODE_TO_COLORS = {
30 : 'BERRY_RED',
31 : 'RED',
32 : 'ORANGE',
33 : 'YELLOW',
34 : 'OLIVE_GREEN',
35 : 'LIME_GREEN',
36 : 'GREEN',
37 : 'MINT_GREEN',
38 : 'TEAL',
39 : 'SKY_BLUE',
40 : 'LIGHT_BLUE',
41 : 'BLUE',
42 : 'GRAPE',
43 : 'VIOLET',
44 : 'LAVENDER',
45 : 'MAGENTA',
46 : 'SALMON',
47 : 'CHARCOAL',
48 : 'GREY',
49 : 'TAUPE',
}
COLORS_TO_CODE = {
'BERRY_RED' : 30,
'RED' : 31,
'ORANGE' : 32,
'YELLOW' : 33,
'OLIVE_GREEN' : 34,
'LIME_GREEN' : 35,
'GREEN' : 36,
'MINT_GREEN' : 37,
'TEAL' : 38,
'SKY_BLUE' : 39,
'LIGHT_BLUE' : 40,
'BLUE' : 41,
'GRAPE' : 42,
'VIOLET' : 43,
'LAVENDER' : 44,
'MAGENTA' : 45,
'SALMON' : 46,
'CHARCOAL' : 47,
'GREY' : 48,
'TAUPE' : 49,
}
PRIORITY_TO_LEVEL = {
'p1' : 4,
'p2' : 3,
'p3' : 2,
'p4' : 1
}
LEVEL_TO_PRIORITY = {
4 : 'p1',
3 : 'p2',
2 : 'p3',
1 : 'p4'
}
class Todoist(object):
def __init__(self, batch_mode=False):
self.api = todoist.api.TodoistAPI(token, cache=cache_dir)
self._batch_mode = batch_mode
self.sync()
def sync(self):
"""
Only sync if batch_mode is False.
"""
if self._batch_mode:
return
self.api.sync()
def commit(self):
"""
Only commit if batch_mode is False.
"""
if self._batch_mode:
return
self.api.commit()
def _api_func(func):
def run_func(self, *args, **kwargs):
try:
result = func(self, *args, **kwargs)
except Exception as e:
print("Failed to run command.", file=sys.stderr)
print(e)
sys.exit(1)
return result
return run_func
def _get_cmd(self, type, command):
"""
Gets the correct command for the specified type.
"""
cmd_type = getattr(self.api, type)
return getattr(cmd_type, command)
def batch_mode_is(self, state):
"""
Sync and commit changes before toggling batch_mode
"""
if not state:
self.api.commit()
self._batch_mode = state
@contextmanager
def batch_mode(self):
self.batch_mode_is(True)
yield
self.batch_mode_is(False)
@_api_func
def get_state(self, item=None, *ids):
"""
Returns a dictionary of the current state, keyed by ID.
- item: return the state of this specific item.
- ids: only return objects matching the given ids.
If no item is provided, the entire state is returned. Not keyed by ID.
"""
if not item and not properties:
return self.api.state
else:
return self.api.state[item]
@_api_func
def get(self, type, id):
"""
Gets "type" by id, returns None if type is not found.
Only searches the local state if in batch mode
"""
get_by_id = self._get_cmd(type, 'get_by_id')
return get_by_id(id, only_local=(self._batch_mode))
@_api_func
def do(self, type, action, id, **kwargs):
"""
Perform the "type" "action" on the given id with the given args.
- type: api action types, can be projects, items, labels, quick, etc.
- action: the action to perform, can be complete, update, archive,
etc.
- id: the id of the object.
- kwargs: arguments for the action to run.
"""
cmd = self._get_cmd(type, action)
return cmd(id, **kwargs)
@_api_func
def add(self, type, content, **kwargs):
"""
Add/Create a "type" object with "content".
- type: the type of object in Todoist. Types can be: projects, items,
notes, project_notes, labels, and reminders.
- content: the content of the object to be created.
- **kwargs: appropriate keyword args for the type.
"""
return self._get_cmd(type, 'add')(content, **kwargs)
|
en
| 0.760281
|
Only sync if batch_mode is False. Only commit if batch_mode is False. Gets the correct command for the specified type. Sync and commit changes before toggling batch_mode Returns a dictionary of the current state, keyed by ID. - item: return the state of this specific item. - ids: only return objects matching the given ids. If no item is provided, the entire state is returned. Not keyed by ID. Gets "type" by id, returns None if type is not found. Only searches the local state if in batch mode Perform the "type" "action" on the given id with the given args. - type: api action types, can be projects, items, labels, quick, etc. - action: the action to perform, can be complete, update, archive, etc. - id: the id of the object. - kwargs: arguments for the action to run. Add/Create a "type" object with "content". - type: the type of object in Todoist. Types can be: projects, items, notes, project_notes, labels, and reminders. - content: the content of the object to be created. - **kwargs: appropriate keyword args for the type.
| 2.45321
| 2
|
tests/core_tests/model_tests/position_dynamics_model_test.py
|
Cislunar-Explorers/CislunarSim
| 2
|
6629175
|
<reponame>Cislunar-Explorers/CislunarSim<filename>tests/core_tests/model_tests/position_dynamics_model_test.py
import unittest
from core.models.model_list import PositionDynamics
from core.state import StateTime
from utils.test_utils import state_1, d3456
class PositionDynamicsModelTest(unittest.TestCase):
    """
    This class tests the position dynamics model implementation.
    """

    def test_position_dynamics_model(self):
        """
        This function tests the position dynamics model's d_state function.
        """
        # Build the model from shared fixtures and take one derivative step.
        base_state = StateTime(state_1)
        dummy_pd = PositionDynamics(d3456)
        propagated_state = dummy_pd.d_state(base_state)
        # trivial tests that check to make sure the velocity components from the input to the output match exactly
        # (i.e. d(position)/dt equals the velocity in state_1 — presumably
        # (9, 10, 11); verify against the fixture if it changes)
        self.assertEqual(9.0, propagated_state["x"])
        self.assertEqual(10.0, propagated_state["y"])
        self.assertEqual(11.0, propagated_state["z"])


if __name__ == "__main__":
    unittest.main()
|
import unittest
from core.models.model_list import PositionDynamics
from core.state import StateTime
from utils.test_utils import state_1, d3456
class PositionDynamicsModelTest(unittest.TestCase):
"""
This class tests the position dynamics model implementation.
"""
def test_position_dynamics_model(self):
"""
This function tests the position dynamics model's d_state function.
"""
base_state = StateTime(state_1)
dummy_pd = PositionDynamics(d3456)
propagated_state = dummy_pd.d_state(base_state)
# trivial tests that check to make sure the velocity components from the input to the output match exactly
self.assertEqual(9.0, propagated_state["x"])
self.assertEqual(10.0, propagated_state["y"])
self.assertEqual(11.0, propagated_state["z"])
if __name__ == "__main__":
unittest.main()
|
en
| 0.78453
|
This class tests the position dynamics model implementation. This function tests the position dynamics model's d_state function. # trivial tests that check to make sure the velocity components from the input to the output match exactly
| 3.12044
| 3
|
nmap_scannner.py
|
sandlib/pythonpentest
| 174
|
6629176
|
<filename>nmap_scannner.py<gh_stars>100-1000
#!/usr/bin/env python
'''
Author: <NAME>
Date: February 2015
Name: nmap_scanner.py
Purpose: To scan a network
Copyright (c) 2015, <NAME> All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met: * Redistributions
of source code must retain the above copyright notice, this list of conditions and
the following disclaimer. * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution. * Neither the
name of the nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CHRISTOPHER DUFFY BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import sys

try:
    import nmap
except ImportError:
    # python-nmap is a hard requirement for this script.
    sys.exit("[!] Install the nmap library: pip install python-nmap")

# Argument Validator: usage is  <script> <targets> <ports>
if len(sys.argv) != 3:
    sys.exit("Please provide two arguments the first being the targets the second the ports")

ports = str(sys.argv[2])  # e.g. "22,80,443" or "1-1024"
addrs = str(sys.argv[1])  # target spec, e.g. "192.168.1.0/24"

scanner = nmap.PortScanner()
scanner.scan(addrs, ports)
for host in scanner.all_hosts():
    hostname = scanner[host].hostname()
    # BUG FIX: the original line `if “” in host:` used typographic quotes
    # (a SyntaxError), and both print lines applied `%` to print()'s return
    # value (None) rather than to the format string; the "not found" string
    # also had one placeholder but was given two arguments.
    if not hostname:
        print("The host's IP address is %s and it's hostname was not found" % (host))
    else:
        print("The host's IP address is %s and it's hostname is %s" % (host, hostname))
|
<filename>nmap_scannner.py<gh_stars>100-1000
#!/usr/bin/env python
'''
Author: <NAME>
Date: February 2015
Name: nmap_scanner.py
Purpose: To scan a network
Copyright (c) 2015, <NAME> All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met: * Redistributions
of source code must retain the above copyright notice, this list of conditions and
the following disclaimer. * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution. * Neither the
name of the nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CHRISTOPHER DUFFY BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import sys
try:
import nmap
except:
sys.exit("[!] Install the nmap library: pip install python-nmap")
# Argument Validator
if len(sys.argv) != 3:
sys.exit("Please provide two arguments the first being the targets the second the ports")
ports = str(sys.argv[2])
addrs = str(sys.argv[1])
scanner = nmap.PortScanner()
scanner.scan(addrs, ports)
for host in scanner.all_hosts():
if “” in host:
print("The host's IP address is %s and it's hostname was not found") % (host, scanner[host].hostname())
else:
print("The host's IP address is %s and it's hostname is %s") % (host, scanner[host].hostname())
|
en
| 0.731109
|
#!/usr/bin/env python Author: <NAME> Date: February 2015 Name: nmap_scanner.py Purpose: To scan a network Copyright (c) 2015, <NAME> All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CHRISTOPHER DUFFY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Argument Validator
| 2.421221
| 2
|
app/__init__.py
|
ronyldo12/PowerDNS-Admin
| 1
|
6629177
|
<gh_stars>1-10
from werkzeug.contrib.fixers import ProxyFix
from flask import Flask, request, session, redirect, url_for
from flask_login import LoginManager
from flask_sqlalchemy import SQLAlchemy
# Flask application object, configured from the local `config` module.
app = Flask(__name__)
app.config.from_object('config')
# Trust X-Forwarded-* headers when running behind a reverse proxy.
app.wsgi_app = ProxyFix(app.wsgi_app)

login_manager = LoginManager()
login_manager.init_app(app)

# SQLAlchemy handle shared by the whole application.
db = SQLAlchemy(app)


def enable_github_oauth(GITHUB_ENABLE):
    """Conditionally wire up GitHub OAuth.

    Returns an (oauth, github) pair of flask_oauthlib handles when
    GITHUB_ENABLE is truthy, otherwise (None, None). As a side effect it
    registers the /user/authorized callback route and the token getter.
    """
    if not GITHUB_ENABLE:
        return None, None
    # Imported lazily so the dependency is only needed when OAuth is enabled.
    from flask_oauthlib.client import OAuth
    oauth = OAuth(app)
    # Remote-app descriptor built entirely from GITHUB_OAUTH_* config keys.
    github = oauth.remote_app(
        'github',
        consumer_key=app.config['GITHUB_OAUTH_KEY'],
        consumer_secret=app.config['GITHUB_OAUTH_SECRET'],
        request_token_params={'scope': app.config['GITHUB_OAUTH_SCOPE']},
        base_url=app.config['GITHUB_OAUTH_URL'],
        request_token_url=None,
        access_token_method='POST',
        access_token_url=app.config['GITHUB_OAUTH_TOKEN'],
        authorize_url=app.config['GITHUB_OAUTH_AUTHORIZE']
    )

    @app.route('/user/authorized')
    def authorized():
        # OAuth callback: exchange the authorization response for a token.
        session['github_oauthredir'] = url_for('.authorized', _external=True)
        resp = github.authorized_response()
        if resp is None:
            # The user denied access or the provider reported an error.
            return 'Access denied: reason=%s error=%s' % (
                request.args['error'],
                request.args['error_description']
            )
        # Store the token in the session for the tokengetter below.
        session['github_token'] = (resp['access_token'], '')
        return redirect(url_for('.login'))

    @github.tokengetter
    def get_github_oauth_token():
        return session.get('github_token')

    return oauth, github


oauth, github = enable_github_oauth(app.config.get('GITHUB_OAUTH_ENABLE'))

# Imported at the bottom to avoid a circular import (views/models import `app`).
from app import views, models
|
from werkzeug.contrib.fixers import ProxyFix
from flask import Flask, request, session, redirect, url_for
from flask_login import LoginManager
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config.from_object('config')
app.wsgi_app = ProxyFix(app.wsgi_app)
login_manager = LoginManager()
login_manager.init_app(app)
db = SQLAlchemy(app)
def enable_github_oauth(GITHUB_ENABLE):
if not GITHUB_ENABLE:
return None, None
from flask_oauthlib.client import OAuth
oauth = OAuth(app)
github = oauth.remote_app(
'github',
consumer_key=app.config['GITHUB_OAUTH_KEY'],
consumer_secret=app.config['GITHUB_OAUTH_SECRET'],
request_token_params={'scope': app.config['GITHUB_OAUTH_SCOPE']},
base_url=app.config['GITHUB_OAUTH_URL'],
request_token_url=None,
access_token_method='POST',
access_token_url=app.config['GITHUB_OAUTH_TOKEN'],
authorize_url=app.config['GITHUB_OAUTH_AUTHORIZE']
)
@app.route('/user/authorized')
def authorized():
session['github_oauthredir'] = url_for('.authorized', _external=True)
resp = github.authorized_response()
if resp is None:
return 'Access denied: reason=%s error=%s' % (
request.args['error'],
request.args['error_description']
)
session['github_token'] = (resp['access_token'], '')
return redirect(url_for('.login'))
@github.tokengetter
def get_github_oauth_token():
return session.get('github_token')
return oauth, github
oauth, github = enable_github_oauth(app.config.get('GITHUB_OAUTH_ENABLE'))
from app import views, models
|
none
| 1
| 2.314334
| 2
|
|
datasets/dukemtmcreid.py
|
AlexMuresan/centroids-reid
| 53
|
6629178
|
<gh_stars>10-100
# encoding: utf-8
"""
Partially based on work by:
@author: liaoxingyu
@contact: <EMAIL>
Adapted and extended by:
@author: mikwieczorek
"""
import glob
import os.path as osp
import re
from collections import defaultdict
import pytorch_lightning as pl
from torch.utils.data import (DataLoader, Dataset, DistributedSampler,
SequentialSampler)
from .bases import (BaseDatasetLabelled, BaseDatasetLabelledPerPid,
ReidBaseDataModule, collate_fn_alternative, pil_loader)
from .samplers import get_sampler
from .transforms import ReidTransforms
class DukeMTMCreID(ReidBaseDataModule):
    """
    DukeMTMC-reID
    Reference:
    1. Ristani et al. Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking. ECCVW 2016.
    2. Zheng et al. Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in vitro. ICCV 2017.
    URL: https://github.com/layumi/DukeMTMC-reID_evaluation
    Dataset statistics:
    # identities: 1404 (train + query)
    # images:16522 (train) + 2228 (query) + 17661 (gallery)
    # cameras: 8
    Version that will not supply resampled instances
    """

    def __init__(self, cfg, **kwargs):
        super().__init__(cfg, **kwargs)
        # Split directories follow the standard DukeMTMC-reID layout under
        # DATASETS.ROOT_DIR.
        self.dataset_dir = cfg.DATASETS.ROOT_DIR
        self.train_dir = osp.join(self.dataset_dir, 'DukeMTMC-reID/bounding_box_train')
        self.query_dir = osp.join(self.dataset_dir, 'DukeMTMC-reID/query')
        self.gallery_dir = osp.join(self.dataset_dir, 'DukeMTMC-reID/bounding_box_test')

    def setup(self):
        """Parse the three splits and build the train/val datasets."""
        self._check_before_run()
        transforms_base = ReidTransforms(self.cfg)
        # Only the training identities are relabeled to a contiguous 0..N-1
        # range (needed for classification losses).
        train, train_dict = self._process_dir(self.train_dir, relabel=True)
        self.train_dict = train_dict
        self.train_list = train
        query, query_dict = self._process_dir(self.query_dir, relabel=False)
        gallery, gallery_dict = self._process_dir(self.gallery_dir, relabel=False)
        self.query_list = query
        self.gallery_list = gallery
        self.train = BaseDatasetLabelledPerPid(train_dict, transforms_base.build_transforms(is_train=True), self.num_instances, self.cfg.DATALOADER.USE_RESAMPLING)
        # Validation evaluates query images against the gallery, so both are
        # concatenated into one labelled dataset (query first — see num_query).
        self.val = BaseDatasetLabelled(query+gallery, transforms_base.build_transforms(is_train=False))
        self._print_dataset_statistics(train, query, gallery)
        # For reid_metic to evaluate properly
        num_query_pids, num_query_imgs, num_query_cams = self._get_imagedata_info(query)
        num_train_pids, num_train_imgs, num_train_cams = self._get_imagedata_info(train)
        self.num_query = len(query)
        self.num_classes = num_train_pids

    def _process_dir(self, dir_path, relabel=False):
        """Scan `dir_path` for images named '<pid>_c<camid>...jpg'.

        Returns (dataset, dataset_dict): `dataset` is a flat list of
        (img_path, pid, camid, idx) tuples; `dataset_dict` groups the same
        tuples by person id.
        """
        img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
        # File names encode '<person_id>_c<camera_id>'; pid may be negative
        # (the pattern allows a leading '-').
        pattern = re.compile(r'([-\d]+)_c(\d)')
        pid_container = set()
        for img_path in img_paths:
            pid, _ = map(int, pattern.search(img_path).groups())
            pid_container.add(pid)
        # Raw person id -> contiguous label (applied only when relabel=True).
        pid2label = {pid: label for label, pid in enumerate(pid_container)}
        dataset_dict = defaultdict(list)
        dataset = []
        for idx, img_path in enumerate(img_paths):
            pid, camid = map(int, pattern.search(img_path).groups())
            assert 1 <= camid <= 8
            camid -= 1 # index starts from 0
            if relabel: pid = pid2label[pid]
            dataset.append((img_path, pid, camid, idx))
            dataset_dict[pid].append((img_path, pid, camid, idx))
        return dataset, dataset_dict
|
# encoding: utf-8
"""
Partially based on work by:
@author: liaoxingyu
@contact: <EMAIL>
Adapted and extended by:
@author: mikwieczorek
"""
import glob
import os.path as osp
import re
from collections import defaultdict
import pytorch_lightning as pl
from torch.utils.data import (DataLoader, Dataset, DistributedSampler,
SequentialSampler)
from .bases import (BaseDatasetLabelled, BaseDatasetLabelledPerPid,
ReidBaseDataModule, collate_fn_alternative, pil_loader)
from .samplers import get_sampler
from .transforms import ReidTransforms
class DukeMTMCreID(ReidBaseDataModule):
"""
DukeMTMC-reID
Reference:
1. Ristani et al. Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking. ECCVW 2016.
2. Zheng et al. Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in vitro. ICCV 2017.
URL: https://github.com/layumi/DukeMTMC-reID_evaluation
Dataset statistics:
# identities: 1404 (train + query)
# images:16522 (train) + 2228 (query) + 17661 (gallery)
# cameras: 8
Version that will not supply resampled instances
"""
def __init__(self, cfg, **kwargs):
super().__init__(cfg, **kwargs)
self.dataset_dir = cfg.DATASETS.ROOT_DIR
self.train_dir = osp.join(self.dataset_dir, 'DukeMTMC-reID/bounding_box_train')
self.query_dir = osp.join(self.dataset_dir, 'DukeMTMC-reID/query')
self.gallery_dir = osp.join(self.dataset_dir, 'DukeMTMC-reID/bounding_box_test')
def setup(self):
self._check_before_run()
transforms_base = ReidTransforms(self.cfg)
train, train_dict = self._process_dir(self.train_dir, relabel=True)
self.train_dict = train_dict
self.train_list = train
query, query_dict = self._process_dir(self.query_dir, relabel=False)
gallery, gallery_dict = self._process_dir(self.gallery_dir, relabel=False)
self.query_list = query
self.gallery_list = gallery
self.train = BaseDatasetLabelledPerPid(train_dict, transforms_base.build_transforms(is_train=True), self.num_instances, self.cfg.DATALOADER.USE_RESAMPLING)
self.val = BaseDatasetLabelled(query+gallery, transforms_base.build_transforms(is_train=False))
self._print_dataset_statistics(train, query, gallery)
# For reid_metic to evaluate properly
num_query_pids, num_query_imgs, num_query_cams = self._get_imagedata_info(query)
num_train_pids, num_train_imgs, num_train_cams = self._get_imagedata_info(train)
self.num_query = len(query)
self.num_classes = num_train_pids
def _process_dir(self, dir_path, relabel=False):
img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
pattern = re.compile(r'([-\d]+)_c(\d)')
pid_container = set()
for img_path in img_paths:
pid, _ = map(int, pattern.search(img_path).groups())
pid_container.add(pid)
pid2label = {pid: label for label, pid in enumerate(pid_container)}
dataset_dict = defaultdict(list)
dataset = []
for idx, img_path in enumerate(img_paths):
pid, camid = map(int, pattern.search(img_path).groups())
assert 1 <= camid <= 8
camid -= 1 # index starts from 0
if relabel: pid = pid2label[pid]
dataset.append((img_path, pid, camid, idx))
dataset_dict[pid].append((img_path, pid, camid, idx))
return dataset, dataset_dict
|
en
| 0.715735
|
# encoding: utf-8 Partially based on work by: @author: liaoxingyu @contact: <EMAIL> Adapted and extended by: @author: mikwieczorek DukeMTMC-reID Reference: 1. Ristani et al. Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking. ECCVW 2016. 2. Zheng et al. Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in vitro. ICCV 2017. URL: https://github.com/layumi/DukeMTMC-reID_evaluation Dataset statistics: # identities: 1404 (train + query) # images:16522 (train) + 2228 (query) + 17661 (gallery) # cameras: 8 Version that will not supply resampled instances # For reid_metic to evaluate properly # index starts from 0
| 1.786966
| 2
|
nonlinear_equation/newton/newton.py
|
sunsetyuhi/numcalc_py
| 0
|
6629179
|
#Newton法による非線型方程式の解法プログラム
import numpy as np #NumPyライブラリ
import matplotlib.pyplot as plt #データ可視化ライブラリ
#解きたい方程式
def func_f(x):
    """Target equation f(x) = x^2 - 2; its roots are +/- sqrt(2)."""
    return x * x - 2.0
#Newton法(方程式の関数項、探索の開始点、微小量、誤差範囲、最大反復回数)
def newton(func_f, x0, eps=1e-10, error=1e-10, max_loop=100):
    """Solve func_f(x) = 0 by Newton's method with a numerical derivative.

    Args:
        func_f: function whose root is sought.
        x0: initial guess.
        eps: step for the central-difference derivative; also the threshold
            below which the derivative is treated as zero.
        error: convergence tolerance on successive iterates.
        max_loop: maximum number of iterations.

    Returns:
        The final (most accurate) iterate.

    Raises:
        ValueError: if the numerical derivative becomes (near) zero, which
            would make the Newton step diverge.
    """
    num_calc = 0  # iteration counter
    print("{:3d}: x = {:.15f}".format(num_calc, x0))
    while True:
        # Central-difference approximation of f'(x0).
        func_df = (func_f(x0 + eps) - func_f(x0 - eps)) / (2 * eps)
        if abs(func_df) <= eps:
            # Raise instead of quit(): the original killed the whole
            # interpreter, which makes the function unusable as a library.
            raise ValueError("abs(func_df) is too small (<= {})".format(eps))
        # Newton step.
        x1 = x0 - func_f(x0) / func_df
        num_calc += 1
        # Print the NEW iterate (the original printed the stale x0 here).
        print("{:3d}: x = {:.15f}".format(num_calc, x1))
        converged = abs(x1 - x0) <= error
        # Always adopt the newest iterate so it is also the returned value;
        # the original skipped this assignment on the convergence break and
        # returned the second-to-last iterate.
        x0 = x1
        if converged or max_loop <= num_calc:
            break
    print("x = {:.15f}".format(x0))
    return x0
#可視化(方程式の関数項、グラフ左端、グラフ右端、方程式の解)
def visualization(func_f, x_min, x_max, x_solved):
    """Plot f(x) over [x_min, x_max] and mark the numerical root x_solved."""
    plt.xlabel("$x$")
    plt.ylabel("$f(x)$")
    plt.grid()
    plt.axhline(0, color='#000000')  # the axis f(x) = 0
    # Sample the function on 500 points across the requested range.
    sample_x = np.arange(x_min, x_max, (x_max - x_min) / 500.0)
    plt.plot(sample_x, func_f(sample_x), label="$f(x)$", color='#ff0000')
    plt.scatter(x_solved, 0.0)  # mark the numerical solution
    plt.text(x_solved, 0.0, "$x$ = {:.9f}".format(x_solved),
             va='bottom', color='#0000ff')
    plt.show()
#メイン実行部
if __name__ == '__main__':
    # Solve the nonlinear equation with Newton's method, then plot the result.
    root = newton(func_f, -2.0)
    visualization(func_f, root - 1.0, root + 1.0, root)
|
#Newton法による非線型方程式の解法プログラム
import numpy as np #NumPyライブラリ
import matplotlib.pyplot as plt #データ可視化ライブラリ
#解きたい方程式
def func_f(x):
return x**2.0 -2.0
#Newton法(方程式の関数項、探索の開始点、微小量、誤差範囲、最大反復回数)
def newton(func_f, x0, eps=1e-10, error=1e-10, max_loop=100):
num_calc = 0 #計算回数
print("{:3d}: x = {:.15f}".format(num_calc, x0))
#ずっと繰り返す
while(True):
#中心差分による微分値
func_df = (func_f(x0 +eps) -func_f(x0 -eps))/(2*eps)
if(abs(func_df) <= eps): #傾きが0に近ければ止める
print("error: abs(func_df) is too small (<=", eps, ").")
quit()
#次の解を計算
x1 = x0 -func_f(x0)/func_df
num_calc += 1 #計算回数を数える
print("{:3d}: x = {:.15f}".format(num_calc, x0))
#「誤差範囲が一定値以下」または「計算回数が一定値以上」ならば終了
if(abs(x1-x0)<=error or max_loop<=num_calc):
break
#解を更新
x0 = x1
#最終的に得られた解
print("x = {:.15f}".format(x0))
return x0
#可視化(方程式の関数項、グラフ左端、グラフ右端、方程式の解)
def visualization(func_f, x_min, x_max, x_solved):
plt.xlabel("$x$") #x軸の名前
plt.ylabel("$f(x)$") #y軸の名前
plt.grid() #点線の目盛りを表示
plt.axhline(0, color='#000000') #f(x)=0の線
#関数
exact_x = np.arange(x_min,x_max, (x_max-x_min)/500.0)
exact_y = func_f(exact_x)
plt.plot(exact_x,exact_y, label="$f(x)$", color='#ff0000') #関数を折線グラフで表示
plt.scatter(x_solved,0.0) #数値解を点グラフで表示
plt.text(x_solved,0.0, "$x$ = {:.9f}".format(x_solved), va='bottom', color='#0000ff')
plt.show() #グラフを表示
#メイン実行部
if (__name__ == '__main__'):
#Newton法で非線型方程式の解を計算
solution = newton(func_f, -2.0)
#結果を可視化
visualization(func_f, solution-1.0, solution+1.0, solution)
|
ja
| 0.995332
|
#Newton法による非線型方程式の解法プログラム #NumPyライブラリ #データ可視化ライブラリ #解きたい方程式 #Newton法(方程式の関数項、探索の開始点、微小量、誤差範囲、最大反復回数) #計算回数 #ずっと繰り返す #中心差分による微分値 #傾きが0に近ければ止める #次の解を計算 #計算回数を数える #「誤差範囲が一定値以下」または「計算回数が一定値以上」ならば終了 #解を更新 #最終的に得られた解 #可視化(方程式の関数項、グラフ左端、グラフ右端、方程式の解) #x軸の名前 #y軸の名前 #点線の目盛りを表示 #f(x)=0の線 #関数 #関数を折線グラフで表示 #数値解を点グラフで表示 #グラフを表示 #メイン実行部 #Newton法で非線型方程式の解を計算 #結果を可視化
| 4.066316
| 4
|
smartcross/envs/action/__init__.py
|
opendilab/DI-smartcross
| 49
|
6629180
|
<gh_stars>10-100
from .sumo_action import SumoAction
from .sumo_action_runner import SumoActionRunner
|
from .sumo_action import SumoAction
from .sumo_action_runner import SumoActionRunner
|
none
| 1
| 1.054706
| 1
|
|
resources/lib/globals.py
|
otava5/plugin.video.netflix
| 0
|
6629181
|
<reponame>otava5/plugin.video.netflix
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 <NAME> (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Global addon constants
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
# Everything that is to be globally accessible must be defined in this module
# and initialized in GlobalVariables.init_globals.
# When reusing Kodi languageInvokers, only the code in the main module
# (addon.py or service.py) will be run every time the addon is called.
# All other code executed on module level will only be executed once, when
# the module is first imported on the first addon invocation.
from __future__ import absolute_import, division, unicode_literals
import collections
import os
import sys
try: # Python 3
from urllib.parse import parse_qsl, unquote, urlparse
except ImportError: # Python 2
from urllib2 import unquote
from urlparse import parse_qsl, urlparse
from future.utils import iteritems
import xbmc
import xbmcaddon
class GlobalVariables(object):
"""Encapsulation for global variables to work around quirks with
Kodi's reuseLanguageInvoker behavior"""
# pylint: disable=attribute-defined-outside-init
# pylint: disable=invalid-name, too-many-instance-attributes
# Values in the variables VIEW_* stand for a partial menu id,
# contained in the settings xml, example 'profiles' stand for id 'viewmodeprofiles'
VIEW_PROFILES = 'profiles'
VIEW_MAINMENU = 'mainmenu'
VIEW_MYLIST = 'mylist'
VIEW_FOLDER = 'folder'
VIEW_MOVIE = 'movie'
VIEW_SHOW = 'show'
VIEW_SEASON = 'season'
VIEW_EPISODE = 'episode'
VIEW_SEARCH = 'search'
VIEW_EXPORTED = 'exported'
CONTENT_IMAGES = 'images'
CONTENT_FOLDER = 'files'
CONTENT_MOVIE = 'movies'
CONTENT_SHOW = 'tvshows'
CONTENT_SEASON = 'seasons'
CONTENT_EPISODE = 'episodes'
'''
--Main Menu key infos--
path : passes information to the called method generally structured as follows: [func. name, menu id, context id]
lolomo_contexts : contexts used to obtain the list of contents (use only one context when lolomo_known = True)
lolomo_known : if True, keys label_id/description_id/icon are ignored, the values are obtained from lolomo list
label_id : menu title
description_id : description info text
icon : set a default image
view : override the default "partial menu id" of view
content_type : override the default content type (CONTENT_SHOW)
Explanation of function names in the 'path' key:
video_list: automatically gets the list_id by making a lolomo request,
the list_id search is made using the value specified on the lolomo_contexts key
video_list_sorted: to work must have a third argument on the path that is the context_id
or instead specified the key request_context_name
'''
MAIN_MENU_ITEMS = collections.OrderedDict([
('myList', {'path': ['video_list_sorted', 'myList'],
'lolomo_contexts': ['queue'],
'lolomo_known': True,
'request_context_name': 'mylist',
'view': VIEW_MYLIST}),
('continueWatching', {'path': ['video_list', 'continueWatching'],
'lolomo_contexts': ['continueWatching'],
'lolomo_known': True}),
('chosenForYou', {'path': ['video_list', 'chosenForYou'],
'lolomo_contexts': ['topTen'],
'lolomo_known': True}),
('recentlyAdded', {'path': ['video_list_sorted', 'recentlyAdded', '1592210'],
'lolomo_contexts': None,
'lolomo_known': False,
'request_context_name': 'genres',
'label_id': 30145,
'description_id': 30146,
'icon': 'DefaultRecentlyAddedMovies.png'}),
('newRelease', {'path': ['video_list_sorted', 'newRelease'],
'lolomo_contexts': ['newRelease'],
'lolomo_known': True,
'request_context_name': 'newrelease'}),
('currentTitles', {'path': ['video_list', 'currentTitles'],
'lolomo_contexts': ['trendingNow'],
'lolomo_known': True}),
('mostViewed', {'path': ['video_list', 'mostViewed'],
'lolomo_contexts': ['popularTitles'],
'lolomo_known': True}),
('netflixOriginals', {'path': ['video_list_sorted', 'netflixOriginals', '839338'],
'lolomo_contexts': ['netflixOriginals'],
'lolomo_known': True,
'request_context_name': 'genres'}),
('assistiveAudio', {'path': ['video_list_sorted', 'assistiveAudio', 'None'],
'lolomo_contexts': None,
'lolomo_known': False,
'request_context_name': 'assistiveAudio',
'label_id': 30163,
'description_id': 30164,
'icon': 'DefaultTVShows.png'}),
('recommendations', {'path': ['recommendations', 'recommendations'],
'lolomo_contexts': ['similars', 'becauseYouAdded'],
'lolomo_known': False,
'label_id': 30001,
'description_id': 30094,
'icon': 'DefaultUser.png'}),
('tvshowsGenres', {'path': ['subgenres', 'tvshowsGenres', '83'],
'lolomo_contexts': None,
'lolomo_known': False,
'request_context_name': 'genres', # Used for sub-menus
'label_id': 30174,
'description_id': None,
'icon': 'DefaultTVShows.png'}),
('moviesGenres', {'path': ['subgenres', 'moviesGenres', '34399'],
'lolomo_contexts': None,
'lolomo_known': False,
'request_context_name': 'genres', # Used for sub-menus
'label_id': 30175,
'description_id': None,
'icon': 'DefaultMovies.png',
'content_type': CONTENT_MOVIE}),
('tvshows', {'path': ['genres', 'tvshows', '83'],
'lolomo_contexts': None,
'lolomo_known': False,
'request_context_name': 'genres', # Used for sub-menus
'label_id': 30095,
'description_id': None,
'icon': 'DefaultTVShows.png'}),
('movies', {'path': ['genres', 'movies', '34399'],
'lolomo_contexts': None,
'lolomo_known': False,
'request_context_name': 'genres', # Used for sub-menus
'label_id': 30096,
'description_id': None,
'icon': 'DefaultMovies.png',
'content_type': CONTENT_MOVIE}),
('genres', {'path': ['genres', 'genres'],
'lolomo_contexts': ['genre'],
'lolomo_known': False,
'request_context_name': 'genres', # Used for sub-menus
'label_id': 30010,
'description_id': 30093,
'icon': 'DefaultGenre.png'}),
('search', {'path': ['search', 'search'],
'lolomo_contexts': None,
'lolomo_known': False,
'label_id': 30011,
'description_id': 30092,
'icon': None,
'view': VIEW_SEARCH}),
('exported', {'path': ['exported', 'exported'],
'lolomo_contexts': None,
'lolomo_known': False,
'label_id': 30048,
'description_id': 30091,
'icon': 'DefaultHardDisk.png',
'view': VIEW_EXPORTED})
])
MODE_DIRECTORY = 'directory'
MODE_HUB = 'hub'
MODE_ACTION = 'action'
MODE_PLAY = 'play'
MODE_LIBRARY = 'library'
def __init__(self):
"""Do nothing on constructing the object"""
# Define here any variables necessary for the correct loading of the modules
self.IS_ADDON_FIRSTRUN = None
self.ADDON = None
self.ADDON_DATA_PATH = None
self.DATA_PATH = None
self.CACHE_METADATA_TTL = None
    def init_globals(self, argv, reinitialize_database=False):
        """Initialized globally used module variables.
        Needs to be called at start of each plugin instance!
        This is an ugly hack because Kodi doesn't execute statements defined on
        module level if reusing a language invoker.

        :param argv: sys.argv of the plugin/service invocation
            (argv[0] = plugin URL, argv[1] = plugin handle when called as addon)
        :param reinitialize_database: force re-creation of the database objects
        """
        # IS_ADDON_FIRSTRUN specifies when the addon is at its first run (reuselanguageinvoker is not yet used)
        self.IS_ADDON_FIRSTRUN = self.IS_ADDON_FIRSTRUN is None
        self.PY_IS_VER2 = sys.version_info.major == 2
        self.COOKIES = {}
        self.ADDON = xbmcaddon.Addon()
        self.ADDON_ID = self.ADDON.getAddonInfo('id')
        self.PLUGIN = self.ADDON.getAddonInfo('name')
        self.VERSION = self.ADDON.getAddonInfo('version')
        self.DEFAULT_FANART = self.ADDON.getAddonInfo('fanart')
        self.ICON = self.ADDON.getAddonInfo('icon')
        self.ADDON_DATA_PATH = self.ADDON.getAddonInfo('path')  # Addon folder
        self.DATA_PATH = self.ADDON.getAddonInfo('profile')  # Addon user data folder
        # Add absolute paths of embedded py modules to python system directory
        module_paths = [
            os.path.join(self.ADDON_DATA_PATH, 'modules', 'mysql-connector-python')
        ]
        for path in module_paths:
            path = xbmc.translatePath(path)
            if path not in sys.path:
                sys.path.insert(0, path)
        self.CACHE_PATH = os.path.join(self.DATA_PATH, 'cache')
        self.COOKIE_PATH = os.path.join(self.DATA_PATH, 'COOKIE')
        # Cache TTLs: setting is minutes for the generic cache, days for metadata
        self.CACHE_TTL = self.ADDON.getSettingInt('cache_ttl') * 60
        self.CACHE_METADATA_TTL = (
            self.ADDON.getSettingInt('cache_metadata_ttl') * 24 * 60 * 60)
        self.URL = urlparse(argv[0])
        # A plugin invocation passes the integer handle as argv[1];
        # the service has no handle, which is how IS_SERVICE is detected.
        try:
            self.PLUGIN_HANDLE = int(argv[1])
            self.IS_SERVICE = False
        except IndexError:
            self.PLUGIN_HANDLE = 0
            self.IS_SERVICE = True
        self.BASE_URL = '{scheme}://{netloc}'.format(scheme=self.URL[0],
                                                     netloc=self.URL[1])
        # URL path without the leading '/', decoded (g is this module's singleton)
        self.PATH = g.py2_decode(unquote(self.URL[2][1:]))
        try:
            self.PARAM_STRING = argv[2][1:]
        except IndexError:
            self.PARAM_STRING = ''
        self.REQUEST_PARAMS = dict(parse_qsl(self.PARAM_STRING))
        self.reset_time_trace()
        self.TIME_TRACE_ENABLED = self.ADDON.getSettingBool('enable_timing')
        self.IPC_OVER_HTTP = self.ADDON.getSettingBool('enable_ipc_over_http')
        self._init_database(self.IS_ADDON_FIRSTRUN or reinitialize_database)
        self.settings_monitor_suspend(False)  # Reset the value in case of addon crash
        if self.IS_ADDON_FIRSTRUN or self.IS_SERVICE:
            self._init_cache()
    def _init_database(self, initialize):
        """Create the LOCAL_DB and (when enabled) SHARED_DB database objects.

        :param initialize: when True, (re)create the database instances
        """
        # Initialize local database
        if initialize:
            import resources.lib.database.db_local as db_local
            self.LOCAL_DB = db_local.NFLocalDatabase()
        # Initialize shared database
        use_mysql = g.ADDON.getSettingBool('use_mysql')
        if initialize or use_mysql:
            import resources.lib.database.db_shared as db_shared
            from resources.lib.database.db_exceptions import MySQLConnectionError
            try:
                shared_db_class = db_shared.get_shareddb_class(use_mysql=use_mysql)
                self.SHARED_DB = shared_db_class()
            except MySQLConnectionError:
                # The MySQL database cannot be reached; fall back to the local
                # SQLite database. Disabling the setting here also propagates the
                # change to the service via the SettingsMonitor.
                import resources.lib.kodi.ui as ui
                self.ADDON.setSettingBool('use_mysql', False)
                ui.show_notification(self.ADDON.getLocalizedString(30206), time=10000)
                shared_db_class = db_shared.get_shareddb_class()
                self.SHARED_DB = shared_db_class()
    def _init_cache(self):
        """Create the Cache object, building the on-disk folders on first use."""
        if not os.path.exists(g.py2_decode(xbmc.translatePath(self.CACHE_PATH))):
            self._init_filesystem_cache()
        from resources.lib.cache import Cache
        self.CACHE = Cache(self.CACHE_PATH, self.PLUGIN_HANDLE)
    def _init_filesystem_cache(self):
        """Create one sub-folder per cache bucket under CACHE_PATH."""
        from xbmcvfs import mkdirs
        from resources.lib.cache import BUCKET_NAMES
        for bucket in BUCKET_NAMES:
            mkdirs(xbmc.translatePath(os.path.join(self.CACHE_PATH, bucket)))
    def initial_addon_configuration(self):
        """
        Initial addon configuration,
        helps users to automatically configure addon parameters for proper viewing of videos
        """
        run_initial_config = self.ADDON.getSettingBool('run_init_configuration')
        if run_initial_config:
            from resources.lib.common import (debug, get_system_platform, get_local_string)
            from resources.lib.kodi.ui import (ask_for_confirmation, show_ok_dialog)
            # Suspend the settings monitor while writing; resumed manually below.
            self.settings_monitor_suspend(True, False)
            system = get_system_platform()
            debug('Running initial addon configuration dialogs on system: {}', system)
            if system in ['osx', 'ios', 'xbox']:
                self.ADDON.setSettingBool('enable_vp9_profiles', False)
                self.ADDON.setSettingBool('enable_hevc_profiles', True)
            elif system == 'windows':
                # Currently inputstream does not support hardware video acceleration on windows,
                # there is no guarantee that we will get 4K without video hardware acceleration,
                # so no 4K configuration
                self.ADDON.setSettingBool('enable_vp9_profiles', True)
                self.ADDON.setSettingBool('enable_hevc_profiles', False)
            elif system == 'android':
                # Ask the user about account/device 4K capability before choosing codecs.
                ultrahd_capable_device = False
                premium_account = ask_for_confirmation(get_local_string(30154),
                                                       get_local_string(30155))
                if premium_account:
                    ultrahd_capable_device = ask_for_confirmation(get_local_string(30154),
                                                                  get_local_string(30156))
                if ultrahd_capable_device:
                    show_ok_dialog(get_local_string(30154), get_local_string(30157))
                    ia_enabled = xbmc.getCondVisibility('System.HasAddon(inputstream.adaptive)')
                    if ia_enabled:
                        xbmc.executebuiltin('Addon.OpenSettings(inputstream.adaptive)')
                    else:
                        show_ok_dialog(get_local_string(30154), get_local_string(30046))
                    self.ADDON.setSettingBool('enable_vp9_profiles', False)
                    self.ADDON.setSettingBool('enable_hevc_profiles', True)
                else:
                    # VP9 should have better performance since there is no need for 4k
                    self.ADDON.setSettingBool('enable_vp9_profiles', True)
                    self.ADDON.setSettingBool('enable_hevc_profiles', False)
                self.ADDON.setSettingBool('enable_force_hdcp', ultrahd_capable_device)
            elif system == 'linux':
                # Too many different linux systems, we can not predict all the behaviors
                # Some linux distributions have encountered problems with VP9,
                # OMSC users complain that hevc creates problems
                self.ADDON.setSettingBool('enable_vp9_profiles', False)
                self.ADDON.setSettingBool('enable_hevc_profiles', False)
            else:
                self.ADDON.setSettingBool('enable_vp9_profiles', False)
                self.ADDON.setSettingBool('enable_hevc_profiles', False)
            # Mark the initial configuration as done and resume the monitor.
            self.ADDON.setSettingBool('run_init_configuration', False)
            self.settings_monitor_suspend(False)
def settings_monitor_suspend(self, is_suspended=True, at_first_change=False):
"""
Suspends for the necessary time the settings monitor of the service
that otherwise cause the reinitialization of global settings and possible consequent actions
to settings changes or unnecessary checks when a setting will be changed.
:param is_suspended: True/False - allows or denies the execution of the settings monitor
:param at_first_change:
True - monitor setting is automatically reactivated after the FIRST change to the settings
False - monitor setting MUST BE REACTIVATED MANUALLY
:return: None
"""
if is_suspended and at_first_change:
new_value = 'First'
else:
new_value = str(is_suspended)
# Accepted values in string: First, True, False
current_value = g.LOCAL_DB.get_value('suspend_settings_monitor', 'False')
if new_value == current_value:
return
g.LOCAL_DB.set_value('suspend_settings_monitor', new_value)
    def settings_monitor_suspend_status(self):
        """
        Returns the suspend status of settings monitor
        """
        # One of the strings 'First', 'True', 'False' as stored by settings_monitor_suspend()
        return g.LOCAL_DB.get_value('suspend_settings_monitor', 'False')
    def get_esn(self):
        """Get the generated esn or if set get the custom esn"""
        # A custom ESN entered in the addon settings takes precedence over the
        # generated one stored in the session table of the local database.
        from resources.lib.database.db_utils import TABLE_SESSION
        custom_esn = g.ADDON.getSetting('esn')
        return custom_esn if custom_esn else g.LOCAL_DB.get_value('esn', table=TABLE_SESSION)
def get_edge_esn(self):
"""Get a previously generated edge ESN from the settings or generate
a new one if none exists"""
return self.ADDON.getSetting('edge_esn') or self.generate_edge_esn()
def generate_edge_esn(self):
"""Generate a random EDGE ESN and save it to the settings"""
import random
esn = ['NFCDIE-02-']
possible = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
for _ in range(0, 30):
esn.append(random.choice(possible))
edge_esn = ''.join(esn)
self.settings_monitor_suspend(True, True)
self.ADDON.setSetting('edge_esn', edge_esn)
return edge_esn
def is_known_menu_context(self, context):
"""Return true if context are one of the menu with lolomo_known=True"""
for menu_id, data in iteritems(self.MAIN_MENU_ITEMS): # pylint: disable=unused-variable
if data['lolomo_known']:
if data['lolomo_contexts'][0] == context:
return True
return False
    def flush_settings(self):
        """Reload the ADDON"""
        # Re-create the Addon object so freshly saved settings are re-read.
        # pylint: disable=attribute-defined-outside-init
        self.ADDON = xbmcaddon.Addon()
def reset_time_trace(self):
"""Reset current time trace info"""
self.TIME_TRACE = []
self.time_trace_level = -2
def add_time_trace_level(self):
"""Add a level to the time trace"""
self.time_trace_level += 2
def remove_time_trace_level(self):
"""Remove a level from the time trace"""
self.time_trace_level -= 2
def py2_decode(self, value):
"""Decode text only on python 2"""
# To remove when Kodi 18 support is over / Py2 dead
if self.PY_IS_VER2:
return value.decode('utf-8')
return value
def py2_encode(self, value):
"""Encode text only on python 2"""
# To remove when Kodi 18 support is over / Py2 dead
if self.PY_IS_VER2:
return value.encode('utf-8')
return value
# pylint: disable=invalid-name
# This will have no effect most of the time, as it doesn't seem to be executed
# on subsequent addon invocations when reuseLanguageInvoker is being used.
# We initialize an empty instance so the instance is importable from run_addon.py
# and run_service.py, where g.init_globals(sys.argv) MUST be called before doing
# anything else (even BEFORE OTHER IMPORTS from this addon)
g = GlobalVariables()  # module-level singleton used throughout the addon
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 <NAME> (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Global addon constants
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
# Everything that is to be globally accessible must be defined in this module
# and initialized in GlobalVariables.init_globals.
# When reusing Kodi languageInvokers, only the code in the main module
# (addon.py or service.py) will be run every time the addon is called.
# All other code executed on module level will only be executed once, when
# the module is first imported on the first addon invocation.
from __future__ import absolute_import, division, unicode_literals
import collections
import os
import sys
try: # Python 3
from urllib.parse import parse_qsl, unquote, urlparse
except ImportError: # Python 2
from urllib2 import unquote
from urlparse import parse_qsl, urlparse
from future.utils import iteritems
import xbmc
import xbmcaddon
class GlobalVariables(object):
"""Encapsulation for global variables to work around quirks with
Kodi's reuseLanguageInvoker behavior"""
# pylint: disable=attribute-defined-outside-init
# pylint: disable=invalid-name, too-many-instance-attributes
# Values in the variables VIEW_* stand for a partial menu id,
# contained in the settings xml, example 'profiles' stand for id 'viewmodeprofiles'
VIEW_PROFILES = 'profiles'
VIEW_MAINMENU = 'mainmenu'
VIEW_MYLIST = 'mylist'
VIEW_FOLDER = 'folder'
VIEW_MOVIE = 'movie'
VIEW_SHOW = 'show'
VIEW_SEASON = 'season'
VIEW_EPISODE = 'episode'
VIEW_SEARCH = 'search'
VIEW_EXPORTED = 'exported'
CONTENT_IMAGES = 'images'
CONTENT_FOLDER = 'files'
CONTENT_MOVIE = 'movies'
CONTENT_SHOW = 'tvshows'
CONTENT_SEASON = 'seasons'
CONTENT_EPISODE = 'episodes'
'''
--Main Menu key infos--
path : passes information to the called method generally structured as follows: [func. name, menu id, context id]
lolomo_contexts : contexts used to obtain the list of contents (use only one context when lolomo_known = True)
lolomo_known : if True, keys label_id/description_id/icon are ignored, the values are obtained from lolomo list
label_id : menu title
description_id : description info text
icon : set a default image
view : override the default "partial menu id" of view
content_type : override the default content type (CONTENT_SHOW)
Explanation of function names in the 'path' key:
video_list: automatically gets the list_id by making a lolomo request,
the list_id search is made using the value specified on the lolomo_contexts key
video_list_sorted: to work must have a third argument on the path that is the context_id
or instead specified the key request_context_name
'''
MAIN_MENU_ITEMS = collections.OrderedDict([
('myList', {'path': ['video_list_sorted', 'myList'],
'lolomo_contexts': ['queue'],
'lolomo_known': True,
'request_context_name': 'mylist',
'view': VIEW_MYLIST}),
('continueWatching', {'path': ['video_list', 'continueWatching'],
'lolomo_contexts': ['continueWatching'],
'lolomo_known': True}),
('chosenForYou', {'path': ['video_list', 'chosenForYou'],
'lolomo_contexts': ['topTen'],
'lolomo_known': True}),
('recentlyAdded', {'path': ['video_list_sorted', 'recentlyAdded', '1592210'],
'lolomo_contexts': None,
'lolomo_known': False,
'request_context_name': 'genres',
'label_id': 30145,
'description_id': 30146,
'icon': 'DefaultRecentlyAddedMovies.png'}),
('newRelease', {'path': ['video_list_sorted', 'newRelease'],
'lolomo_contexts': ['newRelease'],
'lolomo_known': True,
'request_context_name': 'newrelease'}),
('currentTitles', {'path': ['video_list', 'currentTitles'],
'lolomo_contexts': ['trendingNow'],
'lolomo_known': True}),
('mostViewed', {'path': ['video_list', 'mostViewed'],
'lolomo_contexts': ['popularTitles'],
'lolomo_known': True}),
('netflixOriginals', {'path': ['video_list_sorted', 'netflixOriginals', '839338'],
'lolomo_contexts': ['netflixOriginals'],
'lolomo_known': True,
'request_context_name': 'genres'}),
('assistiveAudio', {'path': ['video_list_sorted', 'assistiveAudio', 'None'],
'lolomo_contexts': None,
'lolomo_known': False,
'request_context_name': 'assistiveAudio',
'label_id': 30163,
'description_id': 30164,
'icon': 'DefaultTVShows.png'}),
('recommendations', {'path': ['recommendations', 'recommendations'],
'lolomo_contexts': ['similars', 'becauseYouAdded'],
'lolomo_known': False,
'label_id': 30001,
'description_id': 30094,
'icon': 'DefaultUser.png'}),
('tvshowsGenres', {'path': ['subgenres', 'tvshowsGenres', '83'],
'lolomo_contexts': None,
'lolomo_known': False,
'request_context_name': 'genres', # Used for sub-menus
'label_id': 30174,
'description_id': None,
'icon': 'DefaultTVShows.png'}),
('moviesGenres', {'path': ['subgenres', 'moviesGenres', '34399'],
'lolomo_contexts': None,
'lolomo_known': False,
'request_context_name': 'genres', # Used for sub-menus
'label_id': 30175,
'description_id': None,
'icon': 'DefaultMovies.png',
'content_type': CONTENT_MOVIE}),
('tvshows', {'path': ['genres', 'tvshows', '83'],
'lolomo_contexts': None,
'lolomo_known': False,
'request_context_name': 'genres', # Used for sub-menus
'label_id': 30095,
'description_id': None,
'icon': 'DefaultTVShows.png'}),
('movies', {'path': ['genres', 'movies', '34399'],
'lolomo_contexts': None,
'lolomo_known': False,
'request_context_name': 'genres', # Used for sub-menus
'label_id': 30096,
'description_id': None,
'icon': 'DefaultMovies.png',
'content_type': CONTENT_MOVIE}),
('genres', {'path': ['genres', 'genres'],
'lolomo_contexts': ['genre'],
'lolomo_known': False,
'request_context_name': 'genres', # Used for sub-menus
'label_id': 30010,
'description_id': 30093,
'icon': 'DefaultGenre.png'}),
('search', {'path': ['search', 'search'],
'lolomo_contexts': None,
'lolomo_known': False,
'label_id': 30011,
'description_id': 30092,
'icon': None,
'view': VIEW_SEARCH}),
('exported', {'path': ['exported', 'exported'],
'lolomo_contexts': None,
'lolomo_known': False,
'label_id': 30048,
'description_id': 30091,
'icon': 'DefaultHardDisk.png',
'view': VIEW_EXPORTED})
])
MODE_DIRECTORY = 'directory'
MODE_HUB = 'hub'
MODE_ACTION = 'action'
MODE_PLAY = 'play'
MODE_LIBRARY = 'library'
def __init__(self):
"""Do nothing on constructing the object"""
# Define here any variables necessary for the correct loading of the modules
self.IS_ADDON_FIRSTRUN = None
self.ADDON = None
self.ADDON_DATA_PATH = None
self.DATA_PATH = None
self.CACHE_METADATA_TTL = None
def init_globals(self, argv, reinitialize_database=False):
"""Initialized globally used module variables.
Needs to be called at start of each plugin instance!
This is an ugly hack because Kodi doesn't execute statements defined on
module level if reusing a language invoker."""
# IS_ADDON_FIRSTRUN specifies when the addon is at its first run (reuselanguageinvoker is not yet used)
self.IS_ADDON_FIRSTRUN = self.IS_ADDON_FIRSTRUN is None
self.PY_IS_VER2 = sys.version_info.major == 2
self.COOKIES = {}
self.ADDON = xbmcaddon.Addon()
self.ADDON_ID = self.ADDON.getAddonInfo('id')
self.PLUGIN = self.ADDON.getAddonInfo('name')
self.VERSION = self.ADDON.getAddonInfo('version')
self.DEFAULT_FANART = self.ADDON.getAddonInfo('fanart')
self.ICON = self.ADDON.getAddonInfo('icon')
self.ADDON_DATA_PATH = self.ADDON.getAddonInfo('path') # Addon folder
self.DATA_PATH = self.ADDON.getAddonInfo('profile') # Addon user data folder
# Add absolute paths of embedded py modules to python system directory
module_paths = [
os.path.join(self.ADDON_DATA_PATH, 'modules', 'mysql-connector-python')
]
for path in module_paths:
path = xbmc.translatePath(path)
if path not in sys.path:
sys.path.insert(0, path)
self.CACHE_PATH = os.path.join(self.DATA_PATH, 'cache')
self.COOKIE_PATH = os.path.join(self.DATA_PATH, 'COOKIE')
self.CACHE_TTL = self.ADDON.getSettingInt('cache_ttl') * 60
self.CACHE_METADATA_TTL = (
self.ADDON.getSettingInt('cache_metadata_ttl') * 24 * 60 * 60)
self.URL = urlparse(argv[0])
try:
self.PLUGIN_HANDLE = int(argv[1])
self.IS_SERVICE = False
except IndexError:
self.PLUGIN_HANDLE = 0
self.IS_SERVICE = True
self.BASE_URL = '{scheme}://{netloc}'.format(scheme=self.URL[0],
netloc=self.URL[1])
self.PATH = g.py2_decode(unquote(self.URL[2][1:]))
try:
self.PARAM_STRING = argv[2][1:]
except IndexError:
self.PARAM_STRING = ''
self.REQUEST_PARAMS = dict(parse_qsl(self.PARAM_STRING))
self.reset_time_trace()
self.TIME_TRACE_ENABLED = self.ADDON.getSettingBool('enable_timing')
self.IPC_OVER_HTTP = self.ADDON.getSettingBool('enable_ipc_over_http')
self._init_database(self.IS_ADDON_FIRSTRUN or reinitialize_database)
self.settings_monitor_suspend(False) # Reset the value in case of addon crash
if self.IS_ADDON_FIRSTRUN or self.IS_SERVICE:
self._init_cache()
def _init_database(self, initialize):
# Initialize local database
if initialize:
import resources.lib.database.db_local as db_local
self.LOCAL_DB = db_local.NFLocalDatabase()
# Initialize shared database
use_mysql = g.ADDON.getSettingBool('use_mysql')
if initialize or use_mysql:
import resources.lib.database.db_shared as db_shared
from resources.lib.database.db_exceptions import MySQLConnectionError
try:
shared_db_class = db_shared.get_shareddb_class(use_mysql=use_mysql)
self.SHARED_DB = shared_db_class()
except MySQLConnectionError:
# The MySQL database cannot be reached, fallback to local SQLite database
# When this code is called from addon, is needed apply the change also in the
# service, so disabling it run the SettingsMonitor
import resources.lib.kodi.ui as ui
self.ADDON.setSettingBool('use_mysql', False)
ui.show_notification(self.ADDON.getLocalizedString(30206), time=10000)
shared_db_class = db_shared.get_shareddb_class()
self.SHARED_DB = shared_db_class()
def _init_cache(self):
if not os.path.exists(g.py2_decode(xbmc.translatePath(self.CACHE_PATH))):
self._init_filesystem_cache()
from resources.lib.cache import Cache
self.CACHE = Cache(self.CACHE_PATH, self.PLUGIN_HANDLE)
def _init_filesystem_cache(self):
from xbmcvfs import mkdirs
from resources.lib.cache import BUCKET_NAMES
for bucket in BUCKET_NAMES:
mkdirs(xbmc.translatePath(os.path.join(self.CACHE_PATH, bucket)))
def initial_addon_configuration(self):
"""
Initial addon configuration,
helps users to automatically configure addon parameters for proper viewing of videos
"""
run_initial_config = self.ADDON.getSettingBool('run_init_configuration')
if run_initial_config:
from resources.lib.common import (debug, get_system_platform, get_local_string)
from resources.lib.kodi.ui import (ask_for_confirmation, show_ok_dialog)
self.settings_monitor_suspend(True, False)
system = get_system_platform()
debug('Running initial addon configuration dialogs on system: {}', system)
if system in ['osx', 'ios', 'xbox']:
self.ADDON.setSettingBool('enable_vp9_profiles', False)
self.ADDON.setSettingBool('enable_hevc_profiles', True)
elif system == 'windows':
# Currently inputstream does not support hardware video acceleration on windows,
# there is no guarantee that we will get 4K without video hardware acceleration,
# so no 4K configuration
self.ADDON.setSettingBool('enable_vp9_profiles', True)
self.ADDON.setSettingBool('enable_hevc_profiles', False)
elif system == 'android':
ultrahd_capable_device = False
premium_account = ask_for_confirmation(get_local_string(30154),
get_local_string(30155))
if premium_account:
ultrahd_capable_device = ask_for_confirmation(get_local_string(30154),
get_local_string(30156))
if ultrahd_capable_device:
show_ok_dialog(get_local_string(30154), get_local_string(30157))
ia_enabled = xbmc.getCondVisibility('System.HasAddon(inputstream.adaptive)')
if ia_enabled:
xbmc.executebuiltin('Addon.OpenSettings(inputstream.adaptive)')
else:
show_ok_dialog(get_local_string(30154), get_local_string(30046))
self.ADDON.setSettingBool('enable_vp9_profiles', False)
self.ADDON.setSettingBool('enable_hevc_profiles', True)
else:
# VP9 should have better performance since there is no need for 4k
self.ADDON.setSettingBool('enable_vp9_profiles', True)
self.ADDON.setSettingBool('enable_hevc_profiles', False)
self.ADDON.setSettingBool('enable_force_hdcp', ultrahd_capable_device)
elif system == 'linux':
# Too many different linux systems, we can not predict all the behaviors
# Some linux distributions have encountered problems with VP9,
# OMSC users complain that hevc creates problems
self.ADDON.setSettingBool('enable_vp9_profiles', False)
self.ADDON.setSettingBool('enable_hevc_profiles', False)
else:
self.ADDON.setSettingBool('enable_vp9_profiles', False)
self.ADDON.setSettingBool('enable_hevc_profiles', False)
self.ADDON.setSettingBool('run_init_configuration', False)
self.settings_monitor_suspend(False)
def settings_monitor_suspend(self, is_suspended=True, at_first_change=False):
"""
Suspends for the necessary time the settings monitor of the service
that otherwise cause the reinitialization of global settings and possible consequent actions
to settings changes or unnecessary checks when a setting will be changed.
:param is_suspended: True/False - allows or denies the execution of the settings monitor
:param at_first_change:
True - monitor setting is automatically reactivated after the FIRST change to the settings
False - monitor setting MUST BE REACTIVATED MANUALLY
:return: None
"""
if is_suspended and at_first_change:
new_value = 'First'
else:
new_value = str(is_suspended)
# Accepted values in string: First, True, False
current_value = g.LOCAL_DB.get_value('suspend_settings_monitor', 'False')
if new_value == current_value:
return
g.LOCAL_DB.set_value('suspend_settings_monitor', new_value)
def settings_monitor_suspend_status(self):
"""
Returns the suspend status of settings monitor
"""
return g.LOCAL_DB.get_value('suspend_settings_monitor', 'False')
def get_esn(self):
"""Get the generated esn or if set get the custom esn"""
from resources.lib.database.db_utils import TABLE_SESSION
custom_esn = g.ADDON.getSetting('esn')
return custom_esn if custom_esn else g.LOCAL_DB.get_value('esn', table=TABLE_SESSION)
def get_edge_esn(self):
"""Get a previously generated edge ESN from the settings or generate
a new one if none exists"""
return self.ADDON.getSetting('edge_esn') or self.generate_edge_esn()
def generate_edge_esn(self):
"""Generate a random EDGE ESN and save it to the settings"""
import random
esn = ['NFCDIE-02-']
possible = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
for _ in range(0, 30):
esn.append(random.choice(possible))
edge_esn = ''.join(esn)
self.settings_monitor_suspend(True, True)
self.ADDON.setSetting('edge_esn', edge_esn)
return edge_esn
def is_known_menu_context(self, context):
"""Return true if context are one of the menu with lolomo_known=True"""
for menu_id, data in iteritems(self.MAIN_MENU_ITEMS): # pylint: disable=unused-variable
if data['lolomo_known']:
if data['lolomo_contexts'][0] == context:
return True
return False
    def flush_settings(self):
        """Reload the Kodi Addon object so freshly saved settings are re-read"""
        # pylint: disable=attribute-defined-outside-init
        self.ADDON = xbmcaddon.Addon()
def reset_time_trace(self):
"""Reset current time trace info"""
self.TIME_TRACE = []
self.time_trace_level = -2
def add_time_trace_level(self):
"""Add a level to the time trace"""
self.time_trace_level += 2
def remove_time_trace_level(self):
"""Remove a level from the time trace"""
self.time_trace_level -= 2
def py2_decode(self, value):
"""Decode text only on python 2"""
# To remove when Kodi 18 support is over / Py2 dead
if self.PY_IS_VER2:
return value.decode('utf-8')
return value
def py2_encode(self, value):
"""Encode text only on python 2"""
# To remove when Kodi 18 support is over / Py2 dead
if self.PY_IS_VER2:
return value.encode('utf-8')
return value
# pylint: disable=invalid-name
# Module-level singleton that holds all of the add-on's global state.
# This will have no effect most of the time, as it doesn't seem to be executed
# on subsequent addon invocations when reuseLanguageInvoker is being used.
# We initialize an empty instance so the instance is importable from run_addon.py
# and run_service.py, where g.init_globals(sys.argv) MUST be called before doing
# anything else (even BEFORE OTHER IMPORTS from this addon)
g = GlobalVariables()
|
en
| 0.72445
|
# -*- coding: utf-8 -*- Copyright (C) 2017 <NAME> (plugin.video.netflix) Copyright (C) 2018 Caphm (original implementation module) Global addon constants SPDX-License-Identifier: MIT See LICENSES/MIT.md for more information. # Everything that is to be globally accessible must be defined in this module # and initialized in GlobalVariables.init_globals. # When reusing Kodi languageInvokers, only the code in the main module # (addon.py or service.py) will be run every time the addon is called. # All other code executed on module level will only be executed once, when # the module is first imported on the first addon invocation. # Python 3 # Python 2 Encapsulation for global variables to work around quirks with Kodi's reuseLanguageInvoker behavior # pylint: disable=attribute-defined-outside-init # pylint: disable=invalid-name, too-many-instance-attributes # Values in the variables VIEW_* stand for a partial menu id, # contained in the settings xml, example 'profiles' stand for id 'viewmodeprofiles' --Main Menu key infos-- path : passes information to the called method generally structured as follows: [func. 
name, menu id, context id] lolomo_contexts : contexts used to obtain the list of contents (use only one context when lolomo_known = True) lolomo_known : if True, keys label_id/description_id/icon are ignored, the values are obtained from lolomo list label_id : menu title description_id : description info text icon : set a default image view : override the default "partial menu id" of view content_type : override the default content type (CONTENT_SHOW) Explanation of function names in the 'path' key: video_list: automatically gets the list_id by making a lolomo request, the list_id search is made using the value specified on the lolomo_contexts key video_list_sorted: to work must have a third argument on the path that is the context_id or instead specified the key request_context_name # Used for sub-menus # Used for sub-menus # Used for sub-menus # Used for sub-menus # Used for sub-menus Do nothing on constructing the object # Define here any variables necessary for the correct loading of the modules Initialized globally used module variables. Needs to be called at start of each plugin instance! This is an ugly hack because Kodi doesn't execute statements defined on module level if reusing a language invoker. 
# IS_ADDON_FIRSTRUN specifies when the addon is at its first run (reuselanguageinvoker is not yet used) # Addon folder # Addon user data folder # Add absolute paths of embedded py modules to python system directory # Reset the value in case of addon crash # Initialize local database # Initialize shared database # The MySQL database cannot be reached, fallback to local SQLite database # When this code is called from addon, is needed apply the change also in the # service, so disabling it run the SettingsMonitor Initial addon configuration, helps users to automatically configure addon parameters for proper viewing of videos # Currently inputstream does not support hardware video acceleration on windows, # there is no guarantee that we will get 4K without video hardware acceleration, # so no 4K configuration # VP9 should have better performance since there is no need for 4k # Too many different linux systems, we can not predict all the behaviors # Some linux distributions have encountered problems with VP9, # OMSC users complain that hevc creates problems Suspends for the necessary time the settings monitor of the service that otherwise cause the reinitialization of global settings and possible consequent actions to settings changes or unnecessary checks when a setting will be changed. 
:param is_suspended: True/False - allows or denies the execution of the settings monitor :param at_first_change: True - monitor setting is automatically reactivated after the FIRST change to the settings False - monitor setting MUST BE REACTIVATED MANUALLY :return: None # Accepted values in string: First, True, False Returns the suspend status of settings monitor Get the generated esn or if set get the custom esn Get a previously generated edge ESN from the settings or generate a new one if none exists Generate a random EDGE ESN and save it to the settings Return true if context are one of the menu with lolomo_known=True # pylint: disable=unused-variable Reload the ADDON # pylint: disable=attribute-defined-outside-init Reset current time trace info Add a level to the time trace Remove a level from the time trace Decode text only on python 2 # To remove when Kodi 18 support is over / Py2 dead Encode text only on python 2 # To remove when Kodi 18 support is over / Py2 dead # pylint: disable=invalid-name # This will have no effect most of the time, as it doesn't seem to be executed # on subsequent addon invocations when reuseLanguageInvoker is being used. # We initialize an empty instance so the instance is importable from run_addon.py # and run_service.py, where g.init_globals(sys.argv) MUST be called before doing # anything else (even BEFORE OTHER IMPORTS from this addon)
| 2.180318
| 2
|
plotly/figure_factory/_bullet.py
|
gnestor/plotly.py
| 2
|
6629182
|
<gh_stars>1-10
from __future__ import absolute_import
import collections
import math
from plotly import colors, exceptions, optional_imports
from plotly.figure_factory import utils
import plotly
import plotly.graph_objs as go
pd = optional_imports.get_module('pandas')
def is_sequence(obj):
    """Return True if obj is a list/tuple-like sequence but not a string."""
    # collections.Sequence was removed in Python 3.10; import the ABC from
    # collections.abc with a fallback for Python 2.
    try:
        from collections.abc import Sequence
    except ImportError:  # Python 2
        from collections import Sequence
    return isinstance(obj, Sequence) and not isinstance(obj, str)
def _bullet(df, markers, measures, ranges, subtitles, titles, orientation,
            range_colors, measure_colors, horizontal_spacing,
            vertical_spacing, scatter_options, layout_options):
    """Assemble the bullet-chart figure from the normalized DataFrame.

    Each row of df becomes one subplot ("lane") containing the stacked
    qualitative range bars, the thinner quantitative measure bars and the
    marker symbols, plus a title/subtitle annotation.

    NOTE(review): the markers/measures/ranges/subtitles/titles parameters are
    never read here (data is accessed through the fixed df column names), and
    the local name `markers` is shadowed by the Scatter trace below.
    """
    num_of_lanes = len(df)
    # 'h' orientation: one subplot row per lane; 'v': one subplot column per lane
    num_of_rows = num_of_lanes if orientation == 'h' else 1
    num_of_cols = 1 if orientation == 'h' else num_of_lanes
    if not horizontal_spacing:
        horizontal_spacing = 1./num_of_lanes
    if not vertical_spacing:
        vertical_spacing = 1./num_of_lanes
    fig = plotly.tools.make_subplots(
        num_of_rows, num_of_cols, print_grid=False,
        horizontal_spacing=horizontal_spacing,
        vertical_spacing=vertical_spacing
    )
    # base layout defaults (may be overridden by layout_options below)
    fig['layout'].update(
        dict(shapes=[]),
        title='Bullet Chart',
        height=600,
        width=1000,
        showlegend=False,
        barmode='stack',
        annotations=[],
        margin=dict(l=120 if orientation == 'h' else 80),
    )
    # apply user-supplied layout overrides
    fig['layout'].update(layout_options)
    # the "width" axis runs across a lane, the "length" axis along its values
    if orientation == 'h':
        width_axis = 'yaxis'
        length_axis = 'xaxis'
    else:
        width_axis = 'xaxis'
        length_axis = 'yaxis'
    for key in fig['layout']:
        if 'xaxis' in key or 'yaxis' in key:
            fig['layout'][key]['showgrid'] = False
            fig['layout'][key]['zeroline'] = False
        if length_axis in key:
            fig['layout'][key]['tickwidth'] = 1
        if width_axis in key:
            fig['layout'][key]['showticklabels'] = False
            fig['layout'][key]['range'] = [0, 1]
    # narrow domain if there is only a single lane
    if num_of_lanes <= 1:
        fig['layout'][width_axis + '1']['domain'] = [0.4, 0.6]
    if not range_colors:
        range_colors = ['rgb(200, 200, 200)', 'rgb(245, 245, 245)']
    if not measure_colors:
        measure_colors = ['rgb(31, 119, 180)', 'rgb(176, 196, 221)']
    for row in range(num_of_lanes):
        # background range bars (qualitative bands), drawn widest-value first
        for idx in range(len(df.iloc[row]['ranges'])):
            inter_colors = colors.n_colors(
                range_colors[0], range_colors[1],
                len(df.iloc[row]['ranges']), 'rgb'
            )
            x = ([sorted(df.iloc[row]['ranges'])[-1 - idx]] if
                 orientation == 'h' else [0])
            y = ([0] if orientation == 'h' else
                 [sorted(df.iloc[row]['ranges'])[-1 - idx]])
            bar = go.Bar(
                x=x,
                y=y,
                marker=dict(
                    color=inter_colors[-1 - idx]
                ),
                name='ranges',
                hoverinfo='x' if orientation == 'h' else 'y',
                orientation=orientation,
                width=2,
                base=0,
                xaxis='x{}'.format(row + 1),
                yaxis='y{}'.format(row + 1)
            )
            fig.add_trace(bar)
        # foreground measure bars (quantitative values), thinner than ranges
        for idx in range(len(df.iloc[row]['measures'])):
            inter_colors = colors.n_colors(
                measure_colors[0], measure_colors[1],
                len(df.iloc[row]['measures']), 'rgb'
            )
            x = ([sorted(df.iloc[row]['measures'])[-1 - idx]] if
                 orientation == 'h' else [0.5])
            y = ([0.5] if orientation == 'h'
                 else [sorted(df.iloc[row]['measures'])[-1 - idx]])
            bar = go.Bar(
                x=x,
                y=y,
                marker=dict(
                    color=inter_colors[-1 - idx]
                ),
                name='measures',
                hoverinfo='x' if orientation == 'h' else 'y',
                orientation=orientation,
                width=0.4,
                base=0,
                xaxis='x{}'.format(row + 1),
                yaxis='y{}'.format(row + 1)
            )
            fig.add_trace(bar)
        # marker symbols (target points); note this rebinds the `markers` name
        x = df.iloc[row]['markers'] if orientation == 'h' else [0.5]
        y = [0.5] if orientation == 'h' else df.iloc[row]['markers']
        markers = go.Scatter(
            x=x,
            y=y,
            name='markers',
            hoverinfo='x' if orientation == 'h' else 'y',
            xaxis='x{}'.format(row + 1),
            yaxis='y{}'.format(row + 1),
            **scatter_options
        )
        fig.add_trace(markers)
        # titles and subtitles as an annotation next to the lane
        title = df.iloc[row]['titles']
        if 'subtitles' in df:
            subtitle = '<br>{}'.format(df.iloc[row]['subtitles'])
        else:
            subtitle = ''
        label = '<b>{}</b>'.format(title) + subtitle
        annot = utils.annotation_dict_for_label(
            label,
            (num_of_lanes - row if orientation == 'h' else row + 1),
            num_of_lanes,
            vertical_spacing if orientation == 'h' else horizontal_spacing,
            'row' if orientation == 'h' else 'col',
            True if orientation == 'h' else False,
            False
        )
        fig['layout']['annotations'] += (annot,)
    return fig
def _is_empty_value(value):
    """Return True for None or NaN scalars that should become empty lists."""
    # None must be tested first: math.isnan(None) raises TypeError, which is
    # why the original None check was unreachable.
    if value is None:
        return True
    try:
        return math.isnan(value)
    except TypeError:
        # lists and other non-numeric entries are kept as-is
        return False


def create_bullet(data, markers=None, measures=None, ranges=None,
                  subtitles=None, titles=None, orientation='h',
                  range_colors=('rgb(200, 200, 200)', 'rgb(245, 245, 245)'),
                  measure_colors=('rgb(31, 119, 180)', 'rgb(176, 196, 221)'),
                  horizontal_spacing=None, vertical_spacing=None,
                  scatter_options=None, **layout_options):
    """
    Returns figure for bullet chart.

    :param (pd.DataFrame | list | tuple) data: either a list/tuple of
        dictionaries or a pandas DataFrame.
    :param (str) markers: the column name or dictionary key for the markers in
        each subplot.
    :param (str) measures: the column name or dictionary key for the measure
        bars in each subplot. This bar usually represents the quantitative
        measure of performance, usually a list of two values [a, b] and are
        the blue bars in the foreground of each subplot by default.
    :param (str) ranges: the column name or dictionary key for the qualitative
        ranges of performance, usually a 3-item list [bad, okay, good]. They
        correspond to the grey bars in the background of each chart.
    :param (str) subtitles: the column name or dictionary key for the subtitle
        of each subplot chart. The subplots are displayed right underneath
        each title.
    :param (str) titles: the column name or dictionary key for the main label
        of each subplot chart.
    :param (bool) orientation: if 'h', the bars are placed horizontally as
        rows. If 'v' the bars are placed vertically in the chart.
    :param (list) range_colors: a tuple of two colors between which all
        the rectangles for the range are drawn. These rectangles are meant to
        be qualitative indicators against which the marker and measure bars
        are compared.
        Default=('rgb(200, 200, 200)', 'rgb(245, 245, 245)')
    :param (list) measure_colors: a tuple of two colors which is used to color
        the thin quantitative bars in the bullet chart.
        Default=('rgb(31, 119, 180)', 'rgb(176, 196, 221)')
    :param (float) horizontal_spacing: see the 'horizontal_spacing' param in
        plotly.tools.make_subplots. Ranges between 0 and 1.
    :param (float) vertical_spacing: see the 'vertical_spacing' param in
        plotly.tools.make_subplots. Ranges between 0 and 1.
    :param (dict) scatter_options: describes attributes for the scatter trace
        in each subplot such as name and marker size. Call
        help(plotly.graph_objs.Scatter) for more information on valid params.
    :param layout_options: describes attributes for the layout of the figure
        such as title, height and width. Call help(plotly.graph_objs.Layout)
        for more information on valid params.

    Example 1: Use a Dictionary
    ```
    import plotly
    import plotly.plotly as py
    import plotly.figure_factory as ff

    data = [
      {"label": "Revenue", "sublabel": "US$, in thousands",
       "range": [150, 225, 300], "performance": [220,270], "point": [250]},
      {"label": "Profit", "sublabel": "%", "range": [20, 25, 30],
       "performance": [21, 23], "point": [26]},
      {"label": "Order Size", "sublabel":"US$, average","range": [350, 500, 600],
       "performance": [100,320],"point": [550]},
      {"label": "New Customers", "sublabel": "count", "range": [1400, 2000, 2500],
       "performance": [1000, 1650],"point": [2100]},
      {"label": "Satisfaction", "sublabel": "out of 5","range": [3.5, 4.25, 5],
       "performance": [3.2, 4.7], "point": [4.4]}
    ]

    fig = ff.create_bullet(
        data, titles='label', subtitles='sublabel', markers='point',
        measures='performance', ranges='range', orientation='h',
        title='my simple bullet chart'
    )
    py.iplot(fig)
    ```

    Example 2: Use a DataFrame with Custom Colors
    ```
    import plotly.plotly as py
    import plotly.figure_factory as ff

    import pandas as pd

    data = pd.read_json('https://cdn.rawgit.com/plotly/datasets/master/BulletData.json')

    fig = ff.create_bullet(
        data, titles='title', markers='markers', measures='measures',
        orientation='v', measure_colors=['rgb(14, 52, 75)', 'rgb(31, 141, 127)'],
        scatter_options={'marker': {'symbol': 'circle'}}, width=700
    )
    py.iplot(fig)
    ```
    """
    # validate data
    if not pd:
        raise exceptions.ImportError(
            "'pandas' must be installed for this figure factory."
        )

    if is_sequence(data):
        if not all(isinstance(item, dict) for item in data):
            raise exceptions.PlotlyError(
                'Every entry of the data argument list, tuple, etc must '
                'be a dictionary.'
            )

    elif not isinstance(data, pd.DataFrame):
        raise exceptions.PlotlyError(
            'You must input a pandas DataFrame, or a list of dictionaries.'
        )

    # make DataFrame from data with correct column headers.
    # The subtitle column must be spelled 'subtitles' for _bullet to pick it
    # up ("'subtitles' in df"); the old hard-coded 'subtitle' name silently
    # dropped every subtitle. Keep the old name when no subtitles were
    # requested so subtitle-less charts render exactly as before.
    col_names = ['titles',
                 'subtitles' if subtitles else 'subtitle',
                 'markers', 'measures', 'ranges']
    if is_sequence(data):
        df = pd.DataFrame(
            [
                [d[titles] for d in data] if titles else [''] * len(data),
                [d[subtitles] for d in data] if subtitles else [''] * len(data),
                [d[markers] for d in data] if markers else [[]] * len(data),
                [d[measures] for d in data] if measures else [[]] * len(data),
                [d[ranges] for d in data] if ranges else [[]] * len(data),
            ],
            index=col_names
        )
    elif isinstance(data, pd.DataFrame):
        df = pd.DataFrame(
            [
                data[titles].tolist() if titles else [''] * len(data),
                data[subtitles].tolist() if subtitles else [''] * len(data),
                data[markers].tolist() if markers else [[]] * len(data),
                data[measures].tolist() if measures else [[]] * len(data),
                data[ranges].tolist() if ranges else [[]] * len(data),
            ],
            index=col_names
        )
    df = pd.DataFrame.transpose(df)

    # replace missing (None/NaN) ranges/measures/markers entries with [].
    # Whole-column assignment avoids pandas chained-assignment pitfalls.
    for needed_key in ['ranges', 'measures', 'markers']:
        df[needed_key] = [[] if _is_empty_value(v) else v
                          for v in df[needed_key]]

    # validate custom colors and keep the normalized 'rgb(...)' strings
    # (the old code converted the colors but discarded the result)
    normalized_colors = []
    for colors_list in [range_colors, measure_colors]:
        if colors_list:
            if len(colors_list) != 2:
                raise exceptions.PlotlyError(
                    "Both 'range_colors' or 'measure_colors' must be a list "
                    "of two valid colors."
                )
            colors.validate_colors(colors_list)
            colors_list = colors.convert_colors_to_same_type(colors_list,
                                                             'rgb')[0]
        normalized_colors.append(colors_list)
    range_colors, measure_colors = normalized_colors

    # default scatter options. The default is None (not a mutable {}) and the
    # caller's dict is copied, so no state leaks between calls and a missing
    # 'marker' key no longer raises KeyError.
    default_scatter = {
        'marker': {'size': 12,
                   'symbol': 'diamond-tall',
                   'color': 'rgb(0, 0, 0)'}
    }
    if not scatter_options:
        scatter_options = default_scatter
    else:
        merged_marker = dict(default_scatter['marker'])
        merged_marker.update(scatter_options.get('marker', {}))
        scatter_options = dict(scatter_options)
        scatter_options['marker'] = merged_marker

    fig = _bullet(
        df, markers, measures, ranges, subtitles, titles, orientation,
        range_colors, measure_colors, horizontal_spacing, vertical_spacing,
        scatter_options, layout_options,
    )

    return fig
|
from __future__ import absolute_import
import collections
import math
from plotly import colors, exceptions, optional_imports
from plotly.figure_factory import utils
import plotly
import plotly.graph_objs as go
pd = optional_imports.get_module('pandas')
def is_sequence(obj):
    """Return True if obj is a list/tuple-like sequence but not a string."""
    # collections.Sequence was removed in Python 3.10; import the ABC from
    # collections.abc with a fallback for Python 2.
    try:
        from collections.abc import Sequence
    except ImportError:  # Python 2
        from collections import Sequence
    return isinstance(obj, Sequence) and not isinstance(obj, str)
def _bullet(df, markers, measures, ranges, subtitles, titles, orientation,
            range_colors, measure_colors, horizontal_spacing,
            vertical_spacing, scatter_options, layout_options):
    """Assemble the bullet-chart figure from the normalized DataFrame.

    Each row of df becomes one subplot ("lane") containing the stacked
    qualitative range bars, the thinner quantitative measure bars and the
    marker symbols, plus a title/subtitle annotation.

    NOTE(review): the markers/measures/ranges/subtitles/titles parameters are
    never read here (data is accessed through the fixed df column names), and
    the local name `markers` is shadowed by the Scatter trace below.
    """
    num_of_lanes = len(df)
    # 'h' orientation: one subplot row per lane; 'v': one subplot column per lane
    num_of_rows = num_of_lanes if orientation == 'h' else 1
    num_of_cols = 1 if orientation == 'h' else num_of_lanes
    if not horizontal_spacing:
        horizontal_spacing = 1./num_of_lanes
    if not vertical_spacing:
        vertical_spacing = 1./num_of_lanes
    fig = plotly.tools.make_subplots(
        num_of_rows, num_of_cols, print_grid=False,
        horizontal_spacing=horizontal_spacing,
        vertical_spacing=vertical_spacing
    )
    # base layout defaults (may be overridden by layout_options below)
    fig['layout'].update(
        dict(shapes=[]),
        title='Bullet Chart',
        height=600,
        width=1000,
        showlegend=False,
        barmode='stack',
        annotations=[],
        margin=dict(l=120 if orientation == 'h' else 80),
    )
    # apply user-supplied layout overrides
    fig['layout'].update(layout_options)
    # the "width" axis runs across a lane, the "length" axis along its values
    if orientation == 'h':
        width_axis = 'yaxis'
        length_axis = 'xaxis'
    else:
        width_axis = 'xaxis'
        length_axis = 'yaxis'
    for key in fig['layout']:
        if 'xaxis' in key or 'yaxis' in key:
            fig['layout'][key]['showgrid'] = False
            fig['layout'][key]['zeroline'] = False
        if length_axis in key:
            fig['layout'][key]['tickwidth'] = 1
        if width_axis in key:
            fig['layout'][key]['showticklabels'] = False
            fig['layout'][key]['range'] = [0, 1]
    # narrow domain if there is only a single lane
    if num_of_lanes <= 1:
        fig['layout'][width_axis + '1']['domain'] = [0.4, 0.6]
    if not range_colors:
        range_colors = ['rgb(200, 200, 200)', 'rgb(245, 245, 245)']
    if not measure_colors:
        measure_colors = ['rgb(31, 119, 180)', 'rgb(176, 196, 221)']
    for row in range(num_of_lanes):
        # background range bars (qualitative bands), drawn widest-value first
        for idx in range(len(df.iloc[row]['ranges'])):
            inter_colors = colors.n_colors(
                range_colors[0], range_colors[1],
                len(df.iloc[row]['ranges']), 'rgb'
            )
            x = ([sorted(df.iloc[row]['ranges'])[-1 - idx]] if
                 orientation == 'h' else [0])
            y = ([0] if orientation == 'h' else
                 [sorted(df.iloc[row]['ranges'])[-1 - idx]])
            bar = go.Bar(
                x=x,
                y=y,
                marker=dict(
                    color=inter_colors[-1 - idx]
                ),
                name='ranges',
                hoverinfo='x' if orientation == 'h' else 'y',
                orientation=orientation,
                width=2,
                base=0,
                xaxis='x{}'.format(row + 1),
                yaxis='y{}'.format(row + 1)
            )
            fig.add_trace(bar)
        # foreground measure bars (quantitative values), thinner than ranges
        for idx in range(len(df.iloc[row]['measures'])):
            inter_colors = colors.n_colors(
                measure_colors[0], measure_colors[1],
                len(df.iloc[row]['measures']), 'rgb'
            )
            x = ([sorted(df.iloc[row]['measures'])[-1 - idx]] if
                 orientation == 'h' else [0.5])
            y = ([0.5] if orientation == 'h'
                 else [sorted(df.iloc[row]['measures'])[-1 - idx]])
            bar = go.Bar(
                x=x,
                y=y,
                marker=dict(
                    color=inter_colors[-1 - idx]
                ),
                name='measures',
                hoverinfo='x' if orientation == 'h' else 'y',
                orientation=orientation,
                width=0.4,
                base=0,
                xaxis='x{}'.format(row + 1),
                yaxis='y{}'.format(row + 1)
            )
            fig.add_trace(bar)
        # marker symbols (target points); note this rebinds the `markers` name
        x = df.iloc[row]['markers'] if orientation == 'h' else [0.5]
        y = [0.5] if orientation == 'h' else df.iloc[row]['markers']
        markers = go.Scatter(
            x=x,
            y=y,
            name='markers',
            hoverinfo='x' if orientation == 'h' else 'y',
            xaxis='x{}'.format(row + 1),
            yaxis='y{}'.format(row + 1),
            **scatter_options
        )
        fig.add_trace(markers)
        # titles and subtitles as an annotation next to the lane
        title = df.iloc[row]['titles']
        if 'subtitles' in df:
            subtitle = '<br>{}'.format(df.iloc[row]['subtitles'])
        else:
            subtitle = ''
        label = '<b>{}</b>'.format(title) + subtitle
        annot = utils.annotation_dict_for_label(
            label,
            (num_of_lanes - row if orientation == 'h' else row + 1),
            num_of_lanes,
            vertical_spacing if orientation == 'h' else horizontal_spacing,
            'row' if orientation == 'h' else 'col',
            True if orientation == 'h' else False,
            False
        )
        fig['layout']['annotations'] += (annot,)
    return fig
def _is_empty_value(value):
    """Return True for None or NaN scalars that should become empty lists."""
    # None must be tested first: math.isnan(None) raises TypeError, which is
    # why the original None check was unreachable.
    if value is None:
        return True
    try:
        return math.isnan(value)
    except TypeError:
        # lists and other non-numeric entries are kept as-is
        return False


def create_bullet(data, markers=None, measures=None, ranges=None,
                  subtitles=None, titles=None, orientation='h',
                  range_colors=('rgb(200, 200, 200)', 'rgb(245, 245, 245)'),
                  measure_colors=('rgb(31, 119, 180)', 'rgb(176, 196, 221)'),
                  horizontal_spacing=None, vertical_spacing=None,
                  scatter_options=None, **layout_options):
    """
    Returns figure for bullet chart.

    :param (pd.DataFrame | list | tuple) data: either a list/tuple of
        dictionaries or a pandas DataFrame.
    :param (str) markers: the column name or dictionary key for the markers in
        each subplot.
    :param (str) measures: the column name or dictionary key for the measure
        bars in each subplot. This bar usually represents the quantitative
        measure of performance, usually a list of two values [a, b] and are
        the blue bars in the foreground of each subplot by default.
    :param (str) ranges: the column name or dictionary key for the qualitative
        ranges of performance, usually a 3-item list [bad, okay, good]. They
        correspond to the grey bars in the background of each chart.
    :param (str) subtitles: the column name or dictionary key for the subtitle
        of each subplot chart. The subplots are displayed right underneath
        each title.
    :param (str) titles: the column name or dictionary key for the main label
        of each subplot chart.
    :param (bool) orientation: if 'h', the bars are placed horizontally as
        rows. If 'v' the bars are placed vertically in the chart.
    :param (list) range_colors: a tuple of two colors between which all
        the rectangles for the range are drawn. These rectangles are meant to
        be qualitative indicators against which the marker and measure bars
        are compared.
        Default=('rgb(200, 200, 200)', 'rgb(245, 245, 245)')
    :param (list) measure_colors: a tuple of two colors which is used to color
        the thin quantitative bars in the bullet chart.
        Default=('rgb(31, 119, 180)', 'rgb(176, 196, 221)')
    :param (float) horizontal_spacing: see the 'horizontal_spacing' param in
        plotly.tools.make_subplots. Ranges between 0 and 1.
    :param (float) vertical_spacing: see the 'vertical_spacing' param in
        plotly.tools.make_subplots. Ranges between 0 and 1.
    :param (dict) scatter_options: describes attributes for the scatter trace
        in each subplot such as name and marker size. Call
        help(plotly.graph_objs.Scatter) for more information on valid params.
    :param layout_options: describes attributes for the layout of the figure
        such as title, height and width. Call help(plotly.graph_objs.Layout)
        for more information on valid params.

    Example 1: Use a Dictionary
    ```
    import plotly
    import plotly.plotly as py
    import plotly.figure_factory as ff

    data = [
      {"label": "Revenue", "sublabel": "US$, in thousands",
       "range": [150, 225, 300], "performance": [220,270], "point": [250]},
      {"label": "Profit", "sublabel": "%", "range": [20, 25, 30],
       "performance": [21, 23], "point": [26]},
      {"label": "Order Size", "sublabel":"US$, average","range": [350, 500, 600],
       "performance": [100,320],"point": [550]},
      {"label": "New Customers", "sublabel": "count", "range": [1400, 2000, 2500],
       "performance": [1000, 1650],"point": [2100]},
      {"label": "Satisfaction", "sublabel": "out of 5","range": [3.5, 4.25, 5],
       "performance": [3.2, 4.7], "point": [4.4]}
    ]

    fig = ff.create_bullet(
        data, titles='label', subtitles='sublabel', markers='point',
        measures='performance', ranges='range', orientation='h',
        title='my simple bullet chart'
    )
    py.iplot(fig)
    ```

    Example 2: Use a DataFrame with Custom Colors
    ```
    import plotly.plotly as py
    import plotly.figure_factory as ff

    import pandas as pd

    data = pd.read_json('https://cdn.rawgit.com/plotly/datasets/master/BulletData.json')

    fig = ff.create_bullet(
        data, titles='title', markers='markers', measures='measures',
        orientation='v', measure_colors=['rgb(14, 52, 75)', 'rgb(31, 141, 127)'],
        scatter_options={'marker': {'symbol': 'circle'}}, width=700
    )
    py.iplot(fig)
    ```
    """
    # validate data
    if not pd:
        raise exceptions.ImportError(
            "'pandas' must be installed for this figure factory."
        )

    if is_sequence(data):
        if not all(isinstance(item, dict) for item in data):
            raise exceptions.PlotlyError(
                'Every entry of the data argument list, tuple, etc must '
                'be a dictionary.'
            )

    elif not isinstance(data, pd.DataFrame):
        raise exceptions.PlotlyError(
            'You must input a pandas DataFrame, or a list of dictionaries.'
        )

    # make DataFrame from data with correct column headers.
    # The subtitle column must be spelled 'subtitles' for _bullet to pick it
    # up ("'subtitles' in df"); the old hard-coded 'subtitle' name silently
    # dropped every subtitle. Keep the old name when no subtitles were
    # requested so subtitle-less charts render exactly as before.
    col_names = ['titles',
                 'subtitles' if subtitles else 'subtitle',
                 'markers', 'measures', 'ranges']
    if is_sequence(data):
        df = pd.DataFrame(
            [
                [d[titles] for d in data] if titles else [''] * len(data),
                [d[subtitles] for d in data] if subtitles else [''] * len(data),
                [d[markers] for d in data] if markers else [[]] * len(data),
                [d[measures] for d in data] if measures else [[]] * len(data),
                [d[ranges] for d in data] if ranges else [[]] * len(data),
            ],
            index=col_names
        )
    elif isinstance(data, pd.DataFrame):
        df = pd.DataFrame(
            [
                data[titles].tolist() if titles else [''] * len(data),
                data[subtitles].tolist() if subtitles else [''] * len(data),
                data[markers].tolist() if markers else [[]] * len(data),
                data[measures].tolist() if measures else [[]] * len(data),
                data[ranges].tolist() if ranges else [[]] * len(data),
            ],
            index=col_names
        )
    df = pd.DataFrame.transpose(df)

    # replace missing (None/NaN) ranges/measures/markers entries with [].
    # Whole-column assignment avoids pandas chained-assignment pitfalls.
    for needed_key in ['ranges', 'measures', 'markers']:
        df[needed_key] = [[] if _is_empty_value(v) else v
                          for v in df[needed_key]]

    # validate custom colors and keep the normalized 'rgb(...)' strings
    # (the old code converted the colors but discarded the result)
    normalized_colors = []
    for colors_list in [range_colors, measure_colors]:
        if colors_list:
            if len(colors_list) != 2:
                raise exceptions.PlotlyError(
                    "Both 'range_colors' or 'measure_colors' must be a list "
                    "of two valid colors."
                )
            colors.validate_colors(colors_list)
            colors_list = colors.convert_colors_to_same_type(colors_list,
                                                             'rgb')[0]
        normalized_colors.append(colors_list)
    range_colors, measure_colors = normalized_colors

    # default scatter options. The default is None (not a mutable {}) and the
    # caller's dict is copied, so no state leaks between calls and a missing
    # 'marker' key no longer raises KeyError.
    default_scatter = {
        'marker': {'size': 12,
                   'symbol': 'diamond-tall',
                   'color': 'rgb(0, 0, 0)'}
    }
    if not scatter_options:
        scatter_options = default_scatter
    else:
        merged_marker = dict(default_scatter['marker'])
        merged_marker.update(scatter_options.get('marker', {}))
        scatter_options = dict(scatter_options)
        scatter_options['marker'] = merged_marker

    fig = _bullet(
        df, markers, measures, ranges, subtitles, titles, orientation,
        range_colors, measure_colors, horizontal_spacing, vertical_spacing,
        scatter_options, layout_options,
    )

    return fig
|
en
| 0.644037
|
# layout # update layout # narrow domain if 1 bar # ranges bars # measures bars # markers # titles and subtitles Returns figure for bullet chart. :param (pd.DataFrame | list | tuple) data: either a list/tuple of dictionaries or a pandas DataFrame. :param (str) markers: the column name or dictionary key for the markers in each subplot. :param (str) measures: the column name or dictionary key for the measure bars in each subplot. This bar usually represents the quantitative measure of performance, usually a list of two values [a, b] and are the blue bars in the foreground of each subplot by default. :param (str) ranges: the column name or dictionary key for the qualitative ranges of performance, usually a 3-item list [bad, okay, good]. They correspond to the grey bars in the background of each chart. :param (str) subtitles: the column name or dictionary key for the subtitle of each subplot chart. The subplots are displayed right underneath each title. :param (str) titles: the column name or dictionary key for the main label of each subplot chart. :param (bool) orientation: if 'h', the bars are placed horizontally as rows. If 'v' the bars are placed vertically in the chart. :param (list) range_colors: a tuple of two colors between which all the rectangles for the range are drawn. These rectangles are meant to be qualitative indicators against which the marker and measure bars are compared. Default=('rgb(200, 200, 200)', 'rgb(245, 245, 245)') :param (list) measure_colors: a tuple of two colors which is used to color the thin quantitative bars in the bullet chart. Default=('rgb(31, 119, 180)', 'rgb(176, 196, 221)') :param (float) horizontal_spacing: see the 'horizontal_spacing' param in plotly.tools.make_subplots. Ranges between 0 and 1. :param (float) vertical_spacing: see the 'vertical_spacing' param in plotly.tools.make_subplots. Ranges between 0 and 1. 
:param (dict) scatter_options: describes attributes for the scatter trace in each subplot such as name and marker size. Call help(plotly.graph_objs.Scatter) for more information on valid params. :param layout_options: describes attributes for the layout of the figure such as title, height and width. Call help(plotly.graph_objs.Layout) for more information on valid params. Example 1: Use a Dictionary ``` import plotly import plotly.plotly as py import plotly.figure_factory as ff data = [ {"label": "Revenue", "sublabel": "US$, in thousands", "range": [150, 225, 300], "performance": [220,270], "point": [250]}, {"label": "Profit", "sublabel": "%", "range": [20, 25, 30], "performance": [21, 23], "point": [26]}, {"label": "Order Size", "sublabel":"US$, average","range": [350, 500, 600], "performance": [100,320],"point": [550]}, {"label": "New Customers", "sublabel": "count", "range": [1400, 2000, 2500], "performance": [1000, 1650],"point": [2100]}, {"label": "Satisfaction", "sublabel": "out of 5","range": [3.5, 4.25, 5], "performance": [3.2, 4.7], "point": [4.4]} ] fig = ff.create_bullet( data, titles='label', subtitles='sublabel', markers='point', measures='performance', ranges='range', orientation='h', title='my simple bullet chart' ) py.iplot(fig) ``` Example 2: Use a DataFrame with Custom Colors ``` import plotly.plotly as py import plotly.figure_factory as ff import pandas as pd data = pd.read_json('https://cdn.rawgit.com/plotly/datasets/master/BulletData.json') fig = ff.create_bullet( data, titles='title', markers='markers', measures='measures', orientation='v', measure_colors=['rgb(14, 52, 75)', 'rgb(31, 141, 127)'], scatter_options={'marker': {'symbol': 'circle'}}, width=700 ) py.iplot(fig) ``` # validate df # make DataFrame from data with correct column headers # make sure ranges, measures, 'markers' are not NAN or NONE # validate custom colors # default scatter options # add default options to scatter_options if they are not present
| 2.476636
| 2
|
postGIS_tools/tests/test_db_creation.py
|
AltaPlanning/postGIS-tools
| 4
|
6629183
|
import postGIS_tools as pGIS
from postGIS_tools.configurations import get_postGIS_config, make_uri
from ward import test
def _test_make_new_database(hostname):
DATABASE = "test_db"
user_config, super_user_config = get_postGIS_config(verbose=False)
uri = make_uri(DATABASE, **user_config[hostname])
super_uri = make_uri(**super_user_config[hostname])
# Make a new database
pGIS.make_new_database(uri_defaultdb=super_uri, uri_newdb=uri, debug=False)
# Confirm it exists
assert pGIS.database_exists(DATABASE, uri=uri, default_db=super_user_config[hostname]["database"], debug=False)
@test("make_new_database() makes a database on localhost")
def _():
_test_make_new_database("localhost")
@test("make_new_database() makes a database on digital ocean")
def _():
_test_make_new_database("digitalocean_projects")
|
import postGIS_tools as pGIS
from postGIS_tools.configurations import get_postGIS_config, make_uri
from ward import test
def _test_make_new_database(hostname):
DATABASE = "test_db"
user_config, super_user_config = get_postGIS_config(verbose=False)
uri = make_uri(DATABASE, **user_config[hostname])
super_uri = make_uri(**super_user_config[hostname])
# Make a new database
pGIS.make_new_database(uri_defaultdb=super_uri, uri_newdb=uri, debug=False)
# Confirm it exists
assert pGIS.database_exists(DATABASE, uri=uri, default_db=super_user_config[hostname]["database"], debug=False)
@test("make_new_database() makes a database on localhost")
def _():
_test_make_new_database("localhost")
@test("make_new_database() makes a database on digital ocean")
def _():
_test_make_new_database("digitalocean_projects")
|
en
| 0.510211
|
# Make a new database # Confirm it exists
| 2.243029
| 2
|
products/migrations/0011_auto_20200520_1323.py
|
stanwood/traidoo-api
| 3
|
6629184
|
<gh_stars>1-10
# Generated by Django 2.2.10 on 2020-05-20 13:23
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("products", "0010_auto_20200206_1358"),
]
operations = [
migrations.AlterField(
model_name="product",
name="amount",
field=models.DecimalField(
decimal_places=2,
max_digits=10,
validators=[
django.core.validators.MinValueValidator(
0, message="Amount should not be negative"
)
],
verbose_name="Amount in lot",
),
),
migrations.AlterField(
model_name="product",
name="category",
field=models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
to="categories.Category",
verbose_name="Category",
),
),
migrations.AlterField(
model_name="product",
name="delivery_charge",
field=models.DecimalField(
decimal_places=2,
default=0,
max_digits=10,
validators=[
django.core.validators.MinValueValidator(
0, message="Delivery charge should not be negative"
)
],
),
),
migrations.AlterField(
model_name="product",
name="description",
field=models.TextField(verbose_name="Description"),
),
migrations.AlterField(
model_name="product",
name="is_gluten_free",
field=models.BooleanField(default=False, verbose_name="Is gluten free"),
),
migrations.AlterField(
model_name="product",
name="is_gmo_free",
field=models.BooleanField(default=False, verbose_name="Is gmo free"),
),
migrations.AlterField(
model_name="product",
name="is_grazing_animal",
field=models.BooleanField(default=False, verbose_name="Is grazing animal"),
),
migrations.AlterField(
model_name="product",
name="is_organic",
field=models.BooleanField(default=False, verbose_name="Is organic"),
),
migrations.AlterField(
model_name="product",
name="is_vegan",
field=models.BooleanField(default=False, verbose_name="Is vegan"),
),
migrations.AlterField(
model_name="product",
name="item_quantity",
field=models.DecimalField(
blank=True,
decimal_places=2,
max_digits=10,
null=True,
validators=[
django.core.validators.MinValueValidator(
0, message="Items quantity should not be negative"
)
],
),
),
migrations.AlterField(
model_name="product",
name="name",
field=models.CharField(max_length=255, verbose_name="Name"),
),
migrations.AlterField(
model_name="product",
name="price",
field=models.DecimalField(
decimal_places=2,
max_digits=10,
validators=[
django.core.validators.MinValueValidator(
0, message="Price should not be negative"
)
],
verbose_name="Price",
),
),
migrations.AlterField(
model_name="product",
name="region",
field=models.ForeignKey(
help_text="Region of origin",
on_delete=django.db.models.deletion.PROTECT,
related_name="products",
to="common.Region",
verbose_name="Region of origin",
),
),
migrations.AlterField(
model_name="product",
name="regions",
field=models.ManyToManyField(
help_text="The associated regions the product should be available in",
to="common.Region",
verbose_name="Available in regions",
),
),
migrations.AlterField(
model_name="product",
name="seller",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
verbose_name="Seller",
),
),
migrations.AlterField(
model_name="product",
name="unit",
field=models.CharField(
blank=True, max_length=255, null=True, verbose_name="Unit"
),
),
migrations.AlterField(
model_name="product",
name="vat",
field=models.DecimalField(
decimal_places=2,
max_digits=10,
validators=[
django.core.validators.MaxValueValidator(
100, "VAT should not be more than 100%"
),
django.core.validators.MinValueValidator(
0, message="VAT should not be negative"
),
],
verbose_name="VAT rate",
),
),
]
|
# Generated by Django 2.2.10 on 2020-05-20 13:23
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("products", "0010_auto_20200206_1358"),
]
operations = [
migrations.AlterField(
model_name="product",
name="amount",
field=models.DecimalField(
decimal_places=2,
max_digits=10,
validators=[
django.core.validators.MinValueValidator(
0, message="Amount should not be negative"
)
],
verbose_name="Amount in lot",
),
),
migrations.AlterField(
model_name="product",
name="category",
field=models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
to="categories.Category",
verbose_name="Category",
),
),
migrations.AlterField(
model_name="product",
name="delivery_charge",
field=models.DecimalField(
decimal_places=2,
default=0,
max_digits=10,
validators=[
django.core.validators.MinValueValidator(
0, message="Delivery charge should not be negative"
)
],
),
),
migrations.AlterField(
model_name="product",
name="description",
field=models.TextField(verbose_name="Description"),
),
migrations.AlterField(
model_name="product",
name="is_gluten_free",
field=models.BooleanField(default=False, verbose_name="Is gluten free"),
),
migrations.AlterField(
model_name="product",
name="is_gmo_free",
field=models.BooleanField(default=False, verbose_name="Is gmo free"),
),
migrations.AlterField(
model_name="product",
name="is_grazing_animal",
field=models.BooleanField(default=False, verbose_name="Is grazing animal"),
),
migrations.AlterField(
model_name="product",
name="is_organic",
field=models.BooleanField(default=False, verbose_name="Is organic"),
),
migrations.AlterField(
model_name="product",
name="is_vegan",
field=models.BooleanField(default=False, verbose_name="Is vegan"),
),
migrations.AlterField(
model_name="product",
name="item_quantity",
field=models.DecimalField(
blank=True,
decimal_places=2,
max_digits=10,
null=True,
validators=[
django.core.validators.MinValueValidator(
0, message="Items quantity should not be negative"
)
],
),
),
migrations.AlterField(
model_name="product",
name="name",
field=models.CharField(max_length=255, verbose_name="Name"),
),
migrations.AlterField(
model_name="product",
name="price",
field=models.DecimalField(
decimal_places=2,
max_digits=10,
validators=[
django.core.validators.MinValueValidator(
0, message="Price should not be negative"
)
],
verbose_name="Price",
),
),
migrations.AlterField(
model_name="product",
name="region",
field=models.ForeignKey(
help_text="Region of origin",
on_delete=django.db.models.deletion.PROTECT,
related_name="products",
to="common.Region",
verbose_name="Region of origin",
),
),
migrations.AlterField(
model_name="product",
name="regions",
field=models.ManyToManyField(
help_text="The associated regions the product should be available in",
to="common.Region",
verbose_name="Available in regions",
),
),
migrations.AlterField(
model_name="product",
name="seller",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
verbose_name="Seller",
),
),
migrations.AlterField(
model_name="product",
name="unit",
field=models.CharField(
blank=True, max_length=255, null=True, verbose_name="Unit"
),
),
migrations.AlterField(
model_name="product",
name="vat",
field=models.DecimalField(
decimal_places=2,
max_digits=10,
validators=[
django.core.validators.MaxValueValidator(
100, "VAT should not be more than 100%"
),
django.core.validators.MinValueValidator(
0, message="VAT should not be negative"
),
],
verbose_name="VAT rate",
),
),
]
|
en
| 0.755214
|
# Generated by Django 2.2.10 on 2020-05-20 13:23
| 1.600097
| 2
|
src/generator.py
|
bmoxon/azfinsim
| 5
|
6629185
|
<filename>src/generator.py
#! /usr/bin/env python3
#
# generator.py: Load the AzFinsim Cache with randomly generated trade data of specified length
#
import argparse
import time
import sys
import psutil
import logging
from multiprocessing.pool import ThreadPool
from azure.identity import DefaultAzureCredential
from config import *
import config
import azlog
import xmlutils
import utils
import secrets
from getargs import getargs
log = azlog.getLogger(__name__)
#-- todo: fix nbytes for var, and fold these variables into create_trades
format=""
tradenum=0
batchsize=10000
#-- legacy serial method
def create_trades(tradenum):
if (args.format == "eyxml"):
xmlstring = xmlutils.GenerateTradeEY(tradenum,1)
elif (format == "varxml"):
nbytes=1 #-- TBD need to pass this
xmlstring = xmlutils.GenerateTrade(tradenum,nbytes)
utils.PutTrade(cache_type,"input",r,format,tradenum,xmlstring)
#-- pipeline / batching method
def create_trade_range(start_trade):
batchsent=0
stop_trade=start_trade+batchsize
log.info("Generating batch: %d-%d",start_trade,stop_trade-1)
with r.pipeline() as pipe:
for tradenum in range (start_trade, stop_trade):
keyname = "<KEY>" % (tradenum)
nbytes=1
xmlstring = xmlutils.GenerateTradeEY(tradenum,nbytes)
pipe.set(keyname,xmlstring)
log.info("Executing batch: %d-%d",start_trade,stop_trade)
pipe.execute()
if __name__ == "__main__":
#-- grab cli args
args = getargs("generator")
#-- verbosity
azlog.setDebug(args.verbose)
log.info("Starting trade generator...")
#-- pull keys/passwords from the keyvault
log.info("Reading keyvault secrets")
secrets.ReadKVSecrets()
log.info("Done.")
#-- set threads to vcore count unless specified
vcores = psutil.cpu_count(logical=True)
pcores = psutil.cpu_count(logical=False)
log.info("Generator Client: Physical Cores: %d Logical Cores: %d" % (pcores,vcores))
if (args.threads): threads = args.threads
else: threads = vcores
#-- open connection to cache
log.info("Setting up cache connection")
if (args.cache_type == "redis" or args.cache_type == "hazelcast"):
r = utils.SetupCacheConn(args.cache_type,args.cache_name,args.cache_port,config.AZFINSIM_REDISKEY,args.cache_ssl)
if r is None:
log.error("Cannot connect to Cache DB: %s, %s, %s" % args.cache_name,args.cache_port,args.cache_key,args.cache_ssl)
sys.exit(1)
log.info("Done.")
#nbytes=args.nbytes
cache_type = args.cache_type
format = args.format
start_trade=args.start_trade
stop_trade=start_trade+args.trade_window
thread_pool = ThreadPool(threads)
log.info(f'Starting the thread pool and filling the cache (%d threads)', threads)
log.info(f'Generating %d trades in range %d to %d', args.trade_window,start_trade,stop_trade-1)
log.info(f'Batchsize for pipeline to redis: %d',batchsize)
start=time.perf_counter()
# thread_pool.map(create_trades, range(start_trade,stop_trade))
thread_pool.map(create_trade_range, range(start_trade, stop_trade, batchsize))
end=time.perf_counter()
timedelta=end-start
log.info("Done.")
log.info("Cache filled with %d trades in %.12f seconds" % (args.trade_window,timedelta))
|
<filename>src/generator.py
#! /usr/bin/env python3
#
# generator.py: Load the AzFinsim Cache with randomly generated trade data of specified length
#
import argparse
import time
import sys
import psutil
import logging
from multiprocessing.pool import ThreadPool
from azure.identity import DefaultAzureCredential
from config import *
import config
import azlog
import xmlutils
import utils
import secrets
from getargs import getargs
log = azlog.getLogger(__name__)
#-- todo: fix nbytes for var, and fold these variables into create_trades
format=""
tradenum=0
batchsize=10000
#-- legacy serial method
def create_trades(tradenum):
if (args.format == "eyxml"):
xmlstring = xmlutils.GenerateTradeEY(tradenum,1)
elif (format == "varxml"):
nbytes=1 #-- TBD need to pass this
xmlstring = xmlutils.GenerateTrade(tradenum,nbytes)
utils.PutTrade(cache_type,"input",r,format,tradenum,xmlstring)
#-- pipeline / batching method
def create_trade_range(start_trade):
batchsent=0
stop_trade=start_trade+batchsize
log.info("Generating batch: %d-%d",start_trade,stop_trade-1)
with r.pipeline() as pipe:
for tradenum in range (start_trade, stop_trade):
keyname = "<KEY>" % (tradenum)
nbytes=1
xmlstring = xmlutils.GenerateTradeEY(tradenum,nbytes)
pipe.set(keyname,xmlstring)
log.info("Executing batch: %d-%d",start_trade,stop_trade)
pipe.execute()
if __name__ == "__main__":
#-- grab cli args
args = getargs("generator")
#-- verbosity
azlog.setDebug(args.verbose)
log.info("Starting trade generator...")
#-- pull keys/passwords from the keyvault
log.info("Reading keyvault secrets")
secrets.ReadKVSecrets()
log.info("Done.")
#-- set threads to vcore count unless specified
vcores = psutil.cpu_count(logical=True)
pcores = psutil.cpu_count(logical=False)
log.info("Generator Client: Physical Cores: %d Logical Cores: %d" % (pcores,vcores))
if (args.threads): threads = args.threads
else: threads = vcores
#-- open connection to cache
log.info("Setting up cache connection")
if (args.cache_type == "redis" or args.cache_type == "hazelcast"):
r = utils.SetupCacheConn(args.cache_type,args.cache_name,args.cache_port,config.AZFINSIM_REDISKEY,args.cache_ssl)
if r is None:
log.error("Cannot connect to Cache DB: %s, %s, %s" % args.cache_name,args.cache_port,args.cache_key,args.cache_ssl)
sys.exit(1)
log.info("Done.")
#nbytes=args.nbytes
cache_type = args.cache_type
format = args.format
start_trade=args.start_trade
stop_trade=start_trade+args.trade_window
thread_pool = ThreadPool(threads)
log.info(f'Starting the thread pool and filling the cache (%d threads)', threads)
log.info(f'Generating %d trades in range %d to %d', args.trade_window,start_trade,stop_trade-1)
log.info(f'Batchsize for pipeline to redis: %d',batchsize)
start=time.perf_counter()
# thread_pool.map(create_trades, range(start_trade,stop_trade))
thread_pool.map(create_trade_range, range(start_trade, stop_trade, batchsize))
end=time.perf_counter()
timedelta=end-start
log.info("Done.")
log.info("Cache filled with %d trades in %.12f seconds" % (args.trade_window,timedelta))
|
en
| 0.310978
|
#! /usr/bin/env python3 # # generator.py: Load the AzFinsim Cache with randomly generated trade data of specified length # #-- todo: fix nbytes for var, and fold these variables into create_trades #-- legacy serial method #-- TBD need to pass this #-- pipeline / batching method #-- grab cli args #-- verbosity #-- pull keys/passwords from the keyvault #-- set threads to vcore count unless specified #-- open connection to cache #nbytes=args.nbytes # thread_pool.map(create_trades, range(start_trade,stop_trade))
| 2.274997
| 2
|
adles/interfaces/docker_interface.py
|
devAmoghS/ADLES
| 0
|
6629186
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
try:
import docker # NOTE(cgoes): has not been tested with Python 3.6 yet
except ImportError as ex:
logging.error("Could not import docker module. "
"Install it using 'pip install docker'")
raise ex
import adles.utils as utils
from adles.interfaces import Interface
class DockerInterface(Interface):
"""Generic interface for the Docker platform."""
__version__ = "0.2.2"
def __init__(self, infra, spec):
"""
:param dict infra: Dict of infrastructure information
:param dict spec: Dict of a parsed specification
"""
super(self.__class__, self).__init__(infra=infra, spec=spec)
self._log = logging.getLogger(str(self.__class__))
self._log.debug("Initializing %s %s", self.__class__, self.__version__)
# If needed, a wrapper class that simplifies
# the creation of containers will be made
# Reference:
# https://docker-py.readthedocs.io/en/stable/client.html#client-reference
# Initialize the Docker client
self.client = docker.DockerClient(base_url=infra.get("url",
"unix:///var/run/"
"docker.sock"),
tls=bool(infra.get("tls", True)))
# Verify the connection to the client
self.client.ping()
self._log.debug("System info : %s", str(self.client.info()))
self._log.debug("System version : %s", str(self.client.version()))
# Authenticate to registry, if configured
if "registry" in self.infra:
reg = self.infra["registry"]
reg_logins = utils.read_json(reg["login-file"])
self.client.login(username=reg_logins["user"],
password=reg_logins["pass"],
registry=reg["url"])
# List images currently on the server
self._log.debug("Images: %s", str(self.client.images.list()))
def create_masters(self):
pass
def deploy_environment(self):
pass
def cleanup_masters(self, network_cleanup=False):
pass
def cleanup_environment(self, network_cleanup=False):
pass
def __str__(self):
return str(self.client.info() + "\nVersion:\t" + self.client.version())
def __eq__(self, other):
return super(self.__class__, self).__eq__(other) \
and self.client == other.client
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
try:
import docker # NOTE(cgoes): has not been tested with Python 3.6 yet
except ImportError as ex:
logging.error("Could not import docker module. "
"Install it using 'pip install docker'")
raise ex
import adles.utils as utils
from adles.interfaces import Interface
class DockerInterface(Interface):
"""Generic interface for the Docker platform."""
__version__ = "0.2.2"
def __init__(self, infra, spec):
"""
:param dict infra: Dict of infrastructure information
:param dict spec: Dict of a parsed specification
"""
super(self.__class__, self).__init__(infra=infra, spec=spec)
self._log = logging.getLogger(str(self.__class__))
self._log.debug("Initializing %s %s", self.__class__, self.__version__)
# If needed, a wrapper class that simplifies
# the creation of containers will be made
# Reference:
# https://docker-py.readthedocs.io/en/stable/client.html#client-reference
# Initialize the Docker client
self.client = docker.DockerClient(base_url=infra.get("url",
"unix:///var/run/"
"docker.sock"),
tls=bool(infra.get("tls", True)))
# Verify the connection to the client
self.client.ping()
self._log.debug("System info : %s", str(self.client.info()))
self._log.debug("System version : %s", str(self.client.version()))
# Authenticate to registry, if configured
if "registry" in self.infra:
reg = self.infra["registry"]
reg_logins = utils.read_json(reg["login-file"])
self.client.login(username=reg_logins["user"],
password=reg_logins["pass"],
registry=reg["url"])
# List images currently on the server
self._log.debug("Images: %s", str(self.client.images.list()))
def create_masters(self):
pass
def deploy_environment(self):
pass
def cleanup_masters(self, network_cleanup=False):
pass
def cleanup_environment(self, network_cleanup=False):
pass
def __str__(self):
return str(self.client.info() + "\nVersion:\t" + self.client.version())
def __eq__(self, other):
return super(self.__class__, self).__eq__(other) \
and self.client == other.client
|
en
| 0.829729
|
# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # NOTE(cgoes): has not been tested with Python 3.6 yet Generic interface for the Docker platform. :param dict infra: Dict of infrastructure information :param dict spec: Dict of a parsed specification # If needed, a wrapper class that simplifies # the creation of containers will be made # Reference: # https://docker-py.readthedocs.io/en/stable/client.html#client-reference # Initialize the Docker client # Verify the connection to the client # Authenticate to registry, if configured # List images currently on the server
| 2.014465
| 2
|
MachineLearning/iris-k-nn-classifier.py
|
bahattin-urganci/python-training
| 0
|
6629187
|
import random
from scipy.spatial import distance
def euc(a, b):
return distance.euclidean(a, b)
class ScrappyKNN():
def fit(self, X_train, y_train):
self.X_train = X_train
self.y_train = y_train
def predict(self, X_test):
predictions = []
for row in X_test:
#label = self.closest(row)
label=random.choice(self.y_train)
predictions.append(label)
return predictions
def closest(self, row):
best_dist = euc(row, self.X_train[0])
best_index = 0
for i in range(1, len(self.X_train)):
dist = euc(row, self.X_train[i])
if dist < best_dist:
best_dist = dist
best_index = i
return self.y_train[best_index]
from sklearn.datasets import load_iris
from sklearn import tree
iris = load_iris()
X = iris.data
y = iris.target
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5)
#from sklearn.neighbors import KNeighborsClassifier
my_classifier = ScrappyKNN()
my_classifier.fit(X_train, y_train)
predictions = my_classifier.predict(X_test)
from sklearn.metrics import accuracy_score
print(accuracy_score(y_test, predictions))
|
import random
from scipy.spatial import distance
def euc(a, b):
return distance.euclidean(a, b)
class ScrappyKNN():
def fit(self, X_train, y_train):
self.X_train = X_train
self.y_train = y_train
def predict(self, X_test):
predictions = []
for row in X_test:
#label = self.closest(row)
label=random.choice(self.y_train)
predictions.append(label)
return predictions
def closest(self, row):
best_dist = euc(row, self.X_train[0])
best_index = 0
for i in range(1, len(self.X_train)):
dist = euc(row, self.X_train[i])
if dist < best_dist:
best_dist = dist
best_index = i
return self.y_train[best_index]
from sklearn.datasets import load_iris
from sklearn import tree
iris = load_iris()
X = iris.data
y = iris.target
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5)
#from sklearn.neighbors import KNeighborsClassifier
my_classifier = ScrappyKNN()
my_classifier.fit(X_train, y_train)
predictions = my_classifier.predict(X_test)
from sklearn.metrics import accuracy_score
print(accuracy_score(y_test, predictions))
|
en
| 0.364447
|
#label = self.closest(row) #from sklearn.neighbors import KNeighborsClassifier
| 3.07953
| 3
|
beastx/modules/Adultzone.py
|
Yaamiin/Beast-X
| 1
|
6629188
|
<reponame>Yaamiin/Beast-X
# credits to Pawan
# ported to Pawanbir by @I_AM_PAWANBIR
# will be adding more soon
import asyncio
import os
import urllib
import requests
from userbot import *
from userbot.utils import *
@bot.on(admin_cmd("boobs$"))
async def boobs(event):
if not os.path.isdir(Var.TEMP_DOWNLOAD_DIRECTORY):
os.makedirs(Var.TEMP_DOWNLOAD_DIRECTORY)
pic_loc = os.path.join(Var.TEMP_DOWNLOAD_DIRECTORY, "bobs.jpg")
a = await event.reply("Finding some big boobs for u 🧐")
await asyncio.sleep(0.5)
await a.edit("Sending some big boobs🤪")
nsfw = requests.get("http://api.oboobs.ru/noise/1").json()[0]["preview"]
urllib.request.urlretrieve("http://media.oboobs.ru/{}".format(nsfw), pic_loc)
await event.client.send_file(event.chat_id, pic_loc, force_document=False)
os.remove(pic_loc)
await event.delete()
await a.delete()
@bot.on(admin_cmd("butts$"))
async def butts(event):
if not os.path.isdir(Var.TEMP_DOWNLOAD_DIRECTORY):
os.makedirs(Var.TEMP_DOWNLOAD_DIRECTORY)
pic_loc = os.path.join(Var.TEMP_DOWNLOAD_DIRECTORY, "butts.jpg")
a = await event.reply("Finding some beautiful butts for u🧐")
await asyncio.sleep(0.5)
await a.edit("Sending some beautiful butts🤪")
nsfw = requests.get("http://api.obutts.ru/noise/1").json()[0]["preview"]
urllib.request.urlretrieve("http://media.obutts.ru/{}".format(nsfw), pic_loc)
await event.client.send_file(event.chat_id, pic_loc, force_document=False)
os.remove(pic_loc)
await event.delete()
await a.delete()
CMD_HELP.update(
{
"adultzone": "**Plugin : **`adultzone`\
\n\n**Syntax : **`.boobs`\
\n**Usage :** Searchs and sends random B××Bs image\
\n\n**Syntax :**`.butts`\
\n**Usage :** Hadii aad 18 ka yartahay laabo\
\n\n\n __**WARNING!! 18+ MODULE**__"
}
)
|
# credits to Pawan
# ported to Pawanbir by @I_AM_PAWANBIR
# will be adding more soon
import asyncio
import os
import urllib
import requests
from userbot import *
from userbot.utils import *
@bot.on(admin_cmd("boobs$"))
async def boobs(event):
if not os.path.isdir(Var.TEMP_DOWNLOAD_DIRECTORY):
os.makedirs(Var.TEMP_DOWNLOAD_DIRECTORY)
pic_loc = os.path.join(Var.TEMP_DOWNLOAD_DIRECTORY, "bobs.jpg")
a = await event.reply("Finding some big boobs for u 🧐")
await asyncio.sleep(0.5)
await a.edit("Sending some big boobs🤪")
nsfw = requests.get("http://api.oboobs.ru/noise/1").json()[0]["preview"]
urllib.request.urlretrieve("http://media.oboobs.ru/{}".format(nsfw), pic_loc)
await event.client.send_file(event.chat_id, pic_loc, force_document=False)
os.remove(pic_loc)
await event.delete()
await a.delete()
@bot.on(admin_cmd("butts$"))
async def butts(event):
if not os.path.isdir(Var.TEMP_DOWNLOAD_DIRECTORY):
os.makedirs(Var.TEMP_DOWNLOAD_DIRECTORY)
pic_loc = os.path.join(Var.TEMP_DOWNLOAD_DIRECTORY, "butts.jpg")
a = await event.reply("Finding some beautiful butts for u🧐")
await asyncio.sleep(0.5)
await a.edit("Sending some beautiful butts🤪")
nsfw = requests.get("http://api.obutts.ru/noise/1").json()[0]["preview"]
urllib.request.urlretrieve("http://media.obutts.ru/{}".format(nsfw), pic_loc)
await event.client.send_file(event.chat_id, pic_loc, force_document=False)
os.remove(pic_loc)
await event.delete()
await a.delete()
CMD_HELP.update(
{
"adultzone": "**Plugin : **`adultzone`\
\n\n**Syntax : **`.boobs`\
\n**Usage :** Searchs and sends random B××Bs image\
\n\n**Syntax :**`.butts`\
\n**Usage :** Hadii aad 18 ka yartahay laabo\
\n\n\n __**WARNING!! 18+ MODULE**__"
}
)
|
en
| 0.882245
|
# credits to Pawan # ported to Pawanbir by @I_AM_PAWANBIR # will be adding more soon
| 2.443853
| 2
|
bikeshed/config/status.py
|
tpluscode/bikeshed
| 0
|
6629189
|
<filename>bikeshed/config/status.py
from ..messages import *
from .main import englishFromList
shortToLongStatus = {
"DREAM": "A Collection of Interesting Ideas",
"LS": "Living Standard",
"LS-COMMIT": "Commit Snapshot",
"LS-BRANCH": "Branch Snapshot",
"LS-PR": "PR Preview",
"LD": "Living Document",
"DRAFT-FINDING": "Draft Finding",
"FINDING": "Finding",
"whatwg/RD": "Review Draft",
"w3c/ED": "Editor's Draft",
"w3c/WD": "W3C Working Draft",
"w3c/FPWD": "W3C First Public Working Draft",
"w3c/LCWD": "W3C Last Call Working Draft",
"w3c/CR": "W3C Candidate Recommendation Snapshot",
"w3c/CRD": "W3C Candidate Recommendation Draft",
"w3c/PR": "W3C Proposed Recommendation",
"w3c/REC": "W3C Recommendation",
"w3c/PER": "W3C Proposed Edited Recommendation",
"w3c/WG-NOTE": "W3C Working Group Note",
"w3c/IG-NOTE": "W3C Interest Group Note",
"w3c/NOTE": "W3C Note",
"w3c/NOTE-ED": "Editor's Draft",
"w3c/NOTE-WD": "W3C Working Draft",
"w3c/NOTE-FPWD": "W3C First Public Working Draft",
"w3c/MO": "W3C Member-only Draft",
"w3c/UD": "Unofficial Proposal Draft",
"w3c/CG-DRAFT": "Draft Community Group Report",
"w3c/CG-FINAL": "Final Community Group Report",
"tc39/STAGE0": "Stage 0: Strawman",
"tc39/STAGE1": "Stage 1: Proposal",
"tc39/STAGE2": "Stage 2: Draft",
"tc39/STAGE3": "Stage 3: Candidate",
"tc39/STAGE4": "Stage 4: Finished",
"iso/I": "Issue",
"iso/DR": "Defect Report",
"iso/D": "Draft Proposal",
"iso/P": "Published Proposal",
"iso/MEET": "Meeting Announcements",
"iso/RESP": "Records of Response",
"iso/MIN": "Minutes",
"iso/ER": "Editor's Report",
"iso/SD": "Standing Document",
"iso/PWI": "Preliminary Work Item",
"iso/NP": "New Proposal",
"iso/NWIP": "New Work Item Proposal",
"iso/WD": "Working Draft",
"iso/CD": "Committee Draft",
"iso/FCD": "Final Committee Draft",
"iso/DIS": "Draft International Standard",
"iso/FDIS": "Final Draft International Standard",
"iso/PRF": "Proof of a new International Standard",
"iso/IS": "International Standard",
"iso/TR": "Technical Report",
"iso/DTR": "Draft Technical Report",
"iso/TS": "Technical Specification",
"iso/DTS": "Draft Technical Specification",
"iso/PAS": "Publicly Available Specification",
"iso/TTA": "Technology Trends Assessment",
"iso/IWA": "International Workshop Agreement",
"iso/COR": "Technical Corrigendum",
"iso/GUIDE": "Guidance to Technical Committees",
"iso/NP-AMD": "New Proposal Amendment",
"iso/AWI-AMD": "Approved new Work Item Amendment",
"iso/WD-AMD": "Working Draft Amendment",
"iso/CD-AMD": "Committee Draft Amendment",
"iso/PD-AMD": "Proposed Draft Amendment",
"iso/FPD-AMD": "Final Proposed Draft Amendment",
"iso/D-AMD": "Draft Amendment",
"iso/FD-AMD": "Final Draft Amendment",
"iso/PRF-AMD": "Proof Amendment",
"iso/AMD": "Amendment",
"fido/ED": "Editor's Draft",
"fido/WD": "Working Draft",
"fido/RD": "Review Draft",
"fido/ID": "Implementation Draft",
"fido/PS": "Proposed Standard",
"fido/FD": "Final Document",
"khronos/ED": "Editor's Draft",
}
snapshotStatuses = [
"w3c/WD",
"w3c/FPWD",
"w3c/LCWD",
"w3c/CR",
"w3c/CRD",
"w3c/PR",
"w3c/REC",
"w3c/PER",
"w3c/WG-NOTE",
"w3c/IG-NOTE",
"w3c/NOTE",
"w3c/NOTE-WD",
"w3c/NOTE-FPWD",
"w3c/MO",
]
datedStatuses = [
"w3c/WD",
"w3c/FPWD",
"w3c/LCWD",
"w3c/CR",
"w3c/CRD",
"w3c/PR",
"w3c/REC",
"w3c/PER",
"w3c/WG-NOTE",
"w3c/IG-NOTE",
"w3c/NOTE",
"w3c/NOTE-WD",
"w3c/NOTE-FPWD",
"w3c/MO",
"whatwg/RD",
]
implementationStatuses = ["w3c/CR", "w3c/CRD", "w3c/PR", "w3c/REC"]
unlevelledStatuses = [
"LS",
"LD",
"DREAM",
"w3c/UD",
"LS-COMMIT",
"LS-BRANCH",
"LS-PR",
"FINDING",
"DRAFT-FINDING",
"whatwg/RD",
]
deadlineStatuses = ["w3c/LCWD", "w3c/PR"]
noEDStatuses = [
"LS",
"LS-COMMIT",
"LS-BRANCH",
"LS-PR",
"LD",
"FINDING",
"DRAFT-FINDING",
"DREAM",
"iso/NP",
"whatwg/RD",
]
# W3C statuses are restricted in various confusing ways.
# These statuses are usable by any group operating under the W3C Process
# Document. (So, not by Community and Business Groups.)
w3cProcessDocumentStatuses = frozenset(
[
"w3c/ED",
"w3c/NOTE",
"w3c/NOTE-ED",
"w3c/NOTE-WD",
"w3c/NOTE-FPWD",
"w3c/UD",
]
)
# Interest Groups are limited to these statuses
w3cIGStatuses = frozenset(["w3c/IG-NOTE"]).union(w3cProcessDocumentStatuses)
# Working Groups are limited to these statuses
w3cWGStatuses = frozenset(
[
"w3c/WD",
"w3c/FPWD",
"w3c/LCWD",
"w3c/CR",
"w3c/CRD",
"w3c/PR",
"w3c/REC",
"w3c/PER",
"w3c/WG-NOTE",
]
).union(w3cProcessDocumentStatuses)
# The TAG is limited to these statuses
w3cTAGStatuses = frozenset(
[
"DRAFT-FINDING",
"FINDING",
"w3c/WG-NOTE", # despite the TAG not being a WG. I know, it's weird.
]
).union(w3cProcessDocumentStatuses)
# Community and Business Groups are limited to these statuses
w3cCommunityStatuses = frozenset(["w3c/CG-DRAFT", "w3c/CG-FINAL"])
megaGroups = {
"w3c": frozenset(
[
"act-framework",
"audiowg",
"browser-testing-tools",
"csswg",
"dap",
"fxtf",
"fxtf-csswg",
"geolocation",
"gpuwg",
"houdini",
"html",
"i18n",
"immersivewebcg",
"immersivewebwg",
"mediacapture",
"mediawg",
"ping",
"privacycg",
"processcg",
"ricg",
"sacg",
"secondscreencg",
"secondscreenwg",
"serviceworkers",
"svg",
"tag",
"texttracks",
"uievents",
"wasm",
"web-bluetooth-cg",
"webapps",
"webappsec",
"webauthn",
"webfontswg",
"webml",
"webmlwg",
"web-payments",
"webperf",
"webplatform",
"webrtc",
"webspecs",
"webtransport",
"webvr",
"wicg",
]
),
"whatwg": frozenset(["whatwg"]),
"tc39": frozenset(["tc39"]),
"iso": frozenset(["wg14", "wg21"]),
"fido": frozenset(["fido"]),
"priv-sec": frozenset(
[
"audiowg",
"csswg",
"dap",
"fxtf",
"fxtf-csswg",
"geolocation",
"houdini",
"html",
"mediacapture",
"mediawg",
"ricg",
"svg",
"texttracks",
"uievents",
"web-bluetooth-cg",
"webappsec",
"webfontswg",
"webplatform",
"webspecs",
"whatwg",
]
),
"khronos": frozenset(["webgl"]),
}
# Community and business groups within the W3C:
w3cCgs = frozenset(
[
"immersivewebcg",
"privacycg",
"processcg",
"ricg",
"sacg",
"webml",
"web-bluetooth-cg",
"wicg",
]
)
assert w3cCgs.issubset(megaGroups["w3c"])
# Interest Groups within the W3C:
w3cIgs = frozenset(["ping"])
assert w3cIgs.issubset(megaGroups["w3c"])
def canonicalizeStatus(rawStatus, group):
    """Canonicalize a raw Status metadata value into its known form.

    Returns the canonical status string (e.g. "w3c/WD"), or None when
    rawStatus is None. Dies with a descriptive message when the status is
    unknown or not usable by the given group; warns (without dying) when a
    W3C group uses a status outside its group type's allowed set.
    """
    if rawStatus is None:
        return None

    def validateW3Cstatus(group, status, rawStatus):
        # Warn (but don't die) when a W3C group uses a status outside the
        # set its group type is limited to.
        if status == "DREAM":
            warn("You used Status: DREAM for a W3C document." + " Consider UD instead.")
            return

        if "w3c/" + status in shortToLongStatus:
            status = "w3c/" + status

        def formatStatusSet(statuses):
            return ", ".join(sorted({status.split("/")[-1] for status in statuses}))

        msg = "You used Status: {0}, but {1} limited to these statuses: {2}."
        if group in w3cIgs and status not in w3cIGStatuses:
            warn(
                msg,
                rawStatus,
                "W3C Interest Groups are",
                formatStatusSet(w3cIGStatuses),
            )
        if group == "tag" and status not in w3cTAGStatuses:
            warn(msg, rawStatus, "the TAG is", formatStatusSet(w3cTAGStatuses))
        if group in w3cCgs and status not in w3cCommunityStatuses:
            warn(
                msg,
                rawStatus,
                "W3C Community and Business Groups are",
                formatStatusSet(w3cCommunityStatuses),
            )

    def megaGroupsForStatus(status):
        # Returns a list of megagroups that recognize the given status.
        # BUGFIX: bare keys like "LS" or "DREAM" have no "/", so partition()
        # puts the whole key in the megagroup slot and leaves the status
        # empty; they previously never matched, making the
        # `"" in possibleMgs` branch below dead code. Record them under the
        # empty-string megagroup instead, as that branch expects.
        mgs = []
        for key in shortToLongStatus:
            mg, slash, s = key.partition("/")
            if not slash:
                # Bare status key: no org prefix.
                mg, s = "", mg
            if s == status:
                mgs.append(mg)
        return mgs

    # Canonicalize the rawStatus that was passed in, into a known form.
    # Might be foo/BAR, or just BAR.
    megaGroup, _, status = rawStatus.partition("/")
    if status == "":
        status = megaGroup
        megaGroup = ""
    megaGroup = megaGroup.lower()
    status = status.upper()
    if megaGroup:
        canonStatus = megaGroup + "/" + status
    else:
        canonStatus = status

    if group is not None:
        group = group.lower()

    if group in megaGroups["w3c"]:
        validateW3Cstatus(group, canonStatus, rawStatus)

    # Using a directly-recognized status is A-OK.
    # (Either one of the unrestricted statuses,
    # or one of the restricted statuses with the correct standards-org prefix.)
    if canonStatus in shortToLongStatus:
        return canonStatus

    possibleMgs = megaGroupsForStatus(status)

    # If they specified a standards-org prefix and it wasn't found,
    # that's an error.
    if megaGroup:
        # Was the error because the megagroup doesn't exist?
        if possibleMgs:
            if megaGroup not in megaGroups:
                msg = f"Status metadata specified an unrecognized '{megaGroup}' organization."
            else:
                msg = f"Status '{status}' can't be used with the org '{megaGroup}'."
            if "" in possibleMgs:
                if len(possibleMgs) == 1:
                    msg += f" That status must be used without an org at all, like `Status: {status}`"
                else:
                    msg += " That status can only be used with the org{} {}, or without an org at all.".format(
                        "s" if len(possibleMgs) > 1 else "",
                        englishFromList(f"'{x}'" for x in possibleMgs if x != ""),
                    )
            else:
                if len(possibleMgs) == 1:
                    msg += " That status can only be used with the org '{0}', like `Status: {0}/{1}`".format(
                        possibleMgs[0], status
                    )
                else:
                    msg += " That status can only be used with the orgs {}.".format(
                        englishFromList(f"'{x}'" for x in possibleMgs)
                    )
        else:
            if megaGroup not in megaGroups:
                msg = f"Unknown Status metadata '{canonStatus}'. Check the docs for valid Status values."
            else:
                msg = f"Status '{status}' can't be used with the org '{megaGroup}'. Check the docs for valid Status values."
        die("{0}", msg)
        return canonStatus

    # Otherwise, they provided a bare status.
    # See if their group is compatible with any of the prefixed statuses matching the bare status.
    assert (
        "" not in possibleMgs
    )  # a bare status present in the table would have hit the direct lookup above
    for mg in possibleMgs:
        if group in megaGroups[mg]:
            canonStatus = mg + "/" + status
            if mg == "w3c":
                validateW3Cstatus(group, canonStatus, rawStatus)
            return canonStatus

    # Group isn't in any compatible org, so suggest prefixing.
    if possibleMgs:
        msg = "You used Status: {}, but that's limited to the {} org{}".format(
            rawStatus,
            englishFromList(f"'{mg}'" for mg in possibleMgs),
            "s" if len(possibleMgs) > 1 else "",
        )
        if group:
            msg += ", and your group '{}' isn't recognized as being in {}.".format(
                group, "any of those orgs" if len(possibleMgs) > 1 else "that org"
            )
            msg += " If this is wrong, please file a Bikeshed issue to categorize your group properly, and/or try:\n"
            msg += "\n".join(f"Status: {mg}/{status}" for mg in possibleMgs)
        else:
            msg += ", and you don't have a Group metadata. Please declare your Group, or check the docs for statuses that can be used by anyone."
    else:
        msg = f"Unknown Status metadata '{canonStatus}'. Check the docs for valid Status values."
    die("{0}", msg)
    return canonStatus
def splitStatus(st):
    """Split a canonical status into its (megagroup, status) parts.

    A status with no "/" separator has no megagroup; None input yields
    (None, None).
    """
    if st is None:
        return None, None
    megagroup, _, status = st.partition("/")
    if status == "":
        return None, megagroup
    return megagroup, status
def looselyMatch(s1, s2):
    """Loosely match two statuses.

    The status names must be identical, and the group names must either
    be identical or be missing on at least one side.
    """
    group1, status1 = splitStatus(s1)
    group2, status2 = splitStatus(s2)
    if status1 != status2:
        return False
    return group1 == group2 or group1 is None or group2 is None
|
<filename>bikeshed/config/status.py
from ..messages import *
from .main import englishFromList
shortToLongStatus = {
"DREAM": "A Collection of Interesting Ideas",
"LS": "Living Standard",
"LS-COMMIT": "Commit Snapshot",
"LS-BRANCH": "Branch Snapshot",
"LS-PR": "PR Preview",
"LD": "Living Document",
"DRAFT-FINDING": "Draft Finding",
"FINDING": "Finding",
"whatwg/RD": "Review Draft",
"w3c/ED": "Editor's Draft",
"w3c/WD": "W3C Working Draft",
"w3c/FPWD": "W3C First Public Working Draft",
"w3c/LCWD": "W3C Last Call Working Draft",
"w3c/CR": "W3C Candidate Recommendation Snapshot",
"w3c/CRD": "W3C Candidate Recommendation Draft",
"w3c/PR": "W3C Proposed Recommendation",
"w3c/REC": "W3C Recommendation",
"w3c/PER": "W3C Proposed Edited Recommendation",
"w3c/WG-NOTE": "W3C Working Group Note",
"w3c/IG-NOTE": "W3C Interest Group Note",
"w3c/NOTE": "W3C Note",
"w3c/NOTE-ED": "Editor's Draft",
"w3c/NOTE-WD": "W3C Working Draft",
"w3c/NOTE-FPWD": "W3C First Public Working Draft",
"w3c/MO": "W3C Member-only Draft",
"w3c/UD": "Unofficial Proposal Draft",
"w3c/CG-DRAFT": "Draft Community Group Report",
"w3c/CG-FINAL": "Final Community Group Report",
"tc39/STAGE0": "Stage 0: Strawman",
"tc39/STAGE1": "Stage 1: Proposal",
"tc39/STAGE2": "Stage 2: Draft",
"tc39/STAGE3": "Stage 3: Candidate",
"tc39/STAGE4": "Stage 4: Finished",
"iso/I": "Issue",
"iso/DR": "Defect Report",
"iso/D": "Draft Proposal",
"iso/P": "Published Proposal",
"iso/MEET": "Meeting Announcements",
"iso/RESP": "Records of Response",
"iso/MIN": "Minutes",
"iso/ER": "Editor's Report",
"iso/SD": "Standing Document",
"iso/PWI": "Preliminary Work Item",
"iso/NP": "New Proposal",
"iso/NWIP": "New Work Item Proposal",
"iso/WD": "Working Draft",
"iso/CD": "Committee Draft",
"iso/FCD": "Final Committee Draft",
"iso/DIS": "Draft International Standard",
"iso/FDIS": "Final Draft International Standard",
"iso/PRF": "Proof of a new International Standard",
"iso/IS": "International Standard",
"iso/TR": "Technical Report",
"iso/DTR": "Draft Technical Report",
"iso/TS": "Technical Specification",
"iso/DTS": "Draft Technical Specification",
"iso/PAS": "Publicly Available Specification",
"iso/TTA": "Technology Trends Assessment",
"iso/IWA": "International Workshop Agreement",
"iso/COR": "Technical Corrigendum",
"iso/GUIDE": "Guidance to Technical Committees",
"iso/NP-AMD": "New Proposal Amendment",
"iso/AWI-AMD": "Approved new Work Item Amendment",
"iso/WD-AMD": "Working Draft Amendment",
"iso/CD-AMD": "Committee Draft Amendment",
"iso/PD-AMD": "Proposed Draft Amendment",
"iso/FPD-AMD": "Final Proposed Draft Amendment",
"iso/D-AMD": "Draft Amendment",
"iso/FD-AMD": "Final Draft Amendment",
"iso/PRF-AMD": "Proof Amendment",
"iso/AMD": "Amendment",
"fido/ED": "Editor's Draft",
"fido/WD": "Working Draft",
"fido/RD": "Review Draft",
"fido/ID": "Implementation Draft",
"fido/PS": "Proposed Standard",
"fido/FD": "Final Document",
"khronos/ED": "Editor's Draft",
}
snapshotStatuses = [
"w3c/WD",
"w3c/FPWD",
"w3c/LCWD",
"w3c/CR",
"w3c/CRD",
"w3c/PR",
"w3c/REC",
"w3c/PER",
"w3c/WG-NOTE",
"w3c/IG-NOTE",
"w3c/NOTE",
"w3c/NOTE-WD",
"w3c/NOTE-FPWD",
"w3c/MO",
]
datedStatuses = [
"w3c/WD",
"w3c/FPWD",
"w3c/LCWD",
"w3c/CR",
"w3c/CRD",
"w3c/PR",
"w3c/REC",
"w3c/PER",
"w3c/WG-NOTE",
"w3c/IG-NOTE",
"w3c/NOTE",
"w3c/NOTE-WD",
"w3c/NOTE-FPWD",
"w3c/MO",
"whatwg/RD",
]
implementationStatuses = ["w3c/CR", "w3c/CRD", "w3c/PR", "w3c/REC"]
unlevelledStatuses = [
"LS",
"LD",
"DREAM",
"w3c/UD",
"LS-COMMIT",
"LS-BRANCH",
"LS-PR",
"FINDING",
"DRAFT-FINDING",
"whatwg/RD",
]
deadlineStatuses = ["w3c/LCWD", "w3c/PR"]
noEDStatuses = [
"LS",
"LS-COMMIT",
"LS-BRANCH",
"LS-PR",
"LD",
"FINDING",
"DRAFT-FINDING",
"DREAM",
"iso/NP",
"whatwg/RD",
]
# W3C statuses are restricted in various confusing ways.
# These statuses are usable by any group operating under the W3C Process
# Document. (So, not by Community and Business Groups.)
w3cProcessDocumentStatuses = frozenset(
[
"w3c/ED",
"w3c/NOTE",
"w3c/NOTE-ED",
"w3c/NOTE-WD",
"w3c/NOTE-FPWD",
"w3c/UD",
]
)
# Interest Groups are limited to these statuses
w3cIGStatuses = frozenset(["w3c/IG-NOTE"]).union(w3cProcessDocumentStatuses)
# Working Groups are limited to these statuses
w3cWGStatuses = frozenset(
[
"w3c/WD",
"w3c/FPWD",
"w3c/LCWD",
"w3c/CR",
"w3c/CRD",
"w3c/PR",
"w3c/REC",
"w3c/PER",
"w3c/WG-NOTE",
]
).union(w3cProcessDocumentStatuses)
# The TAG is limited to these statuses
w3cTAGStatuses = frozenset(
[
"DRAFT-FINDING",
"FINDING",
"w3c/WG-NOTE", # despite the TAG not being a WG. I know, it's weird.
]
).union(w3cProcessDocumentStatuses)
# Community and Business Groups are limited to these statuses
w3cCommunityStatuses = frozenset(["w3c/CG-DRAFT", "w3c/CG-FINAL"])
megaGroups = {
"w3c": frozenset(
[
"act-framework",
"audiowg",
"browser-testing-tools",
"csswg",
"dap",
"fxtf",
"fxtf-csswg",
"geolocation",
"gpuwg",
"houdini",
"html",
"i18n",
"immersivewebcg",
"immersivewebwg",
"mediacapture",
"mediawg",
"ping",
"privacycg",
"processcg",
"ricg",
"sacg",
"secondscreencg",
"secondscreenwg",
"serviceworkers",
"svg",
"tag",
"texttracks",
"uievents",
"wasm",
"web-bluetooth-cg",
"webapps",
"webappsec",
"webauthn",
"webfontswg",
"webml",
"webmlwg",
"web-payments",
"webperf",
"webplatform",
"webrtc",
"webspecs",
"webtransport",
"webvr",
"wicg",
]
),
"whatwg": frozenset(["whatwg"]),
"tc39": frozenset(["tc39"]),
"iso": frozenset(["wg14", "wg21"]),
"fido": frozenset(["fido"]),
"priv-sec": frozenset(
[
"audiowg",
"csswg",
"dap",
"fxtf",
"fxtf-csswg",
"geolocation",
"houdini",
"html",
"mediacapture",
"mediawg",
"ricg",
"svg",
"texttracks",
"uievents",
"web-bluetooth-cg",
"webappsec",
"webfontswg",
"webplatform",
"webspecs",
"whatwg",
]
),
"khronos": frozenset(["webgl"]),
}
# Community and business groups within the W3C:
w3cCgs = frozenset(
[
"immersivewebcg",
"privacycg",
"processcg",
"ricg",
"sacg",
"webml",
"web-bluetooth-cg",
"wicg",
]
)
assert w3cCgs.issubset(megaGroups["w3c"])
# Interest Groups within the W3C:
w3cIgs = frozenset(["ping"])
assert w3cIgs.issubset(megaGroups["w3c"])
def canonicalizeStatus(rawStatus, group):
    """Canonicalize a raw Status metadata value into its known form.

    Returns the canonical status string (e.g. "w3c/WD"), or None when
    rawStatus is None. Dies with a descriptive message when the status is
    unknown or not usable by the given group; warns (without dying) when a
    W3C group uses a status outside its group type's allowed set.
    """
    if rawStatus is None:
        return None

    def validateW3Cstatus(group, status, rawStatus):
        # Warn (but don't die) when a W3C group uses a status outside the
        # set its group type is limited to.
        if status == "DREAM":
            warn("You used Status: DREAM for a W3C document." + " Consider UD instead.")
            return

        if "w3c/" + status in shortToLongStatus:
            status = "w3c/" + status

        def formatStatusSet(statuses):
            return ", ".join(sorted({status.split("/")[-1] for status in statuses}))

        msg = "You used Status: {0}, but {1} limited to these statuses: {2}."
        if group in w3cIgs and status not in w3cIGStatuses:
            warn(
                msg,
                rawStatus,
                "W3C Interest Groups are",
                formatStatusSet(w3cIGStatuses),
            )
        if group == "tag" and status not in w3cTAGStatuses:
            warn(msg, rawStatus, "the TAG is", formatStatusSet(w3cTAGStatuses))
        if group in w3cCgs and status not in w3cCommunityStatuses:
            warn(
                msg,
                rawStatus,
                "W3C Community and Business Groups are",
                formatStatusSet(w3cCommunityStatuses),
            )

    def megaGroupsForStatus(status):
        # Returns a list of megagroups that recognize the given status.
        # BUGFIX: bare keys like "LS" or "DREAM" have no "/", so partition()
        # puts the whole key in the megagroup slot and leaves the status
        # empty; they previously never matched, making the
        # `"" in possibleMgs` branch below dead code. Record them under the
        # empty-string megagroup instead, as that branch expects.
        mgs = []
        for key in shortToLongStatus:
            mg, slash, s = key.partition("/")
            if not slash:
                # Bare status key: no org prefix.
                mg, s = "", mg
            if s == status:
                mgs.append(mg)
        return mgs

    # Canonicalize the rawStatus that was passed in, into a known form.
    # Might be foo/BAR, or just BAR.
    megaGroup, _, status = rawStatus.partition("/")
    if status == "":
        status = megaGroup
        megaGroup = ""
    megaGroup = megaGroup.lower()
    status = status.upper()
    if megaGroup:
        canonStatus = megaGroup + "/" + status
    else:
        canonStatus = status

    if group is not None:
        group = group.lower()

    if group in megaGroups["w3c"]:
        validateW3Cstatus(group, canonStatus, rawStatus)

    # Using a directly-recognized status is A-OK.
    # (Either one of the unrestricted statuses,
    # or one of the restricted statuses with the correct standards-org prefix.)
    if canonStatus in shortToLongStatus:
        return canonStatus

    possibleMgs = megaGroupsForStatus(status)

    # If they specified a standards-org prefix and it wasn't found,
    # that's an error.
    if megaGroup:
        # Was the error because the megagroup doesn't exist?
        if possibleMgs:
            if megaGroup not in megaGroups:
                msg = f"Status metadata specified an unrecognized '{megaGroup}' organization."
            else:
                msg = f"Status '{status}' can't be used with the org '{megaGroup}'."
            if "" in possibleMgs:
                if len(possibleMgs) == 1:
                    msg += f" That status must be used without an org at all, like `Status: {status}`"
                else:
                    msg += " That status can only be used with the org{} {}, or without an org at all.".format(
                        "s" if len(possibleMgs) > 1 else "",
                        englishFromList(f"'{x}'" for x in possibleMgs if x != ""),
                    )
            else:
                if len(possibleMgs) == 1:
                    msg += " That status can only be used with the org '{0}', like `Status: {0}/{1}`".format(
                        possibleMgs[0], status
                    )
                else:
                    msg += " That status can only be used with the orgs {}.".format(
                        englishFromList(f"'{x}'" for x in possibleMgs)
                    )
        else:
            if megaGroup not in megaGroups:
                msg = f"Unknown Status metadata '{canonStatus}'. Check the docs for valid Status values."
            else:
                msg = f"Status '{status}' can't be used with the org '{megaGroup}'. Check the docs for valid Status values."
        die("{0}", msg)
        return canonStatus

    # Otherwise, they provided a bare status.
    # See if their group is compatible with any of the prefixed statuses matching the bare status.
    assert (
        "" not in possibleMgs
    )  # a bare status present in the table would have hit the direct lookup above
    for mg in possibleMgs:
        if group in megaGroups[mg]:
            canonStatus = mg + "/" + status
            if mg == "w3c":
                validateW3Cstatus(group, canonStatus, rawStatus)
            return canonStatus

    # Group isn't in any compatible org, so suggest prefixing.
    if possibleMgs:
        msg = "You used Status: {}, but that's limited to the {} org{}".format(
            rawStatus,
            englishFromList(f"'{mg}'" for mg in possibleMgs),
            "s" if len(possibleMgs) > 1 else "",
        )
        if group:
            msg += ", and your group '{}' isn't recognized as being in {}.".format(
                group, "any of those orgs" if len(possibleMgs) > 1 else "that org"
            )
            msg += " If this is wrong, please file a Bikeshed issue to categorize your group properly, and/or try:\n"
            msg += "\n".join(f"Status: {mg}/{status}" for mg in possibleMgs)
        else:
            msg += ", and you don't have a Group metadata. Please declare your Group, or check the docs for statuses that can be used by anyone."
    else:
        msg = f"Unknown Status metadata '{canonStatus}'. Check the docs for valid Status values."
    die("{0}", msg)
    return canonStatus
def splitStatus(st):
    """Split a canonical status into its (megagroup, status) parts.

    A status with no "/" separator has no megagroup; None input yields
    (None, None).
    """
    if st is None:
        return None, None
    megagroup, _, status = st.partition("/")
    if status == "":
        return None, megagroup
    return megagroup, status
def looselyMatch(s1, s2):
    """Loosely match two statuses.

    The status names must be identical, and the group names must either
    be identical or be missing on at least one side.
    """
    group1, status1 = splitStatus(s1)
    group2, status2 = splitStatus(s2)
    if status1 != status2:
        return False
    return group1 == group2 or group1 is None or group2 is None
|
en
| 0.914699
|
# W3C statuses are restricted in various confusing ways. # These statuses are usable by any group operating under the W3C Process # Document. (So, not by Community and Business Groups.) # Interest Groups are limited to these statuses # Working Groups are limited to these statuses # The TAG is limited to these statuses # despite the TAG not being a WG. I know, it's weird. # Community and Business Groups are limited to these statuses # Community and business groups within the W3C: # Interest Groups within the W3C: # Returns a list of megagroups that recognize the given status # Canonicalize the rawStatus that was passed in, into a known form. # Might be foo/BAR, or just BAR. # Using a directly-recognized status is A-OK. # (Either one of the unrestricted statuses, # or one of the restricted statuses with the correct standards-org prefix.) # If they specified a standards-org prefix and it wasn't found, # that's an error. # Was the error because the megagroup doesn't exist? # Otherwise, they provided a bare status. # See if their group is compatible with any of the prefixed statuses matching the bare status. # if it was here, the literal "in" test would have caught this bare status # Group isn't in any compatible org, so suggest prefixing. # Loosely matches two statuses: # they must have the same status name, # and either the same or missing group name
| 1.870436
| 2
|
aiodine/providers.py
|
Olegt0rr/aiodine
| 61
|
6629190
|
import inspect
from contextlib import contextmanager, suppress
from functools import partial
from typing import (
TYPE_CHECKING,
Any,
AsyncGenerator,
Awaitable,
Callable,
Dict,
List,
Optional,
Union,
)
from . import scopes
from .compat import (
AsyncExitStack,
wrap_async,
wrap_generator_async,
ContextVar,
Token,
)
from .datatypes import CoroutineFunction
from .exceptions import ProviderDeclarationError
if TYPE_CHECKING: # pragma: no cover
from .store import Store
async def _terminate_agen(async_gen: AsyncGenerator):
with suppress(StopAsyncIteration):
await async_gen.asend(None)
class Provider:
    """Base class for providers.

    Wraps a provider function together with its metadata (name, scope,
    laziness, autouse flag), normalizing the function so that it is always
    either a coroutine function or an async generator function.
    """

    __slots__ = ("func", "name", "scope", "lazy", "autouse")

    def __init__(
        self, func: Callable, name: str, scope: str, lazy: bool, autouse: bool
    ):
        # Laziness only makes sense for values rebuilt on every call.
        if lazy and scope != scopes.FUNCTION:
            raise ProviderDeclarationError(
                "Lazy providers must be function-scoped"
            )

        # Normalize sync functions and sync generators into their async
        # equivalents; async generator functions pass through unchanged.
        if inspect.isgeneratorfunction(func):
            func = wrap_generator_async(func)
        elif not (
            inspect.isasyncgenfunction(func) or inspect.iscoroutinefunction(func)
        ):
            func = wrap_async(func)
        assert inspect.iscoroutinefunction(func) or inspect.isasyncgenfunction(
            func
        )

        self.func: Union[AsyncGenerator, CoroutineFunction] = func
        self.name = name
        self.scope = scope
        self.lazy = lazy
        self.autouse = autouse

    @classmethod
    def create(cls, func, **kwargs) -> "Provider":
        """Factory method to build a provider of the appropriate scope."""
        if kwargs.get("scope") == scopes.SESSION:
            return SessionProvider(func, **kwargs)
        return FunctionProvider(func, **kwargs)

    # NOTE: the returned value is an awaitable, so we *must not*
    # declare this function as `async` — its return value should already be.
    def __call__(self, stack: AsyncExitStack) -> Awaitable:
        raise NotImplementedError
class FunctionProvider(Provider):
    """Represents a function-scoped provider.

    Its value is recomputed every time the provider is called.
    """

    def __call__(self, stack: AsyncExitStack) -> Awaitable:
        result = self.func()
        if not inspect.isasyncgen(result):
            # Plain coroutine provider: the call itself is the awaitable.
            return result

        agen = result

        # `await` is not allowed in a plain function, so return a coroutine
        # that performs the setup step and registers the teardown.
        async def setup_and_register() -> Any:
            # Executes setup + `yield <some_value>`.
            provided = await agen.asend(None)
            # Register cleanup to run when the stack exits.
            stack.push_async_callback(partial(_terminate_agen, agen))
            return provided

        return setup_and_register()
class SessionProvider(Provider):
    """Represents a session-scoped provider.

    When called, it builds its instance if necessary and returns it. This
    means that the underlying provider is only built once and is reused
    across function calls.
    """

    # Extra slots: the cached value and (for async-generator providers)
    # the generator whose teardown must run at session exit.
    __slots__ = Provider.__slots__ + ("_instance", "_generator")

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Cached provided value; None means "not built yet".
        self._instance: Optional[Any] = None
        # Backing async generator, kept so exit_session() can run its
        # cleanup; stays None for plain coroutine providers.
        self._generator: Optional[AsyncGenerator] = None

    async def enter_session(self):
        """Build and cache the provided value (no-op if already built)."""
        if self._instance is not None:
            return
        value = self.func()
        if inspect.isawaitable(value):
            value = await value
        if inspect.isasyncgen(value):
            agen = value
            # Run the generator's setup code, up to its `yield`.
            value = await agen.asend(None)
            self._generator = agen
        self._instance = value

    async def exit_session(self):
        """Run the provider's teardown (if any) and drop the cached value."""
        if self._generator is not None:
            await _terminate_agen(self._generator)
            self._generator = None
        self._instance = None

    async def _get_instance(self) -> Any:
        # Lazily enter the session on first use.
        if self._instance is None:
            await self.enter_session()
        return self._instance

    def __call__(self, stack: AsyncExitStack) -> Awaitable:
        # NOTE(review): `stack` is unused here — session teardown happens
        # in exit_session(), not via the per-call exit stack.
        return self._get_instance()
class ContextProvider:
    """A provider of context-local values.

    Implemented with one ``ContextVar`` per declared name; each variable is
    exposed through a provider registered on the store under that name.

    Parameters
    ----------
    store : Store
    *names : str
        The name of the variables to provide. For each variable, a
        ``ContextVar`` is created and used by a new provider named
        after the variable.
    """

    def __init__(self, store: "Store", *names: str):
        self._store = store
        self._variables: Dict[str, ContextVar] = {}
        for name in names:
            self._build_provider(name)

    def _build_provider(self, name):
        self._variables[name] = ContextVar(name, default=None)

        async def provider():
            return self._variables[name].get()

        return self._store.provider(name=name)(provider)

    def _set(self, **values: Any) -> List[Token]:
        # Assign new values to the given variables, returning reset tokens.
        return [self._variables[name].set(value) for name, value in values.items()]

    def _reset(self, *tokens: Token):
        # Restore each variable to its pre-`_set` value.
        for token in tokens:
            self._variables[token.var.name].reset(token)

    @contextmanager
    def assign(self, **values: Any):
        """Context manager to assign values to variables.

        Only the variables for the current context are changed. Values for
        other contexts are unaffected. Variables are reset to their
        previous value on exit.

        Parameters
        ----------
        **values : any
        """
        tokens = self._set(**values)
        try:
            yield
        finally:
            self._reset(*tokens)
|
import inspect
from contextlib import contextmanager, suppress
from functools import partial
from typing import (
TYPE_CHECKING,
Any,
AsyncGenerator,
Awaitable,
Callable,
Dict,
List,
Optional,
Union,
)
from . import scopes
from .compat import (
AsyncExitStack,
wrap_async,
wrap_generator_async,
ContextVar,
Token,
)
from .datatypes import CoroutineFunction
from .exceptions import ProviderDeclarationError
if TYPE_CHECKING: # pragma: no cover
from .store import Store
async def _terminate_agen(async_gen: AsyncGenerator):
with suppress(StopAsyncIteration):
await async_gen.asend(None)
class Provider:
    """Base class for providers.

    Wraps a provider function together with its metadata (name, scope,
    laziness, autouse flag), normalizing the function so that it is always
    either a coroutine function or an async generator function.
    """

    __slots__ = ("func", "name", "scope", "lazy", "autouse")

    def __init__(
        self, func: Callable, name: str, scope: str, lazy: bool, autouse: bool
    ):
        # Laziness only makes sense for values rebuilt on every call.
        if lazy and scope != scopes.FUNCTION:
            raise ProviderDeclarationError(
                "Lazy providers must be function-scoped"
            )

        # Normalize sync functions and sync generators into their async
        # equivalents; async generator functions pass through unchanged.
        if inspect.isgeneratorfunction(func):
            func = wrap_generator_async(func)
        elif not (
            inspect.isasyncgenfunction(func) or inspect.iscoroutinefunction(func)
        ):
            func = wrap_async(func)
        assert inspect.iscoroutinefunction(func) or inspect.isasyncgenfunction(
            func
        )

        self.func: Union[AsyncGenerator, CoroutineFunction] = func
        self.name = name
        self.scope = scope
        self.lazy = lazy
        self.autouse = autouse

    @classmethod
    def create(cls, func, **kwargs) -> "Provider":
        """Factory method to build a provider of the appropriate scope."""
        if kwargs.get("scope") == scopes.SESSION:
            return SessionProvider(func, **kwargs)
        return FunctionProvider(func, **kwargs)

    # NOTE: the returned value is an awaitable, so we *must not*
    # declare this function as `async` — its return value should already be.
    def __call__(self, stack: AsyncExitStack) -> Awaitable:
        raise NotImplementedError
class FunctionProvider(Provider):
    """Represents a function-scoped provider.

    Its value is recomputed every time the provider is called.
    """

    def __call__(self, stack: AsyncExitStack) -> Awaitable:
        result = self.func()
        if not inspect.isasyncgen(result):
            # Plain coroutine provider: the call itself is the awaitable.
            return result

        agen = result

        # `await` is not allowed in a plain function, so return a coroutine
        # that performs the setup step and registers the teardown.
        async def setup_and_register() -> Any:
            # Executes setup + `yield <some_value>`.
            provided = await agen.asend(None)
            # Register cleanup to run when the stack exits.
            stack.push_async_callback(partial(_terminate_agen, agen))
            return provided

        return setup_and_register()
class SessionProvider(Provider):
    """Represents a session-scoped provider.

    When called, it builds its instance if necessary and returns it. This
    means that the underlying provider is only built once and is reused
    across function calls.
    """

    # Extra slots: the cached value and (for async-generator providers)
    # the generator whose teardown must run at session exit.
    __slots__ = Provider.__slots__ + ("_instance", "_generator")

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Cached provided value; None means "not built yet".
        self._instance: Optional[Any] = None
        # Backing async generator, kept so exit_session() can run its
        # cleanup; stays None for plain coroutine providers.
        self._generator: Optional[AsyncGenerator] = None

    async def enter_session(self):
        """Build and cache the provided value (no-op if already built)."""
        if self._instance is not None:
            return
        value = self.func()
        if inspect.isawaitable(value):
            value = await value
        if inspect.isasyncgen(value):
            agen = value
            # Run the generator's setup code, up to its `yield`.
            value = await agen.asend(None)
            self._generator = agen
        self._instance = value

    async def exit_session(self):
        """Run the provider's teardown (if any) and drop the cached value."""
        if self._generator is not None:
            await _terminate_agen(self._generator)
            self._generator = None
        self._instance = None

    async def _get_instance(self) -> Any:
        # Lazily enter the session on first use.
        if self._instance is None:
            await self.enter_session()
        return self._instance

    def __call__(self, stack: AsyncExitStack) -> Awaitable:
        # NOTE(review): `stack` is unused here — session teardown happens
        # in exit_session(), not via the per-call exit stack.
        return self._get_instance()
class ContextProvider:
    """A provider of context-local values.

    Implemented with one ``ContextVar`` per declared name; each variable is
    exposed through a provider registered on the store under that name.

    Parameters
    ----------
    store : Store
    *names : str
        The name of the variables to provide. For each variable, a
        ``ContextVar`` is created and used by a new provider named
        after the variable.
    """

    def __init__(self, store: "Store", *names: str):
        self._store = store
        self._variables: Dict[str, ContextVar] = {}
        for name in names:
            self._build_provider(name)

    def _build_provider(self, name):
        self._variables[name] = ContextVar(name, default=None)

        async def provider():
            return self._variables[name].get()

        return self._store.provider(name=name)(provider)

    def _set(self, **values: Any) -> List[Token]:
        # Assign new values to the given variables, returning reset tokens.
        return [self._variables[name].set(value) for name, value in values.items()]

    def _reset(self, *tokens: Token):
        # Restore each variable to its pre-`_set` value.
        for token in tokens:
            self._variables[token.var.name].reset(token)

    @contextmanager
    def assign(self, **values: Any):
        """Context manager to assign values to variables.

        Only the variables for the current context are changed. Values for
        other contexts are unaffected. Variables are reset to their
        previous value on exit.

        Parameters
        ----------
        **values : any
        """
        tokens = self._set(**values)
        try:
            yield
        finally:
            self._reset(*tokens)
|
en
| 0.834726
|
# pragma: no cover Base class for providers. This is mostly a wrapper around a provider function, along with some metadata. Factory method to build a provider of the appropriate scope. # NOTE: the returned value is an awaitable, so we *must not* # declare this function as `async` — its return value should already be. Represents a function-scoped provider. Its value is recomputed every time the provider is called. # We cannot use `await` in here => define a coroutine function # and return the (awaitable) coroutine itself. # Executes setup + `yield <some_value>`. # Registers cleanup to be executed when the stack exits. Represents a session-scoped provider. When called, it builds its instance if necessary and returns it. This means that the underlying provider is only built once and is reused across function calls. A provider of context-local values. This provider is implemented using the ``contextvars`` module. Parameters ---------- store : Store *names : str The name of the variables to provide. For each variable, a ``ContextVar`` is created and used by a new provider named after the variable. # Set new values for the given variables. # Reset variables to their previous value using the given tokens. Context manager to assign values to variables. Only the variables for the current context are changed. Values for other contexts are unaffected. Variables are reset to their previous value on exit. Parameters ---------- **values : any
| 2.285346
| 2
|
app/models/role.py
|
Forec/WildPointer
| 8
|
6629191
|
# -*- coding: utf-8 -*-
# @Time : 2017/9/6 09:49
# @Author : Forec
# @File : models/role.py
# @Project : WildPointer
# @license : Copyright(C), Forec
# @Contact : <EMAIL>
from .permission import Permission
from .. import db
class Role(db.Model):
    """User role, carrying a permission bitmask and a default flag."""
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True, index=True)
    default = db.Column(db.Boolean, default=False)
    permissions = db.Column(db.Integer)
    users = db.relationship('User', backref='role', lazy='dynamic')

    @staticmethod
    def insert_roles():
        """Create or refresh the built-in roles (safe to run repeatedly)."""
        roles = {
            '未激活用户': (0x00, True),
            '普通用户': (Permission.FOLLOW |
                     Permission.COMMENT |
                     Permission.WRITE_POSTS, False),
            '评论管理员': (Permission.FOLLOW |
                      Permission.COMMENT |
                      Permission.WRITE_POSTS |
                      Permission.MODERATE_COMMENTS, False),
            '问题管理员': (Permission.COMMENT |
                      Permission.WRITE_POSTS |
                      Permission.MODERATE_ALL, False),
            '超级管理员': (0xff, False)
        }
        for role_name, (permission_bits, is_default) in roles.items():
            role = Role.query.filter_by(name=role_name).first()
            if role is None:
                role = Role(name=role_name)
            role.permissions = permission_bits
            role.default = is_default
            db.session.add(role)
        db.session.commit()

    def __repr__(self):
        return '<Role %r>' % self.name
|
# -*- coding: utf-8 -*-
# @Time : 2017/9/6 09:49
# @Author : Forec
# @File : models/role.py
# @Project : WildPointer
# @license : Copyright(C), Forec
# @Contact : <EMAIL>
from .permission import Permission
from .. import db
class Role(db.Model):
    """User role, carrying a permission bitmask and a default flag."""
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True, index=True)
    default = db.Column(db.Boolean, default=False)
    permissions = db.Column(db.Integer)
    users = db.relationship('User', backref='role', lazy='dynamic')

    @staticmethod
    def insert_roles():
        """Create or refresh the built-in roles (safe to run repeatedly)."""
        roles = {
            '未激活用户': (0x00, True),
            '普通用户': (Permission.FOLLOW |
                     Permission.COMMENT |
                     Permission.WRITE_POSTS, False),
            '评论管理员': (Permission.FOLLOW |
                      Permission.COMMENT |
                      Permission.WRITE_POSTS |
                      Permission.MODERATE_COMMENTS, False),
            '问题管理员': (Permission.COMMENT |
                      Permission.WRITE_POSTS |
                      Permission.MODERATE_ALL, False),
            '超级管理员': (0xff, False)
        }
        for role_name, (permission_bits, is_default) in roles.items():
            role = Role.query.filter_by(name=role_name).first()
            if role is None:
                role = Role(name=role_name)
            role.permissions = permission_bits
            role.default = is_default
            db.session.add(role)
        db.session.commit()

    def __repr__(self):
        return '<Role %r>' % self.name
|
fr
| 0.271143
|
# -*- coding: utf-8 -*- # @Time : 2017/9/6 09:49 # @Author : Forec # @File : models/role.py # @Project : WildPointer # @license : Copyright(C), Forec # @Contact : <EMAIL>
| 2.423122
| 2
|
pylot/loggers/camera_logger_operator.py
|
victorsun123/pylot
| 0
|
6629192
|
<reponame>victorsun123/pylot<filename>pylot/loggers/camera_logger_operator.py
import numpy as np
import pickle
import PIL.Image as Image
import pylot.utils
from pylot.perception.segmentation.utils import transform_to_cityscapes_palette
from erdos.op import Op
from erdos.utils import setup_csv_logging, setup_logging
class CameraLoggerOp(Op):
""" Logs frames for different types of cameras."""
    def __init__(self, name, flags, log_file_name=None, csv_file_name=None):
        """Initializes loggers and per-camera frame counters.

        Args:
            name: Operator name, also used to name the loggers.
            flags: Config flags; this operator reads `data_path` and
                `log_every_nth_frame` from them.
            log_file_name: Optional file to write operator logs to.
            csv_file_name: Optional file to write CSV-formatted logs to.
        """
        super(CameraLoggerOp, self).__init__(name)
        self._flags = flags
        self._logger = setup_logging(self.name, log_file_name)
        self._csv_logger = setup_csv_logging(self.name + '-csv', csv_file_name)
        # One counter per camera stream, used to log only every nth frame.
        self._bgr_frame_cnt = 0
        self._segmented_frame_cnt = 0
        self._top_down_segmented_frame_cnt = 0
        self._depth_frame_cnt = 0
        self._left_bgr_frame_cnt = 0
        self._right_bgr_frame_cnt = 0
@staticmethod
def setup_streams(input_streams):
input_streams.filter(pylot.utils.is_center_camera_stream).add_callback(
CameraLoggerOp.on_bgr_frame)
input_streams.filter(pylot.utils.is_left_camera_stream).add_callback(
CameraLoggerOp.on_bgr_frame_left)
input_streams.filter(pylot.utils.is_right_camera_stream).add_callback(
CameraLoggerOp.on_bgr_frame_right)
input_streams.filter(
pylot.utils.is_front_segmented_camera_stream).add_callback(
CameraLoggerOp.on_front_segmented_frame)
input_streams.filter(
pylot.utils.is_top_down_segmented_camera_stream).add_callback(
CameraLoggerOp.on_top_down_segmented_frame)
input_streams.filter(
pylot.utils.is_depth_camera_stream).add_callback(
CameraLoggerOp.on_depth_frame)
return []
def on_bgr_frame(self, msg):
self._bgr_frame_cnt += 1
if self._bgr_frame_cnt % self._flags.log_every_nth_frame != 0:
return
# Write the image.
assert msg.encoding == 'BGR', 'Expects BGR frames'
rgb_array = pylot.utils.bgr_to_rgb(msg.frame)
file_name = '{}carla-center-{}.png'.format(
self._flags.data_path, msg.timestamp.coordinates[0])
rgb_img = Image.fromarray(np.uint8(rgb_array))
rgb_img.save(file_name)
def on_bgr_frame_left(self, msg):
self._left_bgr_frame_cnt += 1
if self._left_bgr_frame_cnt % self._flags.log_every_nth_frame != 0:
return
# Write the image.
assert msg.encoding == 'BGR', 'Expects BGR frames'
rgb_array = pylot.utils.bgr_to_rgb(msg.frame)
file_name = '{}carla-left-{}.png'.format(
self._flags.data_path, msg.timestamp.coordinates[0])
rgb_img = Image.fromarray(np.uint8(rgb_array))
rgb_img.save(file_name)
def on_bgr_frame_right(self, msg):
self._right_bgr_frame_cnt += 1
if self._right_bgr_frame_cnt % self._flags.log_every_nth_frame != 0:
return
# Write the image.
assert msg.encoding == 'BGR', 'Expects BGR frames'
rgb_array = pylot.utils.bgr_to_rgb(msg.frame)
file_name = '{}carla-right-{}.png'.format(
self._flags.data_path, msg.timestamp.coordinates[0])
rgb_img = Image.fromarray(np.uint8(rgb_array))
rgb_img.save(file_name)
def on_front_segmented_frame(self, msg):
self._segmented_frame_cnt += 1
if self._segmented_frame_cnt % self._flags.log_every_nth_frame != 0:
return
frame = transform_to_cityscapes_palette(msg.frame)
# Write the segmented image.
img = Image.fromarray(np.uint8(frame))
file_name = '{}carla-segmented-{}.png'.format(
self._flags.data_path, msg.timestamp.coordinates[0])
img.save(file_name)
def on_top_down_segmented_frame(self, msg):
self._top_down_segmented_frame_cnt += 1
if self._top_down_segmented_frame_cnt % self._flags.log_every_nth_frame != 0:
return
frame = transform_to_cityscapes_palette(msg.frame)
# Write the segmented image.
img = Image.fromarray(np.uint8(frame))
file_name = '{}carla-top-down-segmented-{}.png'.format(
self._flags.data_path, msg.timestamp.coordinates[0])
img.save(file_name)
def on_depth_frame(self, msg):
self._depth_frame_cnt += 1
if self._depth_frame_cnt % self._flags.log_every_nth_frame != 0:
return
# Write the depth information.
file_name = '{}carla-depth-{}.pkl'.format(
self._flags.data_path, msg.timestamp.coordinates[0])
pickle.dump(msg.frame,
open(file_name, 'wb'),
protocol=pickle.HIGHEST_PROTOCOL)
|
import numpy as np
import pickle
import PIL.Image as Image
import pylot.utils
from pylot.perception.segmentation.utils import transform_to_cityscapes_palette
from erdos.op import Op
from erdos.utils import setup_csv_logging, setup_logging
class CameraLoggerOp(Op):
    """Logs frames for different types of cameras.

    Every ``log_every_nth_frame``-th message (counted per camera) is written
    under ``flags.data_path``: BGR and segmented camera frames as PNG images,
    depth frames as pickle files.
    """

    def __init__(self, name, flags, log_file_name=None, csv_file_name=None):
        super(CameraLoggerOp, self).__init__(name)
        self._flags = flags
        self._logger = setup_logging(self.name, log_file_name)
        self._csv_logger = setup_csv_logging(self.name + '-csv', csv_file_name)
        # Per-camera message counters, used to sub-sample the logged frames.
        self._bgr_frame_cnt = 0
        self._segmented_frame_cnt = 0
        self._top_down_segmented_frame_cnt = 0
        self._depth_frame_cnt = 0
        self._left_bgr_frame_cnt = 0
        self._right_bgr_frame_cnt = 0

    @staticmethod
    def setup_streams(input_streams):
        """Registers the logging callbacks on the camera input streams."""
        input_streams.filter(pylot.utils.is_center_camera_stream).add_callback(
            CameraLoggerOp.on_bgr_frame)
        input_streams.filter(pylot.utils.is_left_camera_stream).add_callback(
            CameraLoggerOp.on_bgr_frame_left)
        input_streams.filter(pylot.utils.is_right_camera_stream).add_callback(
            CameraLoggerOp.on_bgr_frame_right)
        input_streams.filter(
            pylot.utils.is_front_segmented_camera_stream).add_callback(
                CameraLoggerOp.on_front_segmented_frame)
        input_streams.filter(
            pylot.utils.is_top_down_segmented_camera_stream).add_callback(
                CameraLoggerOp.on_top_down_segmented_frame)
        input_streams.filter(
            pylot.utils.is_depth_camera_stream).add_callback(
                CameraLoggerOp.on_depth_frame)
        return []

    def _should_log(self, frame_cnt):
        # Sub-sampling policy: log only every nth received frame.
        return frame_cnt % self._flags.log_every_nth_frame == 0

    def _write_bgr_frame(self, msg, camera_name):
        # Shared implementation for the center/left/right BGR cameras;
        # converts BGR to RGB and writes a PNG named after the camera.
        assert msg.encoding == 'BGR', 'Expects BGR frames'
        rgb_array = pylot.utils.bgr_to_rgb(msg.frame)
        file_name = '{}carla-{}-{}.png'.format(
            self._flags.data_path, camera_name, msg.timestamp.coordinates[0])
        Image.fromarray(np.uint8(rgb_array)).save(file_name)

    def _write_segmented_frame(self, msg, file_prefix):
        # Shared implementation for the segmentation cameras; maps the raw
        # labels to the Cityscapes palette before writing the PNG.
        frame = transform_to_cityscapes_palette(msg.frame)
        file_name = '{}carla-{}-{}.png'.format(
            self._flags.data_path, file_prefix, msg.timestamp.coordinates[0])
        Image.fromarray(np.uint8(frame)).save(file_name)

    def on_bgr_frame(self, msg):
        """Logs a frame from the center BGR camera."""
        self._bgr_frame_cnt += 1
        if self._should_log(self._bgr_frame_cnt):
            self._write_bgr_frame(msg, 'center')

    def on_bgr_frame_left(self, msg):
        """Logs a frame from the left BGR camera."""
        self._left_bgr_frame_cnt += 1
        if self._should_log(self._left_bgr_frame_cnt):
            self._write_bgr_frame(msg, 'left')

    def on_bgr_frame_right(self, msg):
        """Logs a frame from the right BGR camera."""
        self._right_bgr_frame_cnt += 1
        if self._should_log(self._right_bgr_frame_cnt):
            self._write_bgr_frame(msg, 'right')

    def on_front_segmented_frame(self, msg):
        """Logs a frame from the front segmentation camera."""
        self._segmented_frame_cnt += 1
        if self._should_log(self._segmented_frame_cnt):
            self._write_segmented_frame(msg, 'segmented')

    def on_top_down_segmented_frame(self, msg):
        """Logs a frame from the top-down segmentation camera."""
        self._top_down_segmented_frame_cnt += 1
        if self._should_log(self._top_down_segmented_frame_cnt):
            self._write_segmented_frame(msg, 'top-down-segmented')

    def on_depth_frame(self, msg):
        """Logs a frame from the depth camera as a pickle file."""
        self._depth_frame_cnt += 1
        if not self._should_log(self._depth_frame_cnt):
            return
        file_name = '{}carla-depth-{}.pkl'.format(
            self._flags.data_path, msg.timestamp.coordinates[0])
        # Use a context manager so the file handle is closed (the original
        # code leaked the handle returned by open()).
        with open(file_name, 'wb') as pkl_file:
            pickle.dump(msg.frame, pkl_file, protocol=pickle.HIGHEST_PROTOCOL)
|
en
| 0.662184
|
Logs frames for different types of cameras. # Write the image. # Write the image. # Write the image. # Write the segmented image. # Write the segmented image. # Write the depth information.
| 2.292632
| 2
|
mlrun/serving/states.py
|
Hedingber/mlrun
| 0
|
6629193
|
<filename>mlrun/serving/states.py
# Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ["TaskStep", "RouterStep", "RootFlowStep"]
import os
import pathlib
import traceback
import warnings
from copy import copy, deepcopy
from inspect import getfullargspec, signature
from typing import Union
from ..config import config
from ..datastore import get_stream_pusher
from ..errors import MLRunInvalidArgumentError
from ..model import ModelObj, ObjectDict
from ..platforms.iguazio import parse_v3io_path
from ..utils import get_class, get_function
# class_args keys starting with this prefix are resolved to callables at
# init time (see TaskStep.init_object).
callable_prefix = "_"
# separator used when composing step full names/paths (see BaseStep.fullname)
path_splitter = "/"
# sentinel used as an ``after`` value meaning "the most recently added step"
previous_step = "$prev"
class GraphError(Exception):
    """Raised when the serving graph's topology or configuration is invalid."""
class StepKinds:
    """Valid values for a step's ``kind`` field."""

    router = "router"  # RouterStep: routes events to child routes
    task = "task"      # TaskStep: runs a class or handler
    flow = "flow"      # FlowStep: a DAG/workflow of steps
    queue = "queue"    # QueueStep: async queue or stream
    choice = "choice"  # NOTE(review): no matching step class in this module chunk
    root = "root"      # root flow (RootFlowStep is exported in __all__)
# Serializable fields for TaskStep; assigned to its ``_dict_fields``
# (RouterStep reuses this list and appends "routes").
_task_step_fields = [
    "kind",
    "class_name",
    "class_args",
    "handler",
    "skip_context",
    "after",
    "function",
    "comment",
    "shape",
    "full_event",
    "on_error",
    "responder",
]
def new_model_endpoint(class_name, model_path, handler=None, **class_args):
    """Create a model-serving TaskStep.

    ``model_path`` is injected into a deep copy of the class arguments, so
    the caller's kwargs are never mutated.
    """
    init_args = dict(deepcopy(class_args), model_path=model_path)
    return TaskStep(class_name, init_args, handler=handler)
def new_remote_endpoint(url, **class_args):
    """Create a TaskStep that invokes a remote endpoint at ``url``.

    ``url`` is injected into a deep copy of the class arguments, so the
    caller's kwargs are never mutated.
    """
    init_args = dict(deepcopy(class_args), url=url)
    return TaskStep("$remote", init_args)
class BaseStep(ModelObj):
    """Common base class for serving-graph steps (task/router/queue/flow).

    Holds the graph-wiring state shared by all step kinds: parent link,
    ``after`` (upstream step names), ``next`` (downstream step names), and
    the optional error-handler step name.
    """

    kind = "BaseStep"
    default_shape = "ellipse"  # graphviz node shape used when plotting
    # fields serialized by the ModelObj to/from-dict machinery
    _dict_fields = ["kind", "comment", "after", "on_error"]

    def __init__(self, name: str = None, after: list = None, shape: str = None):
        self.name = name
        self._parent = None
        self.comment = None
        self.context = None
        self.after = after
        self._next = None
        self.shape = shape
        self.on_error = None
        self._on_error_handler = None

    def get_shape(self):
        """graphviz shape"""
        return self.shape or self.default_shape

    def set_parent(self, parent):
        """set/link the step parent (flow/router)"""
        self._parent = parent

    @property
    def next(self):
        """list of next-step names (populated via set_next), or None"""
        return self._next

    @property
    def parent(self):
        """step parent (flow/router)"""
        return self._parent

    def set_next(self, key: str):
        """set/insert the key as next after this step, optionally remove other keys"""
        if not self.next:
            self._next = [key]
        elif key not in self.next:
            self._next.append(key)
        return self

    def after_step(self, after):
        """specify the previous step name"""
        # most steps only accept one source
        self.after = [after] if after else []
        return self

    def after_state(self, after):
        """Deprecated alias of :py:meth:`after_step`."""
        warnings.warn(
            "This method is deprecated. Use after_step instead",
            # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
            PendingDeprecationWarning,
        )
        return self.after_step(after)

    def error_handler(self, step_name: str = None, state_name=None):
        """set error handler step (on failure/raise of this step)"""
        if state_name:
            warnings.warn(
                "The state_name parameter is deprecated. Use step_name instead",
                # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
                PendingDeprecationWarning,
            )
        step_name = step_name or state_name
        if not step_name:
            raise MLRunInvalidArgumentError("Must specify step_name")
        self.on_error = step_name
        return self

    def init_object(self, context, namespace, mode="sync", reset=False, **extra_kwargs):
        """init the step class"""
        self.context = context

    def _is_local_function(self, context):
        # base steps always run in the local function; subclasses override
        return True

    def get_children(self):
        """get child steps (for router/flow)"""
        return []

    def __iter__(self):
        yield from []

    @property
    def fullname(self):
        """full path/name (include parents)"""
        name = self.name or ""
        if self._parent and self._parent.fullname:
            name = path_splitter.join([self._parent.fullname, name])
        return name.replace(":", "_")  # replace for graphviz escaping

    def _post_init(self, mode="sync"):
        # hook for subclasses; base steps need no post-init work
        pass

    def _set_error_handler(self):
        """init/link the error handler for this step"""
        if self.on_error:
            error_step = self.context.root.path_to_step(self.on_error)
            self._on_error_handler = error_step.run

    def _log_error(self, event, err, **kwargs):
        """on failure log (for sync mode)"""
        self.context.logger.error(
            f"step {self.name} got error {err} when processing an event:\n {event.body}"
        )
        message = traceback.format_exc()
        self.context.logger.error(message)
        self.context.push_error(
            event, f"{err}\n{message}", source=self.fullname, **kwargs
        )

    def _call_error_handler(self, event, err, **kwargs):
        """call the error handler if exist"""
        # returns the handler result, or None when no handler is configured
        if self._on_error_handler:
            event.error = str(err)
            event.origin_state = self.fullname
            return self._on_error_handler(event)

    def path_to_step(self, path: str):
        """return step object from step relative/fullname"""
        path = path or ""
        tree = path.split(path_splitter)
        next_level = self
        for step in tree:
            if step not in next_level:
                raise GraphError(
                    f"step {step} doesnt exist in the graph under {next_level.fullname}"
                )
            next_level = next_level[step]
        return next_level

    def path_to_state(self, path: str):
        """Deprecated alias of :py:meth:`path_to_step`."""
        warnings.warn(
            "This method is deprecated. Use path_to_step instead",
            # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
            PendingDeprecationWarning,
        )
        return self.path_to_step(path)

    def to(
        self,
        class_name: Union[str, type] = None,
        name: str = None,
        handler: str = None,
        graph_shape: str = None,
        function: str = None,
        full_event: bool = None,
        **class_args,
    ):
        """add a step right after this step and return the new step

        example, a 4 step pipeline ending with a stream:
        graph.to('URLDownloader').to('ToParagraphs')
             .to(name='to_json', handler='json.dumps')
             .to('>>', 'to_v3io', path=stream_path)

        :param class_name:  class name or step object to build the step from
                            for router steps the class name should start with '*'
                            for queue/stream step the class should be '>>' or '$queue'
        :param name:        unique name (and path) for the child step, default is class name
        :param handler:     class/function handler to invoke on run/event
        :param graph_shape: graphviz shape name
        :param function:    function this step should run in
        :param full_event:  this step accepts the full event (not just body)
        :param class_args:  class init arguments
        """
        # flows (which have a .steps attr) add children to themselves;
        # plain steps delegate to their parent flow
        if hasattr(self, "steps"):
            parent = self
        elif self._parent:
            parent = self._parent
        else:
            raise GraphError(
                f"step {self.name} parent is not set or its not part of a graph"
            )
        name, step = params_to_step(
            class_name,
            name,
            handler,
            graph_shape=graph_shape,
            function=function,
            full_event=full_event,
            class_args=class_args,
        )
        step = parent._steps.update(name, step)
        step.set_parent(parent)
        if not hasattr(self, "steps"):
            # check that its not the root, todo: in future may gave nested flows
            step.after_step(self.name)
        parent._last_added = step
        return step
class TaskStep(BaseStep):
    """task execution step, runs a class or handler

    The class/handler is resolved lazily in :py:meth:`init_object`; the
    resolved callable is stored in ``self._handler`` and invoked by
    :py:meth:`run`.
    """

    kind = "task"
    _dict_fields = _task_step_fields
    _default_class = ""

    def __init__(
        self,
        class_name: Union[str, type] = None,
        class_args: dict = None,
        handler: str = None,
        name: str = None,
        after: list = None,
        full_event: bool = None,
        function: str = None,
        responder: bool = None,
    ):
        super().__init__(name, after)
        self.class_name = class_name
        self.class_args = class_args or {}
        self.handler = handler
        self.function = function
        self._handler = None
        self._object = None
        self._async_object = None
        self.skip_context = None
        self.context = None
        self._class_object = None
        self.responder = responder
        self.full_event = full_event
        self.on_error = None
        self._inject_context = False

    def init_object(self, context, namespace, mode="sync", reset=False, **extra_kwargs):
        """Resolve the class/handler and instantiate the step object.

        Resolution order: a bare handler (no class), then a class object or
        class name (including the special '$remote' class), after which the
        instance is created with the processed ``class_args``.
        """
        self.context = context
        self._async_object = None
        if not self._is_local_function(context):
            # skip init of non local functions
            return

        if self.handler and not self.class_name:
            # link to function
            if callable(self.handler):
                self._handler = self.handler
                self.handler = self.handler.__name__
            else:
                self._handler = get_function(self.handler, namespace)
            # remember whether the handler expects a `context` kwarg
            args = signature(self._handler).parameters
            if args and "context" in list(args.keys()):
                self._inject_context = True
            return

        if isinstance(self.class_name, type):
            self._class_object = self.class_name
            self.class_name = self.class_name.__name__

        if not self._class_object:
            if self.class_name == "$remote":
                from mlrun.serving.remote import RemoteStep

                self._class_object = RemoteStep
            else:
                self._class_object = get_class(
                    self.class_name or self._default_class, namespace
                )

        if not self._object or reset:
            # init the step class + args
            class_args = {}
            for key, arg in self.class_args.items():
                if key.startswith(callable_prefix):
                    # "_"-prefixed args are resolved to callables by name
                    class_args[key[1:]] = get_function(arg, namespace)
                else:
                    class_args[key] = arg
            class_args.update(extra_kwargs)

            # add name and context only if target class can accept them
            argspec = getfullargspec(self._class_object)
            if argspec.varkw or "context" in argspec.args:
                class_args["context"] = self.context
            if argspec.varkw or "name" in argspec.args:
                class_args["name"] = self.name

            try:
                self._object = self._class_object(**class_args)
            except TypeError as exc:
                raise TypeError(
                    f"failed to init step {self.name}, {exc}\n args={self.class_args}"
                )

            # determine the right class handler to use
            handler = self.handler
            if handler:
                if not hasattr(self._object, handler):
                    raise GraphError(
                        f"handler ({handler}) specified but doesnt exist in class {self.class_name}"
                    )
            else:
                if hasattr(self._object, "do_event"):
                    handler = "do_event"
                    self.full_event = True
                elif hasattr(self._object, "do"):
                    handler = "do"
            if handler:
                self._handler = getattr(self._object, handler, None)

        self._set_error_handler()
        if mode != "skip":
            self._post_init(mode)

    def _is_local_function(self, context):
        # detect if the class is local (and should be initialized)
        current_function = get_current_function(context)
        if current_function == "*":
            return True
        if not self.function and not current_function:
            return True
        if (
            self.function and self.function == "*"
        ) or self.function == current_function:
            return True
        return False

    @property
    def async_object(self):
        """return the sync or async (storey) class instance"""
        return self._async_object or self._object

    def clear_object(self):
        """drop the instantiated step object (forces re-init)"""
        self._object = None

    def _post_init(self, mode="sync"):
        # give the wrapped object a chance to run its own post-init hook
        if self._object and hasattr(self._object, "post_init"):
            self._object.post_init(mode)

    def respond(self):
        """mark this step as the responder.

        step output will be returned as the flow result, no other step can follow
        """
        self.responder = True
        return self

    def run(self, event, *args, **kwargs):
        """run this step, in async flows the run is done through storey"""
        if not self._is_local_function(self.context):
            # todo invoke remote via REST call
            return event

        if self.context.verbose:
            self.context.logger.info(f"step {self.name} got event {event.body}")

        # inject context parameter if it is expected by the handler
        if self._inject_context:
            kwargs["context"] = self.context
        elif kwargs and "context" in kwargs:
            del kwargs["context"]

        try:
            if self.full_event:
                # full-event handlers get (and may replace) the whole event
                return self._handler(event, *args, **kwargs)
            event.body = self._handler(event.body, *args, **kwargs)
        except Exception as exc:
            self._log_error(event, exc)
            handled = self._call_error_handler(event, exc)
            if not handled:
                raise exc
            # handled errors terminate the event instead of re-raising
            event.terminated = True
        return event
class RouterStep(TaskStep):
    """router step, implement routing logic for running child routes

    Wraps a router class (default: mlrun.serving.ModelRouter) that decides
    which of the child ``routes`` handles each event.
    """

    kind = "router"
    default_shape = "doubleoctagon"  # graphviz shape used when plotting
    _dict_fields = _task_step_fields + ["routes"]
    _default_class = "mlrun.serving.ModelRouter"

    def __init__(
        self,
        class_name: Union[str, type] = None,
        class_args: dict = None,
        handler: str = None,
        routes: list = None,
        name: str = None,
        function: str = None,
    ):
        super().__init__(class_name, class_args, handler, name=name, function=function)
        self._routes: ObjectDict = None
        self.routes = routes

    def get_children(self):
        """get child steps (routes)"""
        return self._routes.values()

    @property
    def routes(self):
        """child routes/steps, traffic is routed to routes based on router logic"""
        return self._routes

    @routes.setter
    def routes(self, routes: dict):
        # normalize raw dicts into step objects ("task" is the default kind)
        self._routes = ObjectDict.from_dict(classes_map, routes, "task")

    def add_route(self, key, route=None, class_name=None, handler=None, **class_args):
        """add child route step or class to the router

        :param key:        unique name (and route path) for the child step
        :param route:      child step object (Task, ..)
        :param class_name: class name to build the route step from (when route is not provided)
        :param class_args: class init arguments
        :param handler:    class handler to invoke on run/event
        """
        if not route and not class_name:
            raise MLRunInvalidArgumentError("route or class_name must be specified")
        if not route:
            route = TaskStep(class_name, class_args, handler=handler)
        route = self._routes.update(key, route)
        route.set_parent(self)
        return route

    def clear_children(self, routes: list):
        """clear child steps (routes); empty/None clears all"""
        if not routes:
            routes = self._routes.keys()
        for key in routes:
            del self._routes[key]

    def init_object(self, context, namespace, mode="sync", reset=False, **extra_kwargs):
        """Init the router object ("skip" mode) and then each child route."""
        if not self._is_local_function(context):
            return

        self.class_args = self.class_args or {}
        super().init_object(
            context, namespace, "skip", reset=reset, routes=self._routes, **extra_kwargs
        )

        for route in self._routes.values():
            route.set_parent(self)
            route.init_object(context, namespace, mode, reset=reset)
        self._set_error_handler()
        self._post_init(mode)

    def __getitem__(self, name):
        return self._routes[name]

    def __setitem__(self, name, route):
        self.add_route(name, route)

    def __delitem__(self, key):
        del self._routes[key]

    def __iter__(self):
        yield from self._routes.keys()

    def plot(self, filename=None, format=None, source=None, **kw):
        """plot/save a graphviz plot"""
        return _generate_graphviz(
            self, _add_graphviz_router, filename, format, source=source, **kw
        )
class QueueStep(BaseStep):
    """queue step, implement an async queue or represent a stream

    When ``path`` is set, events are pushed to an external stream (via
    get_stream_pusher); otherwise it acts as an in-graph queue marker.
    """

    kind = "queue"
    default_shape = "cds"  # graphviz shape used when plotting
    _dict_fields = BaseStep._dict_fields + [
        "path",
        "shards",
        "retention_in_hours",
        "options",
    ]

    def __init__(
        self,
        name: str = None,
        path: str = None,
        after: list = None,
        shards: int = None,
        retention_in_hours: int = None,
        **options,
    ):
        super().__init__(name, after)
        self.path = path
        self.shards = shards
        self.retention_in_hours = retention_in_hours
        self.options = options
        self._stream = None
        self._async_object = None

    def init_object(self, context, namespace, mode="sync", reset=False, **extra_kwargs):
        """create the stream pusher (when a path is configured)"""
        self.context = context
        if self.path:
            self._stream = get_stream_pusher(
                self.path,
                shards=self.shards,
                retention_in_hours=self.retention_in_hours,
            )
        self._set_error_handler()

    @property
    def async_object(self):
        # set externally (e.g. by the async flow builder); may be None
        return self._async_object

    def after_step(self, after):
        # queue steps accept multiple sources
        if self.after:
            if after:
                self.after.append(after)
        else:
            self.after = [after] if after else []
        return self

    def after_state(self, after):
        """Deprecated alias of :py:meth:`after_step`."""
        warnings.warn(
            "This method is deprecated. Use after_step instead",
            # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
            PendingDeprecationWarning,
        )
        return self.after_step(after)

    def run(self, event, *args, **kwargs):
        """push the event body to the stream (if configured) and terminate it"""
        data = event.body
        if not data:
            # nothing to push; pass the event through unchanged
            return event

        if self._stream:
            self._stream.push({"id": event.id, "body": data, "path": event.path})
            event.terminated = True
            event.body = None
        return event
class FlowStep(BaseStep):
"""flow step, represent a workflow or DAG"""
kind = "flow"
_dict_fields = BaseStep._dict_fields + [
"steps",
"engine",
"default_final_step",
]
# TODO - remove once "states" is fully deprecated
@classmethod
def from_dict(cls, struct=None, fields=None, deprecated_fields: dict = None):
deprecated_fields = deprecated_fields or {}
deprecated_fields.update(
{"states": "steps", "default_final_state": "default_final_step"}
)
return super().from_dict(
struct, fields=fields, deprecated_fields=deprecated_fields
)
def __init__(
self,
name=None,
steps=None,
after: list = None,
engine=None,
final_step=None,
# TODO - remove once usage of "state" is fully deprecated
states=None,
final_state=None,
):
super().__init__(name, after)
if states:
warnings.warn(
"The states parameter is deprecated. Use steps instead",
# TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
PendingDeprecationWarning,
)
steps = steps or states
if final_state:
warnings.warn(
"The final_state parameter is deprecated. Use final_step instead",
# TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
PendingDeprecationWarning,
)
final_step = final_step or final_state
self._steps = None
self.steps = steps
self.engine = engine
# TODO - remove use of START_FROM_STATE once it's fully deprecated.
self.from_step = os.environ.get("START_FROM_STEP", None) or os.environ.get(
"START_FROM_STATE", None
)
self.final_step = final_step
self._last_added = None
self._controller = None
self._wait_for_result = False
self._source = None
self._start_steps = []
def get_children(self):
return self._steps.values()
@property
def steps(self):
"""child (workflow) steps"""
return self._steps
@property
def states(self):
warnings.warn(
"This property is deprecated. Use steps instead",
# TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
PendingDeprecationWarning,
)
return self._steps
@property
def controller(self):
"""async (storey) flow controller"""
return self._controller
@steps.setter
def steps(self, steps):
self._steps = ObjectDict.from_dict(classes_map, steps, "task")
@states.setter
def states(self, states):
warnings.warn(
"This property is deprecated. Use steps instead",
# TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
PendingDeprecationWarning,
)
self._steps = ObjectDict.from_dict(classes_map, states, "task")
def add_step(
self,
class_name=None,
name=None,
handler=None,
after=None,
before=None,
graph_shape=None,
function=None,
full_event: bool = None,
**class_args,
):
"""add task, queue or router step/class to the flow
use after/before to insert into a specific location
example:
graph = fn.set_topology("flow", exist_ok=True)
graph.add_step(class_name="Chain", name="s1")
graph.add_step(class_name="Chain", name="s3", after="$prev")
graph.add_step(class_name="Chain", name="s2", after="s1", before="s3")
:param class_name: class name or step object to build the step from
for router steps the class name should start with '*'
for queue/stream step the class should be '>>' or '$queue'
:param name: unique name (and path) for the child step, default is class name
:param handler: class/function handler to invoke on run/event
:param after: the step name this step comes after
can use $prev to indicate the last added step
:param before: string or list of next step names that will run after this step
:param graph_shape: graphviz shape name
:param function: function this step should run in
:param class_args: class init arguments
"""
name, step = params_to_step(
class_name,
name,
handler,
graph_shape=graph_shape,
function=function,
full_event=full_event,
class_args=class_args,
)
self.insert_step(name, step, after, before)
return step
def insert_state(self, key, state, after, before=None):
warnings.warn(
"This method is deprecated. Use insert_step instead",
# TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
PendingDeprecationWarning,
)
return self.insert_step(key, state, after, before)
def insert_step(self, key, step, after, before=None):
"""insert step object into the flow, specify before and after"""
step = self._steps.update(key, step)
step.set_parent(self)
if after == "$prev" and len(self._steps) == 1:
after = None
previous = ""
if after:
if after == "$prev" and self._last_added:
previous = self._last_added.name
else:
if after not in self._steps.keys():
raise MLRunInvalidArgumentError(
f"cant set after, there is no step named {after}"
)
previous = after
step.after_step(previous)
if before:
if before not in self._steps.keys():
raise MLRunInvalidArgumentError(
f"cant set before, there is no step named {before}"
)
if before == step.name or before == previous:
raise GraphError(
f"graph loop, step {before} is specified in before and/or after {key}"
)
self[before].after_step(step.name)
self._last_added = step
return step
def clear_children(self, steps: list = None, states: list = None):
"""remove some or all of the states, empty/None for all"""
if states:
warnings.warn(
"This states parameter is deprecated. Use steps instead",
# TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
PendingDeprecationWarning,
)
steps = steps or states
if not steps:
steps = self._steps.keys()
for key in steps:
del self._steps[key]
def __getitem__(self, name):
return self._steps[name]
def __setitem__(self, name, step):
self.add_step(name, step)
def __delitem__(self, key):
del self._steps[key]
def __iter__(self):
yield from self._steps.keys()
def init_object(self, context, namespace, mode="sync", reset=False, **extra_kwargs):
self.context = context
self.check_and_process_graph()
for step in self._steps.values():
step.set_parent(self)
step.init_object(context, namespace, mode, reset=reset)
self._set_error_handler()
self._post_init(mode)
if self.engine != "sync":
self._build_async_flow()
def check_and_process_graph(self, allow_empty=False):
"""validate correct graph layout and initialize the .next links"""
if self.is_empty() and allow_empty:
self._start_steps = []
return [], None, []
def has_loop(step, previous):
for next_step in step.after or []:
if next_step in previous:
return step.name
downstream = has_loop(self[next_step], previous + [next_step])
if downstream:
return downstream
return None
start_steps = []
for step in self._steps.values():
step._next = None
if step.after:
loop_step = has_loop(step, [])
if loop_step:
raise GraphError(
f"Error, loop detected in step {loop_step}, graph must be acyclic (DAG)"
)
else:
start_steps.append(step.name)
responders = []
for step in self._steps.values():
if hasattr(step, "responder") and step.responder:
responders.append(step.name)
if step.on_error and step.on_error in start_steps:
start_steps.remove(step.on_error)
if step.after:
prev_step = step.after[0]
self[prev_step].set_next(step.name)
if self.on_error and self.on_error in start_steps:
start_steps.remove(self.on_error)
if (
len(responders) > 1
): # should not have multiple steps which respond to request
raise GraphError(
f'there are more than one responder steps in the graph ({",".join(responders)})'
)
if self.from_step:
if self.from_step not in self.steps:
raise GraphError(
f"from_step ({self.from_step}) specified and not found in graph steps"
)
start_steps = [self.from_step]
self._start_steps = [self[name] for name in start_steps]
def get_first_function_step(step, current_function):
# find the first step which belongs to the function
if (
hasattr(step, "function")
and step.function
and step.function == current_function
):
return step
for item in step.next or []:
next_step = self[item]
returned_step = get_first_function_step(next_step, current_function)
if returned_step:
return returned_step
current_function = get_current_function(self.context)
if current_function and current_function != "*":
new_start_steps = []
for from_step in self._start_steps:
step = get_first_function_step(from_step, current_function)
if step:
new_start_steps.append(step)
if not new_start_steps:
raise GraphError(
f"did not find steps pointing to current function ({current_function})"
)
self._start_steps = new_start_steps
if self.engine == "sync" and len(self._start_steps) > 1:
raise GraphError(
"sync engine can only have one starting step (without .after)"
)
default_final_step = None
if self.final_step:
if self.final_step not in self.steps:
raise GraphError(
f"final_step ({self.final_step}) specified and not found in graph steps"
)
default_final_step = self.final_step
elif len(self._start_steps) == 1:
# find the final step in case if a simple sequence of steps
next_obj = self._start_steps[0]
while next_obj:
next = next_obj.next
if not next:
default_final_step = next_obj.name
break
next_obj = self[next[0]] if len(next) == 1 else None
return self._start_steps, default_final_step, responders
def set_flow_source(self, source):
"""set the async flow (storey) source"""
self._source = source
    def _build_async_flow(self):
        """initialize and build the async/storey DAG

        wraps every local step in the matching storey object, wires the
        edges following each step's .next links, hooks error handlers, and
        starts the storey controller (stored in self._controller)
        """
        try:
            import storey
        except ImportError:
            raise GraphError("storey package is not installed, use pip install storey")
        def process_step(state, step, root):
            # recursively connect `state` and its downstream steps into the storey DAG
            if not state._is_local_function(self.context):
                return
            for item in state.next or []:
                next_state = root[item]
                if next_state.async_object:
                    next_step = step.to(next_state.async_object)
                    process_step(next_state, next_step, root)
        for step in self._steps.values():
            if hasattr(step, "async_object") and step._is_local_function(self.context):
                if step.kind == StepKinds.queue:
                    # in mock mode with downstream steps, bypass the real stream
                    skip_stream = self.context.is_mock and step.next
                    if step.path and not skip_stream:
                        stream_path = step.path
                        endpoint = None
                        if "://" in stream_path:
                            # full v3io url - split into endpoint + container path
                            endpoint, stream_path = parse_v3io_path(step.path)
                            stream_path = stream_path.strip("/")
                        step._async_object = storey.StreamTarget(
                            storey.V3ioDriver(endpoint), stream_path
                        )
                    else:
                        # in-memory queue: a pass-through Map stands in for the stream
                        step._async_object = storey.Map(lambda x: x)
                elif not step.async_object or not hasattr(
                    step.async_object, "_outlets"
                ):
                    # if regular class, wrap with storey Map
                    step._async_object = storey.Map(
                        step._handler,
                        full_event=step.full_event,
                        name=step.name,
                        context=self.context,
                    )
                if not step.next and hasattr(step, "responder") and step.responder:
                    # if responder step (return result), add Complete()
                    step.async_object.to(storey.Complete(full_event=True))
                    self._wait_for_result = True
        # todo: allow source array (e.g. data->json loads..)
        source = self._source or storey.SyncEmitSource()
        for next_state in self._start_steps:
            next_step = source.to(next_state.async_object)
            process_step(next_state, next_step, self)
        for step in self._steps.values():
            # add error handler hooks
            if (step.on_error or self.on_error) and step.async_object:
                error_step = self._steps[step.on_error or self.on_error]
                # never set a step as its own error handler
                if step != error_step:
                    step.async_object.set_recovery_step(error_step.async_object)
        self._controller = source.run()
def get_queue_links(self):
"""return dict of function and queue its listening on, for building stream triggers"""
links = {}
for step in self.get_children():
if step.kind == StepKinds.queue:
for item in step.next or []:
next_step = self[item]
if not next_step.function:
raise GraphError(
f"child function name must be specified in steps ({next_step.name}) which follow a queue"
)
if next_step.function in links:
raise GraphError(
f"function ({next_step.function}) cannot read from multiple queues"
)
links[next_step.function] = step
return links
    def init_queues(self):
        """init/create the streams used in this flow"""
        for step in self.get_children():
            if step.kind == StepKinds.queue:
                # namespace is not needed for queue initialization
                step.init_object(self.context, None)
    def is_empty(self):
        """is the graph empty (no child steps)"""
        return len(self.steps) == 0
    @staticmethod
    async def _await_and_return_id(awaitable, event):
        """await the emit result, then return a copy of the event carrying only its id"""
        await awaitable
        event = copy(event)
        event.body = {"id": event.id}
        return event
    def run(self, event, *args, **kwargs):
        """run the flow on an event

        async flows delegate to the storey controller; sync flows walk the
        single linear chain of steps starting from the first start step
        """
        if self._controller:
            # async flow (using storey)
            event._awaitable_result = None
            if config.datastore.async_source_mode == "enabled":
                resp_awaitable = self._controller.emit(
                    event, await_result=self._wait_for_result
                )
                if self._wait_for_result:
                    return resp_awaitable
                # caller gets an awaitable resolving to an event holding only the id
                return self._await_and_return_id(resp_awaitable, event)
            else:
                resp = self._controller.emit(
                    event, return_awaitable_result=self._wait_for_result
                )
                if self._wait_for_result and resp:
                    return resp.await_result()
                # no responder step - acknowledge with the event id only
                event = copy(event)
                event.body = {"id": event.id}
                return event
        if len(self._start_steps) == 0:
            return event
        next_obj = self._start_steps[0]
        while next_obj:
            try:
                event = next_obj.run(event, *args, **kwargs)
            except Exception as exc:
                self._log_error(event, exc, failed_step=next_obj.name)
                handled = self._call_error_handler(event, exc)
                if not handled:
                    raise exc
                event.terminated = True
                return event
            if hasattr(event, "terminated") and event.terminated:
                return event
            next = next_obj.next
            if next and len(next) > 1:
                raise GraphError(
                    f"synchronous flow engine doesnt support branches use async, step={next_obj.name}"
                )
            next_obj = self[next[0]] if next else None
        return event
    def wait_for_completion(self):
        """wait for completion of run in async flows"""
        if self._controller:
            if hasattr(self._controller, "terminate"):
                self._controller.terminate()
            return self._controller.await_termination()
    def plot(self, filename=None, format=None, source=None, targets=None, **kw):
        """plot/save graph using graphviz

        :param filename: target image file path (None returns the graph object)
        :param format:   image format (default derived from filename suffix, or png)
        :param source:   optional custom entry node (BaseStep)
        :param targets:  optional extra target nodes drawn after the final step
        """
        return _generate_graphviz(
            self,
            _add_graphviz_flow,
            filename,
            format,
            source=source,
            targets=targets,
            **kw,
        )
class RootFlowStep(FlowStep):
    """root flow step

    the top-level graph object of a serving function; same behavior as
    FlowStep but serialized with kind "root"
    """
    kind = "root"
    _dict_fields = ["steps", "engine", "final_step", "on_error"]
    # TODO - remove once "final_state" is fully deprecated
    @classmethod
    def from_dict(cls, struct=None, fields=None):
        # accept legacy "final_state" key and map it to "final_step"
        return super().from_dict(
            struct, fields=fields, deprecated_fields={"final_state": "final_step"}
        )
# registry mapping the serialized step "kind" string to its implementing class;
# used when deserializing graphs from dict/yaml (ObjectDict.from_dict)
classes_map = {
    "task": TaskStep,
    "router": RouterStep,
    "flow": FlowStep,
    "queue": QueueStep,
}
def get_current_function(context):
    """return the active function name from the graph context ("" when unset)"""
    if not context:
        return ""
    return getattr(context, "current_function", None) or ""
def _add_graphviz_router(graph, step, source=None, **kwargs):
    """render a router step and its routes into a graphviz graph"""
    router_id = step.fullname
    if source:
        # optional synthetic entry node feeding the router
        graph.node("_start", source.name, shape=source.shape, style="filled")
        graph.edge("_start", router_id)
    graph.node(router_id, label=step.name, shape=step.get_shape())
    for child in step.get_children():
        graph.node(child.fullname, label=child.name, shape=child.get_shape())
        graph.edge(router_id, child.fullname)
def _add_graphviz_flow(
    graph, step, source=None, targets=None,
):
    """render a flow (DAG) step into a graphviz graph

    routers are drawn as sub-clusters, queue/task steps as plain nodes;
    edges follow the .after links, error handlers are drawn dashed
    """
    start_steps, default_final_step, responders = step.check_and_process_graph(
        allow_empty=True
    )
    graph.node("_start", source.name, shape=source.shape, style="filled")
    for start_step in start_steps:
        graph.edge("_start", start_step.fullname)
    for child in step.get_children():
        kind = child.kind
        if kind == StepKinds.router:
            # routers render as a cluster containing their routes
            with graph.subgraph(name="cluster_" + child.fullname) as sg:
                _add_graphviz_router(sg, child)
        else:
            graph.node(child.fullname, label=child.name, shape=child.get_shape())
        after = child.after or []
        for item in after:
            previous_object = step[item]
            kw = (
                {"ltail": "cluster_" + previous_object.fullname}
                if previous_object.kind == StepKinds.router
                else {}
            )
            graph.edge(previous_object.fullname, child.fullname, **kw)
        if child.on_error:
            graph.edge(child.fullname, child.on_error, style="dashed")
    # draw targets after the last step (if specified)
    if targets:
        for target in targets or []:
            graph.node(target.fullname, label=target.name, shape=target.get_shape())
            last_step = target.after or default_final_step
            if last_step:
                graph.edge(last_step, target.fullname)
def _generate_graphviz(
    step, renderer, filename=None, format=None, source=None, targets=None, **kw,
):
    """build a graphviz Digraph for `step` via `renderer`; optionally render to file"""
    try:
        from graphviz import Digraph
    except ImportError:
        raise ImportError(
            'graphviz is not installed, run "pip install graphviz" first!'
        )
    dot = Digraph("mlrun-flow", format="jpg")
    dot.attr(compound="true", **kw)
    entry = source or BaseStep("start", shape="egg")
    renderer(dot, step, source=entry, targets=targets)
    if filename:
        suffix = pathlib.Path(filename).suffix
        if suffix:
            # derive output format from the file extension when not given
            filename = filename[: -len(suffix)]
            format = format or suffix[1:]
        dot.render(filename, format=format or "png")
    return dot
def graph_root_setter(server, graph):
    """set graph root object from class or dict"""
    if not graph:
        return
    if isinstance(graph, dict):
        kind = graph.get("kind")
    elif hasattr(graph, "kind"):
        kind = graph.kind
    else:
        raise MLRunInvalidArgumentError("graph must be a dict or a valid object")
    # only router or root (flow) steps may serve as the graph root
    if kind == StepKinds.router:
        root_class = RouterStep
    elif not kind or kind == StepKinds.root:
        root_class = RootFlowStep
    else:
        raise GraphError(f"illegal root step {kind}")
    server._graph = server._verify_dict(graph, "graph", root_class)
def get_name(name, class_name):
    """resolve a task name: an explicit name wins, otherwise derive it from the class"""
    if name:
        return name
    if not class_name:
        raise MLRunInvalidArgumentError("name or class_name must be provided")
    return class_name.__name__ if isinstance(class_name, type) else class_name
def params_to_state(
    class_name,
    name,
    handler=None,
    graph_shape=None,
    function=None,
    full_event=None,
    class_args=None,
):
    """deprecated alias, delegates to params_to_step"""
    warnings.warn(
        "This method is deprecated. Use param_to_step instead",
        # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
        PendingDeprecationWarning,
    )
    return params_to_step(
        class_name=class_name,
        name=name,
        handler=handler,
        graph_shape=graph_shape,
        function=function,
        full_event=full_event,
        class_args=class_args,
    )
def params_to_step(
    class_name,
    name,
    handler=None,
    graph_shape=None,
    function=None,
    full_event=None,
    class_args=None,
):
    """return step object from provided params or classes/objects

    dispatch rules on class_name:
    - step-like object (has to_dict) -> rebuilt via classes_map by its kind
    - ">>" or "$queue"               -> QueueStep (requires name and path arg)
    - "*Class"                       -> RouterStep wrapping Class
    - any other class/handler        -> TaskStep
    returns a (name, step) tuple
    """
    if class_name and hasattr(class_name, "to_dict"):
        struct = class_name.to_dict()
        kind = struct.get("kind", StepKinds.task)
        name = name or struct.get("name", struct.get("class_name"))
        cls = classes_map.get(kind, RootFlowStep)
        step = cls.from_dict(struct)
        step.function = function
        step.full_event = full_event
    elif class_name and class_name in [">>", "$queue"]:
        if "path" not in class_args:
            raise MLRunInvalidArgumentError(
                "path=<stream path or None> must be specified for queues"
            )
        if not name:
            raise MLRunInvalidArgumentError("queue name must be specified")
        step = QueueStep(name, **class_args)
    elif class_name and class_name.startswith("*"):
        # leading "*" marks a router step
        routes = class_args.get("routes", None)
        class_name = class_name[1:]
        name = get_name(name, class_name or "router")
        step = RouterStep(
            class_name, class_args, handler, name=name, function=function, routes=routes
        )
    elif class_name or handler:
        name = get_name(name, class_name)
        step = TaskStep(
            class_name,
            class_args,
            handler,
            name=name,
            function=function,
            full_event=full_event,
        )
    else:
        raise MLRunInvalidArgumentError("class_name or handler must be provided")
    if graph_shape:
        step.shape = graph_shape
    return name, step
# mlrun/serving/states.py
# Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# public API exported via `from mlrun.serving.states import *`
__all__ = ["TaskStep", "RouterStep", "RootFlowStep"]
import os
import pathlib
import traceback
import warnings
from copy import copy, deepcopy
from inspect import getfullargspec, signature
from typing import Union
from ..config import config
from ..datastore import get_stream_pusher
from ..errors import MLRunInvalidArgumentError
from ..model import ModelObj, ObjectDict
from ..platforms.iguazio import parse_v3io_path
from ..utils import get_class, get_function
# class args whose key starts with this prefix hold a function path to resolve
callable_prefix = "_"
# separator used when building step full names (parent/child paths)
path_splitter = "/"
# placeholder name referring to the most recently added step
previous_step = "$prev"
class GraphError(Exception):
    """error in graph topology or configuration (loops, missing steps, etc.)"""
    pass
class StepKinds:
    """enumeration of supported graph step kinds (serialized `kind` values)"""
    router = "router"
    task = "task"
    flow = "flow"
    queue = "queue"
    choice = "choice"
    root = "root"
# fields serialized to/from dict for TaskStep (and router steps which extend it)
_task_step_fields = [
    "kind",
    "class_name",
    "class_args",
    "handler",
    "skip_context",
    "after",
    "function",
    "comment",
    "shape",
    "full_event",
    "on_error",
    "responder",
]
def new_model_endpoint(class_name, model_path, handler=None, **class_args):
    """build a model-serving TaskStep for the given class and model path"""
    init_args = deepcopy(class_args)
    init_args["model_path"] = model_path
    return TaskStep(class_name, init_args, handler=handler)
def new_remote_endpoint(url, **class_args):
    """build a TaskStep that proxies events to a remote url"""
    init_args = deepcopy(class_args)
    init_args["url"] = url
    return TaskStep("$remote", init_args)
class BaseStep(ModelObj):
    """common base for all graph steps (task/router/queue/flow)

    holds the graph wiring attributes (name, after/next links, parent,
    error handler) and the shared graph navigation helpers
    """
    kind = "BaseStep"
    default_shape = "ellipse"
    _dict_fields = ["kind", "comment", "after", "on_error"]
    def __init__(self, name: str = None, after: list = None, shape: str = None):
        self.name = name
        self._parent = None          # owning flow/router, set via set_parent()
        self.comment = None
        self.context = None          # graph/server context, set in init_object()
        self.after = after           # list of upstream step names
        self._next = None            # downstream step names, built by the graph check
        self.shape = shape           # graphviz shape override
        self.on_error = None         # name of the error handler step
        self._on_error_handler = None
    def get_shape(self):
        """graphviz shape"""
        return self.shape or self.default_shape
    def set_parent(self, parent):
        """set/link the step parent (flow/router)"""
        self._parent = parent
    @property
    def next(self):
        """downstream step names (initialized by check_and_process_graph)"""
        return self._next
    @property
    def parent(self):
        """step parent (flow/router)"""
        return self._parent
    def set_next(self, key: str):
        """set/insert the key as next after this step, optionally remove other keys"""
        if not self.next:
            self._next = [key]
        elif key not in self.next:
            self._next.append(key)
        return self
    def after_step(self, after):
        """specify the previous step name"""
        # most steps only accept one source
        self.after = [after] if after else []
        return self
    def after_state(self, after):
        """deprecated alias for after_step"""
        warnings.warn(
            "This method is deprecated. Use after_step instead",
            # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
            PendingDeprecationWarning,
        )
        return self.after_step(after)
    def error_handler(self, step_name: str = None, state_name=None):
        """set error handler step (on failure/raise of this step)"""
        if state_name:
            warnings.warn(
                "The state_name parameter is deprecated. Use step_name instead",
                # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
                PendingDeprecationWarning,
            )
        step_name = step_name or state_name
        if not step_name:
            raise MLRunInvalidArgumentError("Must specify step_name")
        self.on_error = step_name
        return self
    def init_object(self, context, namespace, mode="sync", reset=False, **extra_kwargs):
        """init the step class"""
        self.context = context
    def _is_local_function(self, context):
        # base steps always run in the current function; subclasses may override
        return True
    def get_children(self):
        """get child steps (for router/flow)"""
        return []
    def __iter__(self):
        yield from []
    @property
    def fullname(self):
        """full path/name (include parents)"""
        name = self.name or ""
        if self._parent and self._parent.fullname:
            name = path_splitter.join([self._parent.fullname, name])
        return name.replace(":", "_")  # replace for graphviz escaping
    def _post_init(self, mode="sync"):
        # hook for subclasses, called after init_object
        pass
    def _set_error_handler(self):
        """init/link the error handler for this step"""
        if self.on_error:
            error_step = self.context.root.path_to_step(self.on_error)
            self._on_error_handler = error_step.run
    def _log_error(self, event, err, **kwargs):
        """on failure log (for sync mode)"""
        self.context.logger.error(
            f"step {self.name} got error {err} when processing an event:\n {event.body}"
        )
        message = traceback.format_exc()
        self.context.logger.error(message)
        self.context.push_error(
            event, f"{err}\n{message}", source=self.fullname, **kwargs
        )
    def _call_error_handler(self, event, err, **kwargs):
        """call the error handler if exist"""
        if self._on_error_handler:
            event.error = str(err)
            event.origin_state = self.fullname
            return self._on_error_handler(event)
    def path_to_step(self, path: str):
        """return step object from step relative/fullname"""
        path = path or ""
        tree = path.split(path_splitter)
        next_level = self
        for step in tree:
            if step not in next_level:
                raise GraphError(
                    f"step {step} doesnt exist in the graph under {next_level.fullname}"
                )
            next_level = next_level[step]
        return next_level
    def path_to_state(self, path: str):
        """deprecated alias for path_to_step"""
        warnings.warn(
            "This method is deprecated. Use path_to_step instead",
            # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
            PendingDeprecationWarning,
        )
        return self.path_to_step(path)
    def to(
        self,
        class_name: Union[str, type] = None,
        name: str = None,
        handler: str = None,
        graph_shape: str = None,
        function: str = None,
        full_event: bool = None,
        **class_args,
    ):
        """add a step right after this step and return the new step
        example, a 4 step pipeline ending with a stream:
        graph.to('URLDownloader')\
        .to('ToParagraphs')\
        .to(name='to_json', handler='json.dumps')\
        .to('>>', 'to_v3io', path=stream_path)\
        :param class_name: class name or step object to build the step from
                           for router steps the class name should start with '*'
                           for queue/stream step the class should be '>>' or '$queue'
        :param name:       unique name (and path) for the child step, default is class name
        :param handler:    class/function handler to invoke on run/event
        :param graph_shape: graphviz shape name
        :param function:   function this step should run in
        :param full_event: this step accepts the full event (not just body)
        :param class_args: class init arguments
        """
        if hasattr(self, "steps"):
            parent = self
        elif self._parent:
            parent = self._parent
        else:
            raise GraphError(
                f"step {self.name} parent is not set or its not part of a graph"
            )
        name, step = params_to_step(
            class_name,
            name,
            handler,
            graph_shape=graph_shape,
            function=function,
            full_event=full_event,
            class_args=class_args,
        )
        step = parent._steps.update(name, step)
        step.set_parent(parent)
        if not hasattr(self, "steps"):
            # check that its not the root, todo: in future may gave nested flows
            step.after_step(self.name)
        parent._last_added = step
        return step
class TaskStep(BaseStep):
    """task execution step, runs a class or handler"""
    kind = "task"
    _dict_fields = _task_step_fields
    _default_class = ""
    def __init__(
        self,
        class_name: Union[str, type] = None,
        class_args: dict = None,
        handler: str = None,
        name: str = None,
        after: list = None,
        full_event: bool = None,
        function: str = None,
        responder: bool = None,
    ):
        super().__init__(name, after)
        self.class_name = class_name
        self.class_args = class_args or {}
        self.handler = handler
        self.function = function         # name of the function this step runs in
        self._handler = None             # resolved callable invoked in run()
        self._object = None              # instantiated step class
        self._async_object = None        # storey wrapper (async flows)
        self.skip_context = None
        self.context = None
        self._class_object = None        # resolved class (not yet instantiated)
        self.responder = responder       # True if this step returns the flow result
        self.full_event = full_event     # pass the full event (vs event.body)
        self.on_error = None
        self._inject_context = False     # pass context= kwarg to a plain handler
    def init_object(self, context, namespace, mode="sync", reset=False, **extra_kwargs):
        """resolve the class/handler and instantiate the step object"""
        self.context = context
        self._async_object = None
        if not self._is_local_function(context):
            # skip init of non local functions
            return
        if self.handler and not self.class_name:
            # link to function
            if callable(self.handler):
                self._handler = self.handler
                self.handler = self.handler.__name__
            else:
                self._handler = get_function(self.handler, namespace)
            args = signature(self._handler).parameters
            if args and "context" in list(args.keys()):
                self._inject_context = True
            return
        if isinstance(self.class_name, type):
            self._class_object = self.class_name
            self.class_name = self.class_name.__name__
        if not self._class_object:
            if self.class_name == "$remote":
                from mlrun.serving.remote import RemoteStep
                self._class_object = RemoteStep
            else:
                self._class_object = get_class(
                    self.class_name or self._default_class, namespace
                )
        if not self._object or reset:
            # init the step class + args
            class_args = {}
            for key, arg in self.class_args.items():
                if key.startswith(callable_prefix):
                    # prefixed args hold function paths - resolve them to callables
                    class_args[key[1:]] = get_function(arg, namespace)
                else:
                    class_args[key] = arg
            class_args.update(extra_kwargs)
            # add name and context only if target class can accept them
            argspec = getfullargspec(self._class_object)
            if argspec.varkw or "context" in argspec.args:
                class_args["context"] = self.context
            if argspec.varkw or "name" in argspec.args:
                class_args["name"] = self.name
            try:
                self._object = self._class_object(**class_args)
            except TypeError as exc:
                raise TypeError(
                    f"failed to init step {self.name}, {exc}\n args={self.class_args}"
                )
            # determine the right class handler to use
            handler = self.handler
            if handler:
                if not hasattr(self._object, handler):
                    raise GraphError(
                        f"handler ({handler}) specified but doesnt exist in class {self.class_name}"
                    )
            else:
                if hasattr(self._object, "do_event"):
                    # do_event convention receives the full event object
                    handler = "do_event"
                    self.full_event = True
                elif hasattr(self._object, "do"):
                    handler = "do"
            if handler:
                self._handler = getattr(self._object, handler, None)
        self._set_error_handler()
        if mode != "skip":
            self._post_init(mode)
    def _is_local_function(self, context):
        # detect if the class is local (and should be initialized)
        current_function = get_current_function(context)
        if current_function == "*":
            return True
        if not self.function and not current_function:
            return True
        if (
            self.function and self.function == "*"
        ) or self.function == current_function:
            return True
        return False
    @property
    def async_object(self):
        """return the sync or async (storey) class instance"""
        return self._async_object or self._object
    def clear_object(self):
        """drop the instantiated object (forces re-init on next init_object)"""
        self._object = None
    def _post_init(self, mode="sync"):
        # give the user class a chance to run post-init logic (e.g. load model)
        if self._object and hasattr(self._object, "post_init"):
            self._object.post_init(mode)
    def respond(self):
        """mark this step as the responder.
        step output will be returned as the flow result, no other step can follow
        """
        self.responder = True
        return self
    def run(self, event, *args, **kwargs):
        """run this step, in async flows the run is done through storey"""
        if not self._is_local_function(self.context):
            # todo invoke remote via REST call
            return event
        if self.context.verbose:
            self.context.logger.info(f"step {self.name} got event {event.body}")
        # inject context parameter if it is expected by the handler
        if self._inject_context:
            kwargs["context"] = self.context
        elif kwargs and "context" in kwargs:
            del kwargs["context"]
        try:
            if self.full_event:
                return self._handler(event, *args, **kwargs)
            event.body = self._handler(event.body, *args, **kwargs)
        except Exception as exc:
            self._log_error(event, exc)
            handled = self._call_error_handler(event, exc)
            if not handled:
                raise exc
            event.terminated = True
        return event
class RouterStep(TaskStep):
    """router step, implement routing logic for running child routes"""
    kind = "router"
    default_shape = "doubleoctagon"
    _dict_fields = _task_step_fields + ["routes"]
    _default_class = "mlrun.serving.ModelRouter"
    def __init__(
        self,
        class_name: Union[str, type] = None,
        class_args: dict = None,
        handler: str = None,
        routes: list = None,
        name: str = None,
        function: str = None,
    ):
        super().__init__(class_name, class_args, handler, name=name, function=function)
        self._routes: ObjectDict = None
        self.routes = routes
    def get_children(self):
        """get child steps (routes)"""
        return self._routes.values()
    @property
    def routes(self):
        """child routes/steps, traffic is routed to routes based on router logic"""
        return self._routes
    @routes.setter
    def routes(self, routes: dict):
        self._routes = ObjectDict.from_dict(classes_map, routes, "task")
    def add_route(self, key, route=None, class_name=None, handler=None, **class_args):
        """add child route step or class to the router

        :param key:        unique name (and route path) for the child step
        :param route:      child step object (Task, ..)
        :param class_name: class name to build the route step from (when route is not provided)
        :param class_args: class init arguments
        :param handler:    class handler to invoke on run/event
        """
        if not route and not class_name:
            raise MLRunInvalidArgumentError("route or class_name must be specified")
        if not route:
            route = TaskStep(class_name, class_args, handler=handler)
        route = self._routes.update(key, route)
        route.set_parent(self)
        return route
    def clear_children(self, routes: list):
        """clear child steps (routes)"""
        if not routes:
            routes = self._routes.keys()
        for key in routes:
            del self._routes[key]
    def init_object(self, context, namespace, mode="sync", reset=False, **extra_kwargs):
        """init the router class and then each of its routes"""
        if not self._is_local_function(context):
            return
        self.class_args = self.class_args or {}
        # "skip" mode defers _post_init until after all routes are initialized
        super().init_object(
            context, namespace, "skip", reset=reset, routes=self._routes, **extra_kwargs
        )
        for route in self._routes.values():
            route.set_parent(self)
            route.init_object(context, namespace, mode, reset=reset)
        self._set_error_handler()
        self._post_init(mode)
    def __getitem__(self, name):
        return self._routes[name]
    def __setitem__(self, name, route):
        self.add_route(name, route)
    def __delitem__(self, key):
        del self._routes[key]
    def __iter__(self):
        yield from self._routes.keys()
    def plot(self, filename=None, format=None, source=None, **kw):
        """plot/save a graphviz plot"""
        return _generate_graphviz(
            self, _add_graphviz_router, filename, format, source=source, **kw
        )
class QueueStep(BaseStep):
    """queue step, implement an async queue or represent a stream"""
    kind = "queue"
    default_shape = "cds"
    _dict_fields = BaseStep._dict_fields + [
        "path",
        "shards",
        "retention_in_hours",
        "options",
    ]
    def __init__(
        self,
        name: str = None,
        path: str = None,
        after: list = None,
        shards: int = None,
        retention_in_hours: int = None,
        **options,
    ):
        super().__init__(name, after)
        self.path = path                         # stream path/url (None = in-memory queue)
        self.shards = shards
        self.retention_in_hours = retention_in_hours
        self.options = options
        self._stream = None                      # stream pusher, created in init_object
        self._async_object = None
    def init_object(self, context, namespace, mode="sync", reset=False, **extra_kwargs):
        """create the stream pusher when a stream path is configured"""
        self.context = context
        if self.path:
            self._stream = get_stream_pusher(
                self.path,
                shards=self.shards,
                retention_in_hours=self.retention_in_hours,
            )
        self._set_error_handler()
    @property
    def async_object(self):
        # set externally by the flow's async builder (storey object)
        return self._async_object
    def after_step(self, after):
        # queue steps accept multiple sources
        if self.after:
            if after:
                self.after.append(after)
        else:
            self.after = [after] if after else []
        return self
    def after_state(self, after):
        """deprecated alias for after_step"""
        warnings.warn(
            "This method is deprecated. Use after_step instead",
            # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
            PendingDeprecationWarning,
        )
        return self.after_step(after)
    def run(self, event, *args, **kwargs):
        """push the event body into the stream (sync mode)"""
        data = event.body
        if not data:
            return event
        if self._stream:
            self._stream.push({"id": event.id, "body": data, "path": event.path})
            # event was handed off to the stream - stop the sync chain here
            event.terminated = True
            event.body = None
        return event
class FlowStep(BaseStep):
"""flow step, represent a workflow or DAG"""
kind = "flow"
_dict_fields = BaseStep._dict_fields + [
"steps",
"engine",
"default_final_step",
]
    # TODO - remove once "states" is fully deprecated
    @classmethod
    def from_dict(cls, struct=None, fields=None, deprecated_fields: dict = None):
        """build the flow from a dict, mapping legacy "states" keys to "steps" """
        deprecated_fields = deprecated_fields or {}
        deprecated_fields.update(
            {"states": "steps", "default_final_state": "default_final_step"}
        )
        return super().from_dict(
            struct, fields=fields, deprecated_fields=deprecated_fields
        )
    def __init__(
        self,
        name=None,
        steps=None,
        after: list = None,
        engine=None,
        final_step=None,
        # TODO - remove once usage of "state" is fully deprecated
        states=None,
        final_state=None,
    ):
        super().__init__(name, after)
        if states:
            warnings.warn(
                "The states parameter is deprecated. Use steps instead",
                # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
                PendingDeprecationWarning,
            )
            steps = steps or states
        if final_state:
            warnings.warn(
                "The final_state parameter is deprecated. Use final_step instead",
                # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
                PendingDeprecationWarning,
            )
            final_step = final_step or final_state
        self._steps = None
        self.steps = steps
        self.engine = engine              # "sync" or async (storey) execution
        # TODO - remove use of START_FROM_STATE once it's fully deprecated.
        self.from_step = os.environ.get("START_FROM_STEP", None) or os.environ.get(
            "START_FROM_STATE", None
        )
        self.final_step = final_step
        self._last_added = None           # last step added (target of "$prev")
        self._controller = None           # storey controller (async flows)
        self._wait_for_result = False     # True when a responder step exists
        self._source = None               # custom storey source (set_flow_source)
        self._start_steps = []
    def get_children(self):
        """get child (workflow) step objects"""
        return self._steps.values()
    @property
    def steps(self):
        """child (workflow) steps"""
        return self._steps
    @property
    def states(self):
        """deprecated alias for steps"""
        warnings.warn(
            "This property is deprecated. Use steps instead",
            # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
            PendingDeprecationWarning,
        )
        return self._steps
    @property
    def controller(self):
        """async (storey) flow controller"""
        return self._controller
    @steps.setter
    def steps(self, steps):
        self._steps = ObjectDict.from_dict(classes_map, steps, "task")
    @states.setter
    def states(self, states):
        warnings.warn(
            "This property is deprecated. Use steps instead",
            # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
            PendingDeprecationWarning,
        )
        self._steps = ObjectDict.from_dict(classes_map, states, "task")
    def add_step(
        self,
        class_name=None,
        name=None,
        handler=None,
        after=None,
        before=None,
        graph_shape=None,
        function=None,
        full_event: bool = None,
        **class_args,
    ):
        """add task, queue or router step/class to the flow
        use after/before to insert into a specific location
        example:
            graph = fn.set_topology("flow", exist_ok=True)
            graph.add_step(class_name="Chain", name="s1")
            graph.add_step(class_name="Chain", name="s3", after="$prev")
            graph.add_step(class_name="Chain", name="s2", after="s1", before="s3")
        :param class_name:  class name or step object to build the step from
                            for router steps the class name should start with '*'
                            for queue/stream step the class should be '>>' or '$queue'
        :param name:        unique name (and path) for the child step, default is class name
        :param handler:     class/function handler to invoke on run/event
        :param after:       the step name this step comes after
                            can use $prev to indicate the last added step
        :param before:      string or list of next step names that will run after this step
        :param graph_shape: graphviz shape name
        :param function:    function this step should run in
        :param class_args:  class init arguments
        """
        name, step = params_to_step(
            class_name,
            name,
            handler,
            graph_shape=graph_shape,
            function=function,
            full_event=full_event,
            class_args=class_args,
        )
        self.insert_step(name, step, after, before)
        return step
    def insert_state(self, key, state, after, before=None):
        """deprecated alias for insert_step"""
        warnings.warn(
            "This method is deprecated. Use insert_step instead",
            # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
            PendingDeprecationWarning,
        )
        return self.insert_step(key, state, after, before)
    def insert_step(self, key, step, after, before=None):
        """insert step object into the flow, specify before and after"""
        step = self._steps.update(key, step)
        step.set_parent(self)
        # "$prev" on the first step means no predecessor
        if after == "$prev" and len(self._steps) == 1:
            after = None
        previous = ""
        if after:
            if after == "$prev" and self._last_added:
                previous = self._last_added.name
            else:
                if after not in self._steps.keys():
                    raise MLRunInvalidArgumentError(
                        f"cant set after, there is no step named {after}"
                    )
                previous = after
            step.after_step(previous)
        if before:
            if before not in self._steps.keys():
                raise MLRunInvalidArgumentError(
                    f"cant set before, there is no step named {before}"
                )
            if before == step.name or before == previous:
                raise GraphError(
                    f"graph loop, step {before} is specified in before and/or after {key}"
                )
            self[before].after_step(step.name)
        self._last_added = step
        return step
    def clear_children(self, steps: list = None, states: list = None):
        """remove some or all of the states, empty/None for all"""
        if states:
            warnings.warn(
                "This states parameter is deprecated. Use steps instead",
                # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
                PendingDeprecationWarning,
            )
            steps = steps or states
        if not steps:
            steps = self._steps.keys()
        for key in steps:
            del self._steps[key]
    def __getitem__(self, name):
        # dict-style access to a child step by name
        return self._steps[name]
def __setitem__(self, name, step):
self.add_step(name, step)
    def __delitem__(self, key):
        # remove a child step by name
        del self._steps[key]
    def __iter__(self):
        # iterate over child step names
        yield from self._steps.keys()
    def init_object(self, context, namespace, mode="sync", reset=False, **extra_kwargs):
        """validate the graph, init every child step, and build the async DAG if needed"""
        self.context = context
        self.check_and_process_graph()
        for step in self._steps.values():
            step.set_parent(self)
            step.init_object(context, namespace, mode, reset=reset)
        self._set_error_handler()
        self._post_init(mode)
        if self.engine != "sync":
            self._build_async_flow()
    def check_and_process_graph(self, allow_empty=False):
        """validate correct graph layout and initialize the .next links

        detects cycles, resolves start steps (no .after), enforces a single
        responder and (for sync engine) a single start step, and determines
        the default final step; returns (start_steps, default_final_step,
        responders)
        """
        if self.is_empty() and allow_empty:
            self._start_steps = []
            return [], None, []
        def has_loop(step, previous):
            # walk the .after links upstream; return a step name if a cycle is found
            for next_step in step.after or []:
                if next_step in previous:
                    return step.name
                downstream = has_loop(self[next_step], previous + [next_step])
                if downstream:
                    return downstream
            return None
        start_steps = []
        for step in self._steps.values():
            step._next = None
            if step.after:
                loop_step = has_loop(step, [])
                if loop_step:
                    raise GraphError(
                        f"Error, loop detected in step {loop_step}, graph must be acyclic (DAG)"
                    )
            else:
                start_steps.append(step.name)
        responders = []
        for step in self._steps.values():
            if hasattr(step, "responder") and step.responder:
                responders.append(step.name)
            if step.on_error and step.on_error in start_steps:
                # error handlers are entered via failures, not as start steps
                start_steps.remove(step.on_error)
            if step.after:
                prev_step = step.after[0]
                self[prev_step].set_next(step.name)
        if self.on_error and self.on_error in start_steps:
            start_steps.remove(self.on_error)
        if (
            len(responders) > 1
        ):  # should not have multiple steps which respond to request
            raise GraphError(
                f'there are more than one responder steps in the graph ({",".join(responders)})'
            )
        if self.from_step:
            # START_FROM_STEP env override: start mid-graph
            if self.from_step not in self.steps:
                raise GraphError(
                    f"from_step ({self.from_step}) specified and not found in graph steps"
                )
            start_steps = [self.from_step]
        self._start_steps = [self[name] for name in start_steps]
        def get_first_function_step(step, current_function):
            # find the first step which belongs to the function
            if (
                hasattr(step, "function")
                and step.function
                and step.function == current_function
            ):
                return step
            for item in step.next or []:
                next_step = self[item]
                returned_step = get_first_function_step(next_step, current_function)
                if returned_step:
                    return returned_step
        current_function = get_current_function(self.context)
        if current_function and current_function != "*":
            # in a child function, start from its first owned step(s)
            new_start_steps = []
            for from_step in self._start_steps:
                step = get_first_function_step(from_step, current_function)
                if step:
                    new_start_steps.append(step)
            if not new_start_steps:
                raise GraphError(
                    f"did not find steps pointing to current function ({current_function})"
                )
            self._start_steps = new_start_steps
        if self.engine == "sync" and len(self._start_steps) > 1:
            raise GraphError(
                "sync engine can only have one starting step (without .after)"
            )
        default_final_step = None
        if self.final_step:
            if self.final_step not in self.steps:
                raise GraphError(
                    f"final_step ({self.final_step}) specified and not found in graph steps"
                )
            default_final_step = self.final_step
        elif len(self._start_steps) == 1:
            # find the final step in case if a simple sequence of steps
            next_obj = self._start_steps[0]
            while next_obj:
                next = next_obj.next
                if not next:
                    default_final_step = next_obj.name
                    break
                next_obj = self[next[0]] if len(next) == 1 else None
        return self._start_steps, default_final_step, responders
def set_flow_source(self, source):
    """set the async flow (storey) source

    The provided source object is used instead of the default
    SyncEmitSource when the async DAG is built.
    """
    self._source = source
def _build_async_flow(self):
    """initialize and build the async/storey DAG

    Wraps each local child step in a storey stage, connects the stages
    according to the .next links, attaches error-recovery hooks, and
    starts the flow controller (stored on self._controller).
    """
    try:
        import storey
    except ImportError:
        raise GraphError("storey package is not installed, use pip install storey")

    def process_step(state, step, root):
        # recursively connect each local step's storey stage to its children
        if not state._is_local_function(self.context):
            return
        for item in state.next or []:
            next_state = root[item]
            if next_state.async_object:
                next_step = step.to(next_state.async_object)
                process_step(next_state, next_step, root)

    for step in self._steps.values():
        if hasattr(step, "async_object") and step._is_local_function(self.context):
            if step.kind == StepKinds.queue:
                # in mock mode with downstream steps, bypass the real stream
                skip_stream = self.context.is_mock and step.next
                if step.path and not skip_stream:
                    stream_path = step.path
                    endpoint = None
                    if "://" in stream_path:
                        endpoint, stream_path = parse_v3io_path(step.path)
                        stream_path = stream_path.strip("/")
                    step._async_object = storey.StreamTarget(
                        storey.V3ioDriver(endpoint), stream_path
                    )
                else:
                    # no backing stream: pass events through unchanged
                    step._async_object = storey.Map(lambda x: x)
            elif not step.async_object or not hasattr(
                step.async_object, "_outlets"
            ):
                # if regular class, wrap with storey Map
                step._async_object = storey.Map(
                    step._handler,
                    full_event=step.full_event,
                    name=step.name,
                    context=self.context,
                )

            if not step.next and hasattr(step, "responder") and step.responder:
                # if responder step (return result), add Complete()
                step.async_object.to(storey.Complete(full_event=True))
                self._wait_for_result = True

    # todo: allow source array (e.g. data->json loads..)
    source = self._source or storey.SyncEmitSource()
    for next_state in self._start_steps:
        next_step = source.to(next_state.async_object)
        process_step(next_state, next_step, self)

    for step in self._steps.values():
        # add error handler hooks
        if (step.on_error or self.on_error) and step.async_object:
            error_step = self._steps[step.on_error or self.on_error]
            # never set a step as its own error handler
            if step != error_step:
                step.async_object.set_recovery_step(error_step.async_object)

    self._controller = source.run()
def get_queue_links(self):
    """return dict of function and queue its listening on, for building stream triggers"""
    function_to_queue = {}
    queue_steps = (s for s in self.get_children() if s.kind == StepKinds.queue)
    for queue_step in queue_steps:
        for child_name in queue_step.next or []:
            child = self[child_name]
            # every consumer of a queue must name the function it runs in
            if not child.function:
                raise GraphError(
                    f"child function name must be specified in steps ({child.name}) which follow a queue"
                )
            # a function may consume at most one queue
            if child.function in function_to_queue:
                raise GraphError(
                    f"function ({child.function}) cannot read from multiple queues"
                )
            function_to_queue[child.function] = queue_step
    return function_to_queue
def init_queues(self):
    """init/create the streams used in this flow

    Runs init_object on every queue-kind child so backing streams exist
    before events start flowing.
    """
    for step in self.get_children():
        if step.kind == StepKinds.queue:
            step.init_object(self.context, None)
def is_empty(self):
    """is the graph empty (no child steps)"""
    # an empty steps mapping means nothing was added to the graph
    return not self.steps
@staticmethod
async def _await_and_return_id(awaitable, event):
    """Await flow completion, then return a shallow copy of the event
    whose body is reduced to a minimal ``{"id": ...}`` acknowledgement."""
    await awaitable
    # copy so the caller's event object is not mutated
    event = copy(event)
    event.body = {"id": event.id}
    return event
def run(self, event, *args, **kwargs):
    """Run one event through the graph and return the result.

    In async mode the event is emitted into the storey controller; in
    sync mode the (single, linear) chain of steps is executed inline.
    The return value may be an awaitable in async-source mode.
    """
    if self._controller:
        # async flow (using storey)
        event._awaitable_result = None
        if config.datastore.async_source_mode == "enabled":
            resp_awaitable = self._controller.emit(
                event, await_result=self._wait_for_result
            )
            if self._wait_for_result:
                return resp_awaitable
            # no responder step: resolve to a minimal {"id": ...} ack
            return self._await_and_return_id(resp_awaitable, event)
        else:
            resp = self._controller.emit(
                event, return_awaitable_result=self._wait_for_result
            )
            if self._wait_for_result and resp:
                return resp.await_result()
            # no result expected - acknowledge with the event id only
            event = copy(event)
            event.body = {"id": event.id}
            return event

    # synchronous engine: walk the single linear chain of steps
    if len(self._start_steps) == 0:
        return event
    next_obj = self._start_steps[0]
    while next_obj:
        try:
            event = next_obj.run(event, *args, **kwargs)
        except Exception as exc:
            self._log_error(event, exc, failed_step=next_obj.name)
            handled = self._call_error_handler(event, exc)
            if not handled:
                raise exc
            # the error handler consumed the failure; stop the flow
            event.terminated = True
            return event
        if hasattr(event, "terminated") and event.terminated:
            return event
        next = next_obj.next
        if next and len(next) > 1:
            raise GraphError(
                f"synchronous flow engine doesnt support branches use async, step={next_obj.name}"
            )
        next_obj = self[next[0]] if next else None
    return event
def wait_for_completion(self):
    """wait for completion of run in async flows

    Terminates the storey controller (when it supports terminate) and
    blocks until it finishes. No-op for sync flows (returns None).
    """
    if self._controller:
        if hasattr(self._controller, "terminate"):
            self._controller.terminate()
        return self._controller.await_termination()
def plot(self, filename=None, format=None, source=None, targets=None, **kw):
    """plot/save graph using graphviz

    :param filename: output file path (optional, rendered when given)
    :param format:   output format (e.g. "png"); inferred from filename suffix
    :param source:   optional source step rendered before the entry points
    :param targets:  optional target steps rendered after the final step
    :param kw:       extra graphviz graph attributes
    """
    return _generate_graphviz(
        self,
        _add_graphviz_flow,
        filename,
        format,
        source=source,
        targets=targets,
        **kw,
    )
class RootFlowStep(FlowStep):
    """root flow step

    The top-level graph object; serialized with the fields listed in
    _dict_fields and accepting the deprecated "final_state" alias.
    """

    kind = "root"
    _dict_fields = ["steps", "engine", "final_step", "on_error"]

    # TODO - remove once "final_state" is fully deprecated
    @classmethod
    def from_dict(cls, struct=None, fields=None):
        # map the legacy "final_state" field onto "final_step" on load
        return super().from_dict(
            struct, fields=fields, deprecated_fields={"final_state": "final_step"}
        )
# map a serialized step "kind" string to its step class (used by params_to_step)
classes_map = {
    "task": TaskStep,
    "router": RouterStep,
    "flow": FlowStep,
    "queue": QueueStep,
}
def get_current_function(context):
    """Return the name of the function this process serves, or "" if unknown."""
    if not context or not hasattr(context, "current_function"):
        return ""
    return context.current_function or ""
def _add_graphviz_router(graph, step, source=None, **kwargs):
    """Add a router step and its routes to a graphviz graph.

    Draws an optional "_start" source node, the router node itself, and
    one node + edge per route child.
    """
    if source:
        graph.node("_start", source.name, shape=source.shape, style="filled")
        graph.edge("_start", step.fullname)
    graph.node(step.fullname, label=step.name, shape=step.get_shape())
    for route in step.get_children():
        graph.node(route.fullname, label=route.name, shape=route.get_shape())
        graph.edge(step.fullname, route.fullname)
def _add_graphviz_flow(
    graph, step, source=None, targets=None,
):
    """Render a flow step (and its children) into a graphviz graph.

    Routers become clustered subgraphs; .after links become edges;
    on_error handlers are drawn with dashed edges; optional targets are
    attached after the default final step.
    """
    start_steps, default_final_step, responders = step.check_and_process_graph(
        allow_empty=True
    )
    graph.node("_start", source.name, shape=source.shape, style="filled")
    for start_step in start_steps:
        graph.edge("_start", start_step.fullname)
    for child in step.get_children():
        kind = child.kind
        if kind == StepKinds.router:
            # routers are drawn as a cluster containing their routes
            with graph.subgraph(name="cluster_" + child.fullname) as sg:
                _add_graphviz_router(sg, child)
        else:
            graph.node(child.fullname, label=child.name, shape=child.get_shape())
        after = child.after or []
        for item in after:
            previous_object = step[item]
            # edges leaving a router cluster need an ltail attribute
            kw = (
                {"ltail": "cluster_" + previous_object.fullname}
                if previous_object.kind == StepKinds.router
                else {}
            )
            graph.edge(previous_object.fullname, child.fullname, **kw)
        if child.on_error:
            graph.edge(child.fullname, child.on_error, style="dashed")

    # draw targets after the last step (if specified)
    if targets:
        for target in targets or []:
            graph.node(target.fullname, label=target.name, shape=target.get_shape())
            last_step = target.after or default_final_step
            if last_step:
                graph.edge(last_step, target.fullname)
def _generate_graphviz(
    step, renderer, filename=None, format=None, source=None, targets=None, **kw,
):
    """Build a graphviz Digraph for ``step`` using ``renderer`` and
    optionally render it to ``filename``.

    Returns the Digraph object (also displayable inline in notebooks).
    """
    try:
        from graphviz import Digraph
    except ImportError:
        raise ImportError(
            'graphviz is not installed, run "pip install graphviz" first!'
        )
    # NOTE(review): the Digraph display format is hard-coded to "jpg" while
    # file rendering below defaults to "png" - presumably intentional, confirm
    graph = Digraph("mlrun-flow", format="jpg")
    graph.attr(compound="true", **kw)
    source = source or BaseStep("start", shape="egg")
    renderer(graph, step, source=source, targets=targets)
    if filename:
        suffix = pathlib.Path(filename).suffix
        if suffix:
            # graphviz appends the format suffix itself; strip and reuse it
            filename = filename[: -len(suffix)]
            format = format or suffix[1:]
        format = format or "png"
        graph.render(filename, format=format)
    return graph
def graph_root_setter(server, graph):
    """set graph root object from class or dict

    Accepts a step object or its dict form; routers become a RouterStep
    root, everything else (or a missing kind) becomes a RootFlowStep.
    """
    if graph:
        if isinstance(graph, dict):
            kind = graph.get("kind")
        elif hasattr(graph, "kind"):
            kind = graph.kind
        else:
            raise MLRunInvalidArgumentError("graph must be a dict or a valid object")
        if kind == StepKinds.router:
            server._graph = server._verify_dict(graph, "graph", RouterStep)
        elif not kind or kind == StepKinds.root:
            # default root kind is a flow
            server._graph = server._verify_dict(graph, "graph", RootFlowStep)
        else:
            raise GraphError(f"illegal root step {kind}")
def get_name(name, class_name):
    """get task name from provided name or class"""
    if name:
        return name
    if not class_name:
        raise MLRunInvalidArgumentError("name or class_name must be provided")
    # a class object contributes its __name__; a string is used verbatim
    return class_name.__name__ if isinstance(class_name, type) else class_name
def params_to_state(
    class_name,
    name,
    handler=None,
    graph_shape=None,
    function=None,
    full_event=None,
    class_args=None,
):
    """Deprecated alias of :func:`params_to_step` (same signature/behavior)."""
    warnings.warn(
        "This method is deprecated. Use param_to_step instead",
        # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
        PendingDeprecationWarning,
    )
    return params_to_step(
        class_name, name, handler, graph_shape, function, full_event, class_args
    )
def params_to_step(
    class_name,
    name,
    handler=None,
    graph_shape=None,
    function=None,
    full_event=None,
    class_args=None,
):
    """return step object from provided params or classes/objects

    ``class_name`` may be: a step-like object (has to_dict), the queue
    marker ">>"/"$queue", a "*"-prefixed router class name, or a plain
    class name/handler for a task step. Returns (name, step).
    """
    if class_name and hasattr(class_name, "to_dict"):
        # a ready step-like object: round-trip through its dict form
        struct = class_name.to_dict()
        kind = struct.get("kind", StepKinds.task)
        name = name or struct.get("name", struct.get("class_name"))
        cls = classes_map.get(kind, RootFlowStep)
        step = cls.from_dict(struct)
        step.function = function
        step.full_event = full_event
    elif class_name and class_name in [">>", "$queue"]:
        # queue/stream step
        if "path" not in class_args:
            raise MLRunInvalidArgumentError(
                "path=<stream path or None> must be specified for queues"
            )
        if not name:
            raise MLRunInvalidArgumentError("queue name must be specified")
        step = QueueStep(name, **class_args)
    elif class_name and class_name.startswith("*"):
        # "*Class" marks a router step
        routes = class_args.get("routes", None)
        class_name = class_name[1:]
        name = get_name(name, class_name or "router")
        step = RouterStep(
            class_name, class_args, handler, name=name, function=function, routes=routes
        )
    elif class_name or handler:
        # plain task step from a class and/or handler
        name = get_name(name, class_name)
        step = TaskStep(
            class_name,
            class_args,
            handler,
            name=name,
            function=function,
            full_event=full_event,
        )
    else:
        raise MLRunInvalidArgumentError("class_name or handler must be provided")
    if graph_shape:
        step.shape = graph_shape
    return name, step
|
en
| 0.70146
|
# Copyright 2018 Iguazio # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. error in graph topology or configuration graphviz shape set/link the step parent (flow/router) step parent (flow/router) set/insert the key as next after this step, optionally remove other keys specify the previous step name # most steps only accept one source # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove set error handler step (on failure/raise of this step) # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove init the step class get child steps (for router/flow) full path/name (include parents) # replace for graphviz escaping init/link the error handler for this step on failure log (for sync mode) call the error handler if exist return step object from step relative/fullname # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove add a step right after this step and return the new step example, a 4 step pipeline ending with a stream: graph.to('URLDownloader')\ .to('ToParagraphs')\ .to(name='to_json', handler='json.dumps')\ .to('>>', 'to_v3io', path=stream_path)\ :param class_name: class name or step object to build the step from for router steps the class name should start with '*' for queue/stream step the class should be '>>' or '$queue' :param name: unique name (and path) for the child step, default is class name :param handler: class/function handler to invoke on run/event :param graph_shape: graphviz shape name :param function: function this step should run in 
:param full_event: this step accepts the full event (not just body) :param class_args: class init arguments # check that its not the root, todo: in future may gave nested flows task execution step, runs a class or handler # skip init of non local functions # link to function # init the step class + args # add name and context only if target class can accept them # determine the right class handler to use # detect if the class is local (and should be initialized) return the sync or async (storey) class instance mark this step as the responder. step output will be returned as the flow result, no other step can follow run this step, in async flows the run is done through storey # todo invoke remote via REST call # inject context parameter if it is expected by the handler router step, implement routing logic for running child routes get child steps (routes) child routes/steps, traffic is routed to routes based on router logic add child route step or class to the router :param key: unique name (and route path) for the child step :param route: child step object (Task, ..) :param class_name: class name to build the route step from (when route is not provided) :param class_args: class init arguments :param handler: class handler to invoke on run/event clear child steps (routes) plot/save a graphviz plot queue step, implement an async queue or represent a stream # queue steps accept multiple sources # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove flow step, represent a workflow or DAG # TODO - remove once "states" is fully deprecated # TODO - remove once usage of "state" is fully deprecated # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove # TODO - remove use of START_FROM_STATE once it's fully deprecated. 
child (workflow) steps # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove async (storey) flow controller # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove add task, queue or router step/class to the flow use after/before to insert into a specific location example: graph = fn.set_topology("flow", exist_ok=True) graph.add_step(class_name="Chain", name="s1") graph.add_step(class_name="Chain", name="s3", after="$prev") graph.add_step(class_name="Chain", name="s2", after="s1", before="s3") :param class_name: class name or step object to build the step from for router steps the class name should start with '*' for queue/stream step the class should be '>>' or '$queue' :param name: unique name (and path) for the child step, default is class name :param handler: class/function handler to invoke on run/event :param after: the step name this step comes after can use $prev to indicate the last added step :param before: string or list of next step names that will run after this step :param graph_shape: graphviz shape name :param function: function this step should run in :param class_args: class init arguments # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove insert step object into the flow, specify before and after remove some or all of the states, empty/None for all # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove validate correct graph layout and initialize the .next links # should not have multiple steps which respond to request # find the first step which belongs to the function # find the final step in case if a simple sequence of steps set the async flow (storey) source initialize and build the async/storey DAG # if regular class, wrap with storey Map # if responder step (return result), add Complete() # todo: allow source array (e.g. data->json loads..) 
# add error handler hooks # never set a step as its own error handler return dict of function and queue its listening on, for building stream triggers init/create the streams used in this flow is the graph empty (no child steps) # async flow (using storey) wait for completion of run in async flows plot/save graph using graphviz root flow step # TODO - remove once "final_state" is fully deprecated # draw targets after the last step (if specified) set graph root object from class or dict get task name from provided name or class # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove return step object from provided params or classes/objects
| 1.763054
| 2
|
lib/training/schemes/pattern/eig.py
|
shamim-hussain/egt
| 7
|
6629194
|
<reponame>shamim-hussain/egt<gh_stars>1-10
import tensorflow as tf
from tensorflow.keras import (optimizers, losses, metrics)
from tqdm import tqdm
from sklearn.metrics import recall_score, accuracy_score, confusion_matrix
import numpy as np
import os
from lib.base.dotdict import HDict
from lib.data.datasets.sbm_pattern import EigenDataset
from lib.models.sbm_pattern.dc import DCEigTransformer
from lib.training.schemes.scheme_base import BaseEigModelScheme
from lib.training.schemes.pattern._eval import SBMPATTERNEval
class SBMPDCEig(SBMPATTERNEval, BaseEigModelScheme):
    """Training scheme for the SBM-PATTERN dataset using the eigen-feature
    variant of the DC transformer (class-weighted cross-entropy loss)."""

    def get_default_config(self):
        """Extend the base config with the dataset name and class sizes."""
        config_dict = super().get_default_config()
        config_dict.update(
            dataset_name = 'sbm_pattern',
            # node counts per class over the whole dataset; drives loss weighting
            class_sizes = [979220, 209900],
        )
        return config_dict

    # NOTE: default changed from a mutable list to an equivalent tuple -
    # a shared mutable default argument is a latent bug if ever mutated
    def get_dataset_config(self, splits=('training', 'validation')):
        """Return (dataset config, dataset class) for the given splits."""
        dataset_config, _ = super().get_dataset_config()
        return dataset_config, EigenDataset

    def get_model_config(self):
        """Return (model config, model class)."""
        model_config, _ = super().get_model_config()
        return model_config, DCEigTransformer

    def get_loss(self):
        """Build a class-weighted sparse cross-entropy loss.

        Each class is weighted by the (normalized) size of the OTHER
        classes, so the minority class contributes more per sample.
        """
        class_sizes = np.array(self.config.class_sizes, dtype='float32')
        class_weights = class_sizes.sum() - class_sizes
        class_weights = class_weights/class_weights.sum()
        class_weights = tf.constant(class_weights, dtype=tf.float32)
        def loss(y_true, y_pred):
            # per-sample weight looked up from the true class label
            weights = tf.gather(class_weights, tf.cast(y_true, tf.int32))
            w_xent = weights * losses.sparse_categorical_crossentropy(y_true, y_pred,
                                                                      from_logits=True, axis=-1)
            return w_xent
        return loss

    def get_metrics(self):
        """Track sparse categorical accuracy under the name 'acc'."""
        acc = metrics.SparseCategoricalAccuracy(name='acc')
        return [acc]

SCHEME = SBMPDCEig
|
import tensorflow as tf
from tensorflow.keras import (optimizers, losses, metrics)
from tqdm import tqdm
from sklearn.metrics import recall_score, accuracy_score, confusion_matrix
import numpy as np
import os
from lib.base.dotdict import HDict
from lib.data.datasets.sbm_pattern import EigenDataset
from lib.models.sbm_pattern.dc import DCEigTransformer
from lib.training.schemes.scheme_base import BaseEigModelScheme
from lib.training.schemes.pattern._eval import SBMPATTERNEval
class SBMPDCEig(SBMPATTERNEval, BaseEigModelScheme):
    """Training scheme for the SBM-PATTERN dataset using the eigen-feature
    variant of the DC transformer (class-weighted cross-entropy loss)."""

    def get_default_config(self):
        """Extend the base config with the dataset name and class sizes."""
        config_dict = super().get_default_config()
        config_dict.update(
            dataset_name = 'sbm_pattern',
            # node counts per class over the whole dataset; drives loss weighting
            class_sizes = [979220, 209900],
        )
        return config_dict

    # NOTE: default changed from a mutable list to an equivalent tuple -
    # a shared mutable default argument is a latent bug if ever mutated
    def get_dataset_config(self, splits=('training', 'validation')):
        """Return (dataset config, dataset class) for the given splits."""
        dataset_config, _ = super().get_dataset_config()
        return dataset_config, EigenDataset

    def get_model_config(self):
        """Return (model config, model class)."""
        model_config, _ = super().get_model_config()
        return model_config, DCEigTransformer

    def get_loss(self):
        """Build a class-weighted sparse cross-entropy loss.

        Each class is weighted by the (normalized) size of the OTHER
        classes, so the minority class contributes more per sample.
        """
        class_sizes = np.array(self.config.class_sizes, dtype='float32')
        class_weights = class_sizes.sum() - class_sizes
        class_weights = class_weights/class_weights.sum()
        class_weights = tf.constant(class_weights, dtype=tf.float32)
        def loss(y_true, y_pred):
            # per-sample weight looked up from the true class label
            weights = tf.gather(class_weights, tf.cast(y_true, tf.int32))
            w_xent = weights * losses.sparse_categorical_crossentropy(y_true, y_pred,
                                                                      from_logits=True, axis=-1)
            return w_xent
        return loss

    def get_metrics(self):
        """Track sparse categorical accuracy under the name 'acc'."""
        acc = metrics.SparseCategoricalAccuracy(name='acc')
        return [acc]

SCHEME = SBMPDCEig
|
none
| 1
| 2.047554
| 2
|
|
problem_2/even_fibonacci.py
|
plilja/project-euler
| 0
|
6629195
|
from math import sqrt, log
from common.functions import fibonacci
# the golden ratio; Fibonacci numbers grow as PSI**n / sqrt(5) (Binet's formula)
PSI = (1.0 + sqrt(5)) / 2.0
def even_fibonacci(upper_limit):
    """Sum of even Fibonacci numbers <= upper_limit (closed-form variant)."""
    return even_fibonacci_mathematical(upper_limit)
def sum_of_nth_first_fibonnaci_numbers(last_even_n):
    """Sum of the first n Fibonacci numbers, using sum(F(1..n)) = F(n+2) - 1."""
    return fibonacci(last_even_n + 2) - 1
def even_fibonacci_mathematical(upper_limit):
    """Sum of the even Fibonacci numbers not exceeding upper_limit.

    Inverts Binet's formula to locate the largest Fibonacci index below
    the limit, steps back to the nearest even term (every third Fibonacci
    number is even), and uses the identity that the even terms sum to
    half of the full prefix sum ending at an even term.
    """
    n = int(round(log(upper_limit * sqrt(5)) / log(PSI)))
    last_even_n = n
    if fibonacci(n) % 2 != 0:
        last_even_n = n - 1
        if fibonacci(n - 1) % 2 != 0:
            last_even_n = n - 2
    # fix: integer division - the prefix sum ending at an even Fibonacci term
    # is always even, and "/ 2" needlessly returned a float for an exact count
    return sum_of_nth_first_fibonnaci_numbers(last_even_n) // 2
def even_fibonacci_programmatic(upper_limit):
    """Sum the even Fibonacci numbers up to upper_limit by direct enumeration."""
    return sum(value for value in fibonacci_numbers_up_to(upper_limit) if value % 2 == 0)
def fibonacci_numbers_up_to(upper_limit):
    """Return the Fibonacci sequence [1, 1, 2, ...] with last term <= upper_limit."""
    if upper_limit <= 0:
        return []
    sequence = [1, 1]
    # grow one term past the limit, then drop the overshoot
    while sequence[-1] <= upper_limit:
        sequence.append(sequence[-2] + sequence[-1])
    sequence.pop()
    return sequence
|
from math import sqrt, log
from common.functions import fibonacci
# the golden ratio; Fibonacci numbers grow as PSI**n / sqrt(5) (Binet's formula)
PSI = (1.0 + sqrt(5)) / 2.0
def even_fibonacci(upper_limit):
    """Sum of even Fibonacci numbers <= upper_limit (closed-form variant)."""
    return even_fibonacci_mathematical(upper_limit)
def sum_of_nth_first_fibonnaci_numbers(last_even_n):
    """Sum of the first n Fibonacci numbers, using sum(F(1..n)) = F(n+2) - 1."""
    return fibonacci(last_even_n + 2) - 1
def even_fibonacci_mathematical(upper_limit):
    """Sum of the even Fibonacci numbers not exceeding upper_limit.

    Inverts Binet's formula to locate the largest Fibonacci index below
    the limit, steps back to the nearest even term (every third Fibonacci
    number is even), and uses the identity that the even terms sum to
    half of the full prefix sum ending at an even term.
    """
    n = int(round(log(upper_limit * sqrt(5)) / log(PSI)))
    last_even_n = n
    if fibonacci(n) % 2 != 0:
        last_even_n = n - 1
        if fibonacci(n - 1) % 2 != 0:
            last_even_n = n - 2
    # fix: integer division - the prefix sum ending at an even Fibonacci term
    # is always even, and "/ 2" needlessly returned a float for an exact count
    return sum_of_nth_first_fibonnaci_numbers(last_even_n) // 2
def even_fibonacci_programmatic(upper_limit):
    """Sum the even Fibonacci numbers up to upper_limit by direct enumeration."""
    return sum(value for value in fibonacci_numbers_up_to(upper_limit) if value % 2 == 0)
def fibonacci_numbers_up_to(upper_limit):
    """Return the Fibonacci sequence [1, 1, 2, ...] with last term <= upper_limit."""
    if upper_limit <= 0:
        return []
    sequence = [1, 1]
    # grow one term past the limit, then drop the overshoot
    while sequence[-1] <= upper_limit:
        sequence.append(sequence[-2] + sequence[-1])
    sequence.pop()
    return sequence
|
none
| 1
| 3.804041
| 4
|
|
Hackerrank/30 Days of Code/30-inheritance.py
|
PROxZIMA/Competitive-Coding
| 1
|
6629196
|
class Person:
    """A person identified by first/last name and an id number."""

    def __init__(self, firstName, lastName, idNumber):
        self.firstName = firstName
        self.lastName = lastName
        self.idNumber = idNumber

    def printPerson(self):
        """Print the person as two lines: "Name: Last, First" and "ID: <id>"."""
        print(f"Name: {self.lastName}, {self.firstName}")
        print(f"ID: {self.idNumber}")
class Student(Person):
    """A Person with per-subject scores and a letter-grade calculator."""

    def __init__(self, firstName, lastName, idNumber, scores):
        super().__init__(firstName, lastName, idNumber)
        self.scores = scores

    def calculate(self):
        """Return the letter grade for the average of self.scores.

        Fixes two defects in the original:
        - it averaged the module-level global ``scores`` instead of
          ``self.scores`` (only worked by accident in the driver script)
        - averages in [50, 55) fell through to 'T'; per the HackerRank
          spec 'D' covers 40 <= average < 55
        """
        marks = sum(self.scores) / len(self.scores)
        if 90 <= marks <= 100:
            return 'O'
        elif 80 <= marks < 90:
            return 'E'
        elif 70 <= marks < 80:
            return 'A'
        elif 55 <= marks < 70:
            return 'P'
        elif 40 <= marks < 55:
            return 'D'
        else:
            return 'T'
# driver: read "first last id", a score count, and the scores from stdin
firstName, lastName, idNum = input().split()
# the declared number of scores; read but unused - presumably the input
# guarantees it matches len(scores)
numScores = input()
scores = list(map(int, input().split()))
s = Student(firstName, lastName, idNum, scores)
s.printPerson()
print("Grade:", s.calculate())
|
class Person:
    """A person identified by first/last name and an id number."""

    def __init__(self, firstName, lastName, idNumber):
        self.firstName = firstName
        self.lastName = lastName
        self.idNumber = idNumber

    def printPerson(self):
        """Print the person as two lines: "Name: Last, First" and "ID: <id>"."""
        print(f"Name: {self.lastName}, {self.firstName}")
        print(f"ID: {self.idNumber}")
class Student(Person):
    """A Person with per-subject scores and a letter-grade calculator."""

    def __init__(self, firstName, lastName, idNumber, scores):
        super().__init__(firstName, lastName, idNumber)
        self.scores = scores

    def calculate(self):
        """Return the letter grade for the average of self.scores.

        Fixes two defects in the original:
        - it averaged the module-level global ``scores`` instead of
          ``self.scores`` (only worked by accident in the driver script)
        - averages in [50, 55) fell through to 'T'; per the HackerRank
          spec 'D' covers 40 <= average < 55
        """
        marks = sum(self.scores) / len(self.scores)
        if 90 <= marks <= 100:
            return 'O'
        elif 80 <= marks < 90:
            return 'E'
        elif 70 <= marks < 80:
            return 'A'
        elif 55 <= marks < 70:
            return 'P'
        elif 40 <= marks < 55:
            return 'D'
        else:
            return 'T'
# driver: read "first last id", a score count, and the scores from stdin
firstName, lastName, idNum = input().split()
# the declared number of scores; read but unused - presumably the input
# guarantees it matches len(scores)
numScores = input()
scores = list(map(int, input().split()))
s = Student(firstName, lastName, idNum, scores)
s.printPerson()
print("Grade:", s.calculate())
|
none
| 1
| 3.697751
| 4
|
|
lib/private/copy_file.bzl
|
alexeagle/bazel-lib
| 0
|
6629197
|
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# LOCAL MODIFICATIONS
# this has a PR patched in on top of the original
# https://github.com/bazelbuild/bazel-skylib/blob/7b859037a673db6f606661323e74c5d4751595e6/rules/private/copy_file_private.bzl
# https://github.com/bazelbuild/bazel-skylib/pull/324
"""Implementation of copy_file macro and underlying rules.
These rules copy a file to another location using Bash (on Linux/macOS) or
cmd.exe (on Windows). `_copy_xfile` marks the resulting file executable,
`_copy_file` does not.
"""
load(":copy_common.bzl", _COPY_EXECUTION_REQUIREMENTS = "COPY_EXECUTION_REQUIREMENTS")
load(":directory_path.bzl", "DirectoryPathInfo")
def _copy_cmd(ctx, src, src_path, dst):
    """Copy `src_path` to `dst` on Windows via a generated one-line .bat file."""

    # Most Windows binaries built with MSVC use a certain argument quoting
    # scheme. Bazel uses that scheme too to quote arguments. However,
    # cmd.exe uses different semantics, so Bazel's quoting is wrong here.
    # To fix that we write the command to a .bat file so no command line
    # quoting or escaping is required.
    # Put a hash of the file name into the name of the generated batch file to
    # make it unique within the package, so that users can define multiple copy_file's.
    bat = ctx.actions.declare_file("%s-%s-cmd.bat" % (ctx.label.name, hash(src_path)))

    # Flags are documented at
    # https://docs.microsoft.com/en-us/windows-server/administration/windows-commands/copy
    cmd_tmpl = "@copy /Y \"{src}\" \"{dst}\" >NUL"
    mnemonic = "CopyFile"
    progress_message = "Copying file %s" % src_path

    ctx.actions.write(
        output = bat,
        # Do not use lib/shell.bzl's shell.quote() method, because that uses
        # Bash quoting syntax, which is different from cmd.exe's syntax.
        content = cmd_tmpl.format(
            src = src_path.replace("/", "\\"),
            dst = dst.path.replace("/", "\\"),
        ),
        is_executable = True,
    )
    ctx.actions.run(
        inputs = [src],
        tools = [bat],
        outputs = [dst],
        executable = "cmd.exe",
        arguments = ["/C", bat.path.replace("/", "\\")],
        mnemonic = mnemonic,
        progress_message = progress_message,
        use_default_shell_env = True,
        execution_requirements = _COPY_EXECUTION_REQUIREMENTS,
    )
def _copy_bash(ctx, src, src_path, dst):
    """Copy `src_path` to `dst` on Linux/macOS via a `cp -f` shell action."""
    cmd_tmpl = "cp -f \"$1\" \"$2\""
    mnemonic = "CopyFile"
    progress_message = "Copying file %s" % src_path

    ctx.actions.run_shell(
        tools = [src],
        outputs = [dst],
        command = cmd_tmpl,
        arguments = [src_path, dst.path],
        mnemonic = mnemonic,
        progress_message = progress_message,
        use_default_shell_env = True,
        execution_requirements = _COPY_EXECUTION_REQUIREMENTS,
    )
def copy_file_action(ctx, src, dst, dir_path = None, is_windows = False):
    """Helper function that creates an action to copy a file from src to dst.

    If src is a TreeArtifact, dir_path must be specified as the path within
    the TreeArtifact to the file to copy.

    This helper is used by copy_file. It is exposed as a public API so it can be used within
    other rule implementations.

    Args:
        ctx: The rule context.
        src: The source file to copy or TreeArtifact to copy a single file out of.
        dst: The destination file.
        dir_path: If src is a TreeArtifact, the path within the TreeArtifact to the file to copy.
        is_windows: If true, an cmd.exe action is created so there is no bash dependency.
    """
    if dst.is_directory:
        fail("dst must not be a TreeArtifact")
    if src.is_directory:
        if not dir_path:
            fail("dir_path must be set if src is a TreeArtifact")
        # address a single file inside the TreeArtifact
        src_path = "/".join([src.path, dir_path])
    else:
        src_path = src.path
    if is_windows:
        _copy_cmd(ctx, src, src_path, dst)
    else:
        _copy_bash(ctx, src, src_path, dst)
def _copy_file_impl(ctx):
    """Rule implementation: symlink or copy `src` to `out` and return DefaultInfo."""

    # fast path: a symlink instead of a physical copy, when the user allows it
    if ctx.attr.allow_symlink:
        if len(ctx.files.src) != 1:
            fail("src must be a single file when allow_symlink is True")
        if ctx.files.src[0].is_directory:
            fail("cannot use copy_file to create a symlink to a directory")
        ctx.actions.symlink(
            output = ctx.outputs.out,
            target_file = ctx.files.src[0],
            is_executable = ctx.attr.is_executable,
        )
    elif DirectoryPathInfo in ctx.attr.src:
        # src addresses a single file inside a TreeArtifact
        copy_file_action(
            ctx,
            ctx.attr.src[DirectoryPathInfo].directory,
            ctx.outputs.out,
            dir_path = ctx.attr.src[DirectoryPathInfo].path,
            is_windows = ctx.attr.is_windows,
        )
    else:
        if len(ctx.files.src) != 1:
            fail("src must be a single file or a target that provides a DirectoryPathInfo")
        if ctx.files.src[0].is_directory:
            fail("cannot use copy_file on a directory; try copy_directory instead")
        copy_file_action(ctx, ctx.files.src[0], ctx.outputs.out, is_windows = ctx.attr.is_windows)

    files = depset(direct = [ctx.outputs.out])
    runfiles = ctx.runfiles(files = [ctx.outputs.out])
    if ctx.attr.is_executable:
        return [DefaultInfo(files = files, runfiles = runfiles, executable = ctx.outputs.out)]
    else:
        return [DefaultInfo(files = files, runfiles = runfiles)]
# attributes shared by both rule variants below
_ATTRS = {
    "src": attr.label(mandatory = True, allow_files = True),
    "is_windows": attr.bool(mandatory = True),
    "is_executable": attr.bool(mandatory = True),
    "allow_symlink": attr.bool(mandatory = True),
    "out": attr.output(mandatory = True),
}

# non-executable variant: the copied file is a plain output
_copy_file = rule(
    implementation = _copy_file_impl,
    provides = [DefaultInfo],
    attrs = _ATTRS,
)

# executable variant: the copied file can be `bazel run` and used as a tool
_copy_xfile = rule(
    implementation = _copy_file_impl,
    executable = True,
    provides = [DefaultInfo],
    attrs = _ATTRS,
)
def copy_file(name, src, out, is_executable = False, allow_symlink = False, **kwargs):
    """Copies a file or directory to another location.

    `native.genrule()` is sometimes used to copy files (often wishing to rename them). The 'copy_file' rule does this with a simpler interface than genrule.

    This rule uses a Bash command on Linux/macOS/non-Windows, and a cmd.exe command on Windows (no Bash is required).

    If using this rule with source directories, it is recommended that you use the
    `--host_jvm_args=-DBAZEL_TRACK_SOURCE_DIRECTORIES=1` startup option so that changes
    to files within source directories are detected. See
    https://github.com/bazelbuild/bazel/commit/c64421bc35214f0414e4f4226cc953e8c55fa0d2
    for more context.

    Args:
        name: Name of the rule.
        src: A Label. The file to make a copy of.
            (Can also be the label of a rule that generates a file.)
        out: Path of the output file, relative to this package.
        is_executable: A boolean. Whether to make the output file executable. When
            True, the rule's output can be executed using `bazel run` and can be
            in the srcs of binary and test rules that require executable sources.
            WARNING: If `allow_symlink` is True, `src` must also be executable.
        allow_symlink: A boolean. Whether to allow symlinking instead of copying.
            When False, the output is always a hard copy. When True, the output
            *can* be a symlink, but there is no guarantee that a symlink is
            created (i.e., at the time of writing, we don't create symlinks on
            Windows). Set this to True if you need fast copying and your tools can
            handle symlinks (which most UNIX tools can).
        **kwargs: further keyword arguments, e.g. `visibility`
    """
    # pick the executable rule variant when the output must be runnable
    copy_file_impl = _copy_file
    if is_executable:
        copy_file_impl = _copy_xfile

    copy_file_impl(
        name = name,
        src = src,
        out = out,
        # resolved at analysis time so the right copy strategy is used per host
        is_windows = select({
            "@bazel_tools//src/conditions:host_windows": True,
            "//conditions:default": False,
        }),
        is_executable = is_executable,
        allow_symlink = allow_symlink,
        **kwargs
    )
|
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# LOCAL MODIFICATIONS
# this has a PR patched in on top of the original
# https://github.com/bazelbuild/bazel-skylib/blob/7b859037a673db6f606661323e74c5d4751595e6/rules/private/copy_file_private.bzl
# https://github.com/bazelbuild/bazel-skylib/pull/324
"""Implementation of copy_file macro and underlying rules.
These rules copy a file to another location using Bash (on Linux/macOS) or
cmd.exe (on Windows). `_copy_xfile` marks the resulting file executable,
`_copy_file` does not.
"""
load(":copy_common.bzl", _COPY_EXECUTION_REQUIREMENTS = "COPY_EXECUTION_REQUIREMENTS")
load(":directory_path.bzl", "DirectoryPathInfo")
def _copy_cmd(ctx, src, src_path, dst):
    # cmd.exe does not understand the MSVC-style argument quoting Bazel emits
    # for action arguments, so the copy command is written into a generated
    # .bat file — that sidesteps command-line quoting entirely. Hashing the
    # source path keeps the batch file name unique within the package so
    # several copy_file targets can coexist.
    bat_file = ctx.actions.declare_file("%s-%s-cmd.bat" % (ctx.label.name, hash(src_path)))

    # /Y suppresses the overwrite prompt; flags are documented at
    # https://docs.microsoft.com/en-us/windows-server/administration/windows-commands/copy
    ctx.actions.write(
        output = bat_file,
        # lib/shell.bzl's shell.quote() is Bash-flavored and wrong for
        # cmd.exe, so the command line is assembled by hand with
        # backslash-separated paths.
        content = "@copy /Y \"{src}\" \"{dst}\" >NUL".format(
            src = src_path.replace("/", "\\"),
            dst = dst.path.replace("/", "\\"),
        ),
        is_executable = True,
    )
    ctx.actions.run(
        inputs = [src],
        tools = [bat_file],
        outputs = [dst],
        executable = "cmd.exe",
        arguments = ["/C", bat_file.path.replace("/", "\\")],
        mnemonic = "CopyFile",
        progress_message = "Copying file %s" % src_path,
        use_default_shell_env = True,
        execution_requirements = _COPY_EXECUTION_REQUIREMENTS,
    )
def _copy_bash(ctx, src, src_path, dst):
    # On POSIX hosts a plain `cp -f` suffices; quoting is handled by Bash
    # positional parameters ($1/$2) rather than by string interpolation.
    ctx.actions.run_shell(
        tools = [src],
        outputs = [dst],
        command = "cp -f \"$1\" \"$2\"",
        arguments = [src_path, dst.path],
        mnemonic = "CopyFile",
        progress_message = "Copying file %s" % src_path,
        use_default_shell_env = True,
        execution_requirements = _COPY_EXECUTION_REQUIREMENTS,
    )
def copy_file_action(ctx, src, dst, dir_path = None, is_windows = False):
    """Creates an action that copies a file from src to dst.

    If src is a TreeArtifact, dir_path selects the file inside it to copy.
    Exposed as a public API so other rule implementations can reuse it.

    Args:
        ctx: The rule context.
        src: Source file, or a TreeArtifact to copy a single file out of.
        dst: The destination file (must not be a TreeArtifact).
        dir_path: Path within the TreeArtifact when src is one.
        is_windows: If true, a cmd.exe action is created so there is no
            bash dependency.
    """
    if dst.is_directory:
        fail("dst must not be a TreeArtifact")

    src_path = src.path
    if src.is_directory:
        if not dir_path:
            fail("dir_path must be set if src is a TreeArtifact")
        src_path = "/".join([src.path, dir_path])

    if is_windows:
        _copy_cmd(ctx, src, src_path, dst)
    else:
        _copy_bash(ctx, src, src_path, dst)
def _copy_file_impl(ctx):
    # Shared implementation for both rule flavors (_copy_file / _copy_xfile).
    # Three mutually exclusive strategies, tried in order:
    #   1. symlink (only when the user opted in via allow_symlink),
    #   2. copy a single file out of a TreeArtifact (DirectoryPathInfo),
    #   3. plain single-file copy.
    if ctx.attr.allow_symlink:
        if len(ctx.files.src) != 1:
            fail("src must be a single file when allow_symlink is True")
        if ctx.files.src[0].is_directory:
            fail("cannot use copy_file to create a symlink to a directory")
        ctx.actions.symlink(
            output = ctx.outputs.out,
            target_file = ctx.files.src[0],
            is_executable = ctx.attr.is_executable,
        )
    elif DirectoryPathInfo in ctx.attr.src:
        # src points inside a TreeArtifact; the provider carries both the
        # directory artifact and the relative path of the file to extract.
        copy_file_action(
            ctx,
            ctx.attr.src[DirectoryPathInfo].directory,
            ctx.outputs.out,
            dir_path = ctx.attr.src[DirectoryPathInfo].path,
            is_windows = ctx.attr.is_windows,
        )
    else:
        if len(ctx.files.src) != 1:
            fail("src must be a single file or a target that provides a DirectoryPathInfo")
        if ctx.files.src[0].is_directory:
            fail("cannot use copy_file on a directory; try copy_directory instead")
        copy_file_action(ctx, ctx.files.src[0], ctx.outputs.out, is_windows = ctx.attr.is_windows)
    # The copied file is both the rule's output and part of its runfiles;
    # executable targets additionally expose it as the runnable artifact.
    files = depset(direct = [ctx.outputs.out])
    runfiles = ctx.runfiles(files = [ctx.outputs.out])
    if ctx.attr.is_executable:
        return [DefaultInfo(files = files, runfiles = runfiles, executable = ctx.outputs.out)]
    else:
        return [DefaultInfo(files = files, runfiles = runfiles)]
# Attributes shared by both rule flavors below. `is_windows` is threaded in
# from the copy_file macro via select() because a rule implementation cannot
# inspect the host platform directly.
_ATTRS = {
    "src": attr.label(mandatory = True, allow_files = True),
    "is_windows": attr.bool(mandatory = True),
    "is_executable": attr.bool(mandatory = True),
    "allow_symlink": attr.bool(mandatory = True),
    "out": attr.output(mandatory = True),
}

# Non-executable flavor: the copied file cannot be `bazel run`.
_copy_file = rule(
    implementation = _copy_file_impl,
    provides = [DefaultInfo],
    attrs = _ATTRS,
)

# Executable flavor: marks the copied file as a runnable output.
_copy_xfile = rule(
    implementation = _copy_file_impl,
    executable = True,
    provides = [DefaultInfo],
    attrs = _ATTRS,
)
def copy_file(name, src, out, is_executable = False, allow_symlink = False, **kwargs):
    """Copies a file or directory to another location.

    A lighter-weight alternative to `native.genrule()` for copy/rename jobs.
    Uses a Bash command on Linux/macOS/non-Windows and a cmd.exe command on
    Windows (no Bash required there).

    When using this rule with source directories, consider the
    `--host_jvm_args=-DBAZEL_TRACK_SOURCE_DIRECTORIES=1` startup option so
    that changes to files within source directories are detected; see
    https://github.com/bazelbuild/bazel/commit/c64421bc35214f0414e4f4226cc953e8c55fa0d2
    for more context.

    Args:
        name: Name of the rule.
        src: A Label. The file to make a copy of (can also be the label of a
            rule that generates a file).
        out: Path of the output file, relative to this package.
        is_executable: A boolean. Whether to make the output file executable.
            When True, the rule's output can be executed using `bazel run`
            and can be in the srcs of binary and test rules that require
            executable sources.
            WARNING: if `allow_symlink` is True, `src` must also be
            executable.
        allow_symlink: A boolean. Whether to allow symlinking instead of
            copying. When False, the output is always a hard copy; when True
            it *may* be a symlink, with no guarantee (e.g. symlinks are not
            created on Windows). Enable when fast copying matters and
            downstream tools tolerate symlinks.
        **kwargs: further keyword arguments, e.g. `visibility`.
    """
    rule_impl = _copy_xfile if is_executable else _copy_file
    rule_impl(
        name = name,
        src = src,
        out = out,
        # Rule implementations cannot probe the host OS, so resolve it here.
        is_windows = select({
            "@bazel_tools//src/conditions:host_windows": True,
            "//conditions:default": False,
        }),
        is_executable = is_executable,
        allow_symlink = allow_symlink,
        **kwargs
    )
|
en
| 0.83984
|
# Copyright 2019 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # LOCAL MODIFICATIONS # this has a PR patched in on top of the original # https://github.com/bazelbuild/bazel-skylib/blob/7b859037a673db6f606661323e74c5d4751595e6/rules/private/copy_file_private.bzl # https://github.com/bazelbuild/bazel-skylib/pull/324 Implementation of copy_file macro and underlying rules. These rules copy a file to another location using Bash (on Linux/macOS) or cmd.exe (on Windows). `_copy_xfile` marks the resulting file executable, `_copy_file` does not. # Most Windows binaries built with MSVC use a certain argument quoting # scheme. Bazel uses that scheme too to quote arguments. However, # cmd.exe uses different semantics, so Bazel's quoting is wrong here. # To fix that we write the command to a .bat file so no command line # quoting or escaping is required. # Put a hash of the file name into the name of the generated batch file to # make it unique within the package, so that users can define multiple copy_file's. # Flags are documented at # https://docs.microsoft.com/en-us/windows-server/administration/windows-commands/copy # Do not use lib/shell.bzl's shell.quote() method, because that uses # Bash quoting syntax, which is different from cmd.exe's syntax. Helper function that creates an action to copy a file from src to dst. If src is a TreeArtifact, dir_path must be specified as the path within the TreeArtifact to the file to copy. 
This helper is used by copy_file. It is exposed as a public API so it can be used within other rule implementations. Args: ctx: The rule context. src: The source file to copy or TreeArtifact to copy a single file out of. dst: The destination file. dir_path: If src is a TreeArtifact, the path within the TreeArtifact to the file to copy. is_windows: If true, an cmd.exe action is created so there is no bash dependency. Copies a file or directory to another location. `native.genrule()` is sometimes used to copy files (often wishing to rename them). The 'copy_file' rule does this with a simpler interface than genrule. This rule uses a Bash command on Linux/macOS/non-Windows, and a cmd.exe command on Windows (no Bash is required). If using this rule with source directories, it is recommended that you use the `--host_jvm_args=-DBAZEL_TRACK_SOURCE_DIRECTORIES=1` startup option so that changes to files within source directories are detected. See https://github.com/bazelbuild/bazel/commit/c64421bc35214f0414e4f4226cc953e8c55fa0d2 for more context. Args: name: Name of the rule. src: A Label. The file to make a copy of. (Can also be the label of a rule that generates a file.) out: Path of the output file, relative to this package. is_executable: A boolean. Whether to make the output file executable. When True, the rule's output can be executed using `bazel run` and can be in the srcs of binary and test rules that require executable sources. WARNING: If `allow_symlink` is True, `src` must also be executable. allow_symlink: A boolean. Whether to allow symlinking instead of copying. When False, the output is always a hard copy. When True, the output *can* be a symlink, but there is no guarantee that a symlink is created (i.e., at the time of writing, we don't create symlinks on Windows). Set this to True if you need fast copying and your tools can handle symlinks (which most UNIX tools can). **kwargs: further keyword arguments, e.g. `visibility`
| 1.664421
| 2
|
projects/forms/project.py
|
rogue26/processy.io
| 0
|
6629198
|
from django import forms
from projects.models import Client, Project
from bootstrap_modal_forms.forms import BSModalModelForm
class DateInput(forms.DateInput):
    # Render as an HTML5 <input type="date"> so browsers show a date picker.
    input_type = 'date'
class ClientForm(forms.ModelForm):
    """ModelForm over Client exposing every model field except `timestamp`."""

    class Meta:
        model = Client
        exclude = ['timestamp']
class ProjectForm(forms.ModelForm):
    """Form for creating/editing a Project on behalf of the requesting user.

    Requires a `request` keyword argument at construction time; the
    request's user and organization are stamped onto the instance on save.
    """

    class Meta:
        model = Project
        fields = ['name', 'division', 'internal', 'client', 'description', 'start_date']
        labels = {
            "name": "Project name",
            "internal": "Internal or external client?"
        }
        widgets = {
            'start_date': DateInput(),  # default date-format %m/%d/%Y will be used
            'description': forms.Textarea(attrs={'rows': 4}),
        }

    def __init__(self, *args, **kwargs):
        # Pop the request itself (not a plain 'user' kwarg) so the
        # authenticated user cannot be spoofed via submitted form data.
        self.request = kwargs.pop('request')
        super().__init__(*args, **kwargs)

    def save(self, commit=True):
        """Stamp ownership fields and save.

        The fields are set *before* delegating (and regardless of `commit`)
        so that a ``save(commit=False)`` caller also receives an instance
        with `created_by` and `organization` already populated; previously
        those fields were only set when ``commit=True``.
        """
        self.instance.created_by = self.request.user
        self.instance.organization = self.request.user.organization
        return super().save(commit=commit)
class ProjectModalForm(BSModalModelForm):
    """Modal-dialog variant of the Project form.

    The base class supplies ``self.request`` (no ``__init__`` override is
    present here), and ``save()`` stamps the same ownership fields as
    ProjectForm.
    """

    class Meta:
        model = Project
        fields = ['name', 'division', 'internal', 'client', 'description', 'start_date']
        labels = {
            "name": "Project name",
            "internal": "Internal or external client?"
        }
        widgets = {
            'start_date': DateInput(),  # default date-format %m/%d/%Y will be used
            'description': forms.Textarea(attrs={'rows': 4}),
        }

    def save(self, commit=True):
        """Stamp ownership fields and save.

        Fields are set before delegating (and regardless of `commit`) so a
        ``save(commit=False)`` caller also gets a fully-populated instance;
        previously they were only set when ``commit=True``.
        """
        self.instance.created_by = self.request.user
        self.instance.organization = self.request.user.organization
        return super().save(commit=commit)
|
from django import forms
from projects.models import Client, Project
from bootstrap_modal_forms.forms import BSModalModelForm
class DateInput(forms.DateInput):
input_type = 'date'
class ClientForm(forms.ModelForm):
class Meta:
model = Client
exclude = ['timestamp']
class ProjectForm(forms.ModelForm):
class Meta:
model = Project
fields = ['name', 'division', 'internal', 'client', 'description', 'start_date']
labels = {
"name": "Project name",
"internal": "Internal or external client?"
}
widgets = {
'start_date': DateInput(), # default date-format %m/%d/%Y will be used
'description': forms.Textarea(attrs={'rows': 4}),
}
def __init__(self, *args, **kwargs):
# To get request.user. Do not use kwargs.pop('user', None) due to potential security hole
self.request = kwargs.pop('request')
super().__init__(*args, **kwargs)
def save(self, commit=True):
if commit:
self.instance.created_by = self.request.user
self.instance.organization = self.request.user.organization
return super().save(commit=commit)
class ProjectModalForm(BSModalModelForm):
class Meta:
model = Project
fields = ['name', 'division', 'internal', 'client', 'description', 'start_date']
labels = {
"name": "Project name",
"internal": "Internal or external client?"
}
widgets = {
'start_date': DateInput(), # default date-format %m/%d/%Y will be used
'description': forms.Textarea(attrs={'rows': 4}),
}
def save(self, commit=True):
if commit:
self.instance.created_by = self.request.user
self.instance.organization = self.request.user.organization
return super().save(commit=commit)
|
en
| 0.280405
|
# default date-format %m/%d/%Y will be used # To get request.user. Do not use kwargs.pop('user', None) due to potential security hole # default date-format %m/%d/%Y will be used
| 2.388418
| 2
|
smu/parser/smu_writer.py
|
pedersor/google-research
| 0
|
6629199
|
<reponame>pedersor/google-research
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""A command line tool to write a protocol buffer to a file in SMU file format.
This tool is intended to faithfully reproduce the Basel University SMU file
format.
Example:
./smu_writer \
--alsologtostderr \
--input_file=<path to protobuf file> \
--output_file=<path to Basel .dat output file>
"""
from absl import app
from absl import flags
from absl import logging
from tensorflow.io import gfile
from google.protobuf import text_format
from smu import dataset_pb2
from smu.parser.smu_writer_lib import SmuWriter
# Command-line flags. Note: the output is the Basel .dat format described in
# the module docstring, not a protobuf (the previous help text was a
# copy-paste error), and the help strings now include separating spaces.
flags.DEFINE_string('input_file', None,
                    'Path to the input file in SMU protobuf text format.')
flags.DEFINE_string(
    'output_file', None, 'Path to the output file. ' +
    'The file is written in the Basel SMU .dat text format. ' +
    'If empty, outputs to stdout.')
flags.DEFINE_bool(
    'annotate', False,
    'Whether to generate annotations in the output file with proto field names')
flags.mark_flag_as_required('input_file')

FLAGS = flags.FLAGS
def main(argv):
  """Reads conformers from --input_file and writes them in Basel .dat format.

  Output goes to --output_file when set, otherwise to stdout.
  """
  del argv  # Unused.
  smu_proto = dataset_pb2.MultipleConformers()
  with gfile.GFile(FLAGS.input_file) as f:
    raw_proto = f.read()
  text_format.Parse(raw_proto, smu_proto)
  smu_writer = SmuWriter(FLAGS.annotate)
  contents = ''.join(
      smu_writer.process_stage2_proto(conformer)
      for conformer in smu_proto.conformers
  )
  if FLAGS.output_file:
    logging.info('Writing smu7 conformers to .dat file %s.', FLAGS.output_file)
    # Use gfile for the write as well (the read already does), so non-local
    # paths (e.g. GCS) accepted for --input_file also work for --output_file;
    # previously the builtin open() was used here.
    with gfile.GFile(FLAGS.output_file, 'w') as f:
      f.write(contents)
  else:
    print(contents, end='')
if __name__ == '__main__':
app.run(main)
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""A command line tool to write a protocol buffer to a file in SMU file format.
This tool is intended to faithfully reproduce the Basel University SMU file
format.
Example:
./smu_writer \
--alsologtostderr \
--input_file=<path to protobuf file> \
--output_file=<path to Basel .dat output file>
"""
from absl import app
from absl import flags
from absl import logging
from tensorflow.io import gfile
from google.protobuf import text_format
from smu import dataset_pb2
from smu.parser.smu_writer_lib import SmuWriter
flags.DEFINE_string('input_file', None,
'Path to the input file in SMU protobuf text format.')
flags.DEFINE_string(
'output_file', None, 'Path to the output file. ' +
'This file will be a protocol buffer in text format.' +
'If empty, outputs to stdout.')
flags.DEFINE_bool(
'annotate', False,
'Whether to generate annotations in the output file with proto field names')
flags.mark_flag_as_required('input_file')
FLAGS = flags.FLAGS
def main(argv):
del argv # Unused.
smu_proto = dataset_pb2.MultipleConformers()
with gfile.GFile(FLAGS.input_file) as f:
raw_proto = f.read()
text_format.Parse(raw_proto, smu_proto)
smu_writer = SmuWriter(FLAGS.annotate)
contents = ''.join(
smu_writer.process_stage2_proto(conformer)
for conformer in smu_proto.conformers
)
if FLAGS.output_file:
logging.info('Writing smu7 conformers to .dat file %s.', FLAGS.output_file)
with open(FLAGS.output_file, 'w') as f:
f.write(contents)
else:
print(contents, end='')
if __name__ == '__main__':
app.run(main)
|
en
| 0.761327
|
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. A command line tool to write a protocol buffer to a file in SMU file format. This tool is intended to faithfully reproduce the Basel University SMU file format. Example: ./smu_writer \ --alsologtostderr \ --input_file=<path to protobuf file> \ --output_file=<path to Basel .dat output file> # Unused.
| 2.110134
| 2
|
02-Array/02-03-slice.py
|
xiaohui100/FluentPython
| 0
|
6629200
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: Alfons
@contact: <EMAIL>
@file: 02-03-slice.py
@time: 2017/12/24 22:40
@version: v1.0
"""

# Slicing: s[start:stop:step]; a negative step walks the sequence backwards.
s = "bicycle"
slice_a = s[::3]
slice_b = s[::-2]
slice_c = s[::-1]
print(slice_a)
print(slice_b)
print(slice_c)

invoice = """
1909 Pimoroni PiBrella $17.50 3 $52.50
1489 6mm Tactile Switch x20 $4.95 2 $9.90
1510 Panavise Jr. - PV-201 $28.00 1 $28.00
1601 PiTFT Mini Kit 320x240 $34.95 1 $34.95
"""
# Named slice objects read better than bare magic numbers. They are
# loop-invariant, so build them once instead of on every iteration.
SKU = slice(0, 5)
DESCRIPTION = slice(5, 31)
PRICE = slice(31, 37)
for item in invoice.split("\n"):
    print(item[SKU], item[DESCRIPTION], item[PRICE])

# Multi-dimensional slicing (numpy arrays support a[i, j]-style indexing).
import numpy

# Assigning to a slice rewrites that portion of the list in place.
l = list(range(10))
print(l)
l[2:5] = [98, 99]
print(l)
del l[8:9]
print(l)
l[3::2] = [2222, 3333, 4444]
print(l)

# + and * on sequences.
board = [['_'] * 3 for i in range(3)]
print(board)
board[2][2] = 'X'
print(board)
print("Diff with down.↓")
# [['_'] * 3] * 3 repeats *references* to a single inner list, so writing
# one cell shows up in every row.
board = [['_'] * 3] * 3
print(board)
board[2][2] = 'X'
print(board)

# Augmented assignment: lists mutate in place (same id), tuples rebind.
l = [range(3)]
print(id(l))
l *= 3
print(id(l))
t = (1, 2, 3)
print(id(t))
t *= 3
print(id(t))

# A curious += case: t[2] += [20] extends the inner list *and then* raises
# TypeError, because the tuple itself rejects item assignment.
t = (1, 2, [50, 60])
try:
    t[2] += [20]
except TypeError:  # was a bare except; catch only the error this demo raises
    print("Except happened!")
print(t)

# list.sort (in place, returns None) vs the built-in sorted (new list).
fruits = ["grape", "raspberry", "apple", "banana"]
print(sorted(fruits))
print(sorted(fruits, key = len))
print(sorted(fruits, reverse = True))
print(sorted(fruits, key = len, reverse = True))
print(fruits)  # original order unchanged
fruits.sort()
print(fruits)  # sorted in place
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: Alfons
@contact: <EMAIL>
@file: 02-03-slice.py
@time: 2017/12/24 22:40
@version: v1.0
"""
# 切片
s = "bicycle"
slice_a = s[::3]
slice_b = s[::-2]
slice_c = s[::-1]
print(slice_a)
print(slice_b)
print(slice_c)
invoice = """
1909 Pimoroni PiBrella $17.50 3 $52.50
1489 6mm Tactile Switch x20 $4.95 2 $9.90
1510 Panavise Jr. - PV-201 $28.00 1 $28.00
1601 PiTFT Mini Kit 320x240 $34.95 1 $34.95
"""
for item in invoice.split("\n"):
SKU = slice(0, 5)
DESCRIPTION = slice(5, 31)
PRICE = slice(31, 37)
print(item[SKU], item[DESCRIPTION], item[PRICE])
# 多维切片
import numpy
# 给切片赋值
l = list(range(10))
print(l)
l[2:5] = [98, 99]
print(l)
del l[8:9]
print(l)
l[3::2] = [2222, 3333, 4444]
print(l)
# 序列的 + 与 * 操作
board = [['_'] * 3 for i in range(3)]
print(board)
board[2][2] = 'X'
print(board)
print("Diff with down.↓")
board = [['_'] * 3] * 3
print(board)
board[2][2] = 'X'
print(board)
# 增量赋值
l = [range(3)]
print(id(l))
l *= 3
print(id(l))
t = (1, 2, 3)
print(id(t))
t *= 3
print(id(t))
# 关于+= 有趣的例子
t = (1, 2, [50, 60])
try:
t[2] += [20]
except:
print("Except happend!")
pass
print(t)
# list.sort 与内置sorted函数
fruits = ["grape", "raspberry", "apple", "banana"]
print(sorted(fruits))
print(sorted(fruits, key = len))
print(sorted(fruits, reverse = True))
print(sorted(fruits, key = len, reverse = True))
print(fruits) # 不改变列表的原始值
fruits.sort()
print(fruits) # 改变列表的原始值
|
zh
| 0.4493
|
#!/usr/bin/env python # encoding: utf-8 @author: Alfons @contact: <EMAIL> @file: 02-03-slice.py @time: 2017/12/24 22:40 @version: v1.0 # 切片 1909 Pimoroni PiBrella $17.50 3 $52.50 1489 6mm Tactile Switch x20 $4.95 2 $9.90 1510 Panavise Jr. - PV-201 $28.00 1 $28.00 1601 PiTFT Mini Kit 320x240 $34.95 1 $34.95 # 多维切片 # 给切片赋值 # 序列的 + 与 * 操作 # 增量赋值 # 关于+= 有趣的例子 # list.sort 与内置sorted函数 # 不改变列表的原始值 # 改变列表的原始值
| 3.391505
| 3
|
mything/feature.py
|
sdaves/mything-test-deploy
| 3
|
6629201
|
<gh_stars>1-10
# The MIT License (MIT) - Single File Copied
#
# Copyright (c) 2014 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Feature support.
"""
def getabsfile(func):
    # Best-effort absolute source path of `func`, used only in log messages.
    # Under Transcrypt the skip/noskip pragmas compile out the `inspect`
    # lookup, so the browser build falls back to the bare function name.
    name = func.__name__
    # __pragma__ ('skip')
    import inspect
    name = inspect.getabsfile(func)
    # __pragma__ ('noskip')
    return name
class Feature(object):
    """
    The feature manager.
    Usage
    =====
    Backends
    --------
    First thing to do is to choose a backend to use.
    - EnvBackend() - Environment variable based
    - Django Database: db_django
    - Google Datastore: google_cloud_datastore
    - JSON File: jsonfile
    - Local Memory (testing): localmemory
    - Memcache: memcached
    - MongoDB: db_mongodb
    - Redis Key/Value: redis_backend
    - SQL Database: db_sqlalchemy
    - Togglz File: togglzfile
    Creating the feature instance
    -----------------------------
    When creating a feature instance one must pass a backend and a logger to use.
    .. code-block:: python
        from feature import Feature
        from flagon.backends.jsonfile import JSONFileBackend
        # Make a backend
        backend = JSONFileBackend('example/config.json')
        # Make the feature instance
        feature = Feature(backend)
    Using the feature instance
    --------------------------
    Once a feature instance has been made it then can be used to tag callables as features. To do this one uses the feature instance as a decorator.
    .. code-block:: python
        @feature('a feature')
        def say_something(data):
            print(data)
    Now that the function ``say_something`` has been tagged with the feature ``a feature`` it will only execute if ``a feature`` is set active.
    .. note::
        When features are not active they will raise a NameError unless there is a default set.
    .. note::
        When a feature is unknown it will raise a flagon.errors.UnknownFeatureError
    Defaults
    ~~~~~~~~
    Feature instances can also define defaults. Defaults are callables that will be called **instead** of the original callable if the feature is off.
    .. code-block:: python
        import feature
        def yell(data):
            print(data.upper())
        @feature('this feature is off', default=yell)
        def say_something(data):
            print(data)
        @feature('this feature is off', default=feature.skip)
        def say_something_else(data):
            print(data)
    """

    def __init__(self, backend, logger):
        """
        Creates the feature manager.
        :param backend: the backend to use for storing feature states.
        :type backend: flagon.backends.Backend
        :param logger: the logger like object to use for logging.
        :type logger: logging.Logger
        :rtype: Feature
        """
        self.backend = backend
        self.logger = logger
        self.logger.debug(
            'The feature decorator for flagon has been created with %s' % (backend.__class__.__name__)
        )

    def __call__(self, name, default=None):
        """
        What acts as a decorator.
        :param name: the name of the feature.
        :type name: str
        :param default: the default callable to fall back to.
        :type default: callable or None
        :rtype: callable
        """
        # Unknown features fail fast at decoration time, not at call time.
        if not self.backend or not self.backend.exists(name):
            self.logger.error('An unknown feature was requested: %s' % name)
            raise NotImplementedError('Unknown feature: %s' % name)
        def deco(func):
            def wrapper(*args, **kwargs):
                # The backend is consulted on *every* call, so a feature can
                # be toggled at runtime without re-decorating.
                if self.backend.is_active(name):
                    self.logger.debug(
                        '%s func=%s:%s(*%s, **%s)' % (name, getabsfile(func), func.__name__, args, kwargs)
                    )
                    return func(*args, **kwargs)
                # Feature is off: call the default instead, or raise.
                # NOTE(review): "Disabled featured" in the log messages below
                # looks like a typo for "feature" — confirm before changing
                # the runtime text.
                if default:
                    self.logger.warn('Disabled featured %s was requested.' ' Using default.' % name)
                    # NOTE(review): this expects `logger.level` to be a plain
                    # attribute equal to 'DEBUG' (logging.Logger exposes an
                    # int here, and this file's Logger defines a method) —
                    # verify the supplied logger actually satisfies this.
                    if self.logger.level == 'DEBUG':
                        self.logger.debug(
                            '%s default=%s:%s(*%s, **%s)'
                            % (name, getabsfile(default), default.__name__, args, kwargs)
                        )
                    return default(*args, **kwargs)
                else:
                    self.logger.warn('Disabled featured %s was requested' % (name))
                    raise NameError("name '%s' is not enabled" % name)
            val = wrapper
            # __pragma__ ('skip')
            from functools import wraps
            val = wraps(func)(wrapper)
            # __pragma__ ('noskip')
            return val
        return deco

    def skip(self, *args, **kwargs):
        # No-op fallback: pass as `default=feature.skip` to silently skip a
        # disabled feature instead of raising NameError.
        pass
class Logger:
    """Minimal stdout logger exposing the subset of the logging.Logger API
    that Feature uses: debug/error/warn plus a `level` attribute.
    """

    def debug(self, *args):
        # `self` was previously missing, so logger.debug('x') printed the
        # Logger instance itself as the first argument.
        print(*args)

    def error(self, *args):
        print(*args)

    def warn(self, *args):
        print(*args)

    @property
    def level(self):
        # Exposed as an attribute (not a method) because Feature compares
        # `logger.level == 'DEBUG'` directly; as a plain method that
        # comparison could never be true.
        return 'DEBUG'
class EnvBackend:
    '''
    Environment variable based backend.

    A feature counts as active unless MYTHING_FEATURES is set to a
    non-empty comma-separated list that omits its name. Under Transcrypt
    the os lookup is compiled out by the skip/noskip pragmas, so every
    feature is active in the browser build.
    '''
    def exists(self, name):
        # Every feature name is considered known by this backend.
        return True
    def is_active(self, name):
        active = True
        # __pragma__ ('skip')
        import os
        val = os.getenv('MYTHING_FEATURES', '').strip()
        active = val == '' or name in val.split(',')
        # __pragma__ ('noskip')
        return active
def create(backend=None, logger=None):
    """Build a Feature manager.

    Defaults are constructed per call rather than as module-import-time
    default-argument instances shared by every caller (the classic shared
    default-argument pitfall).

    :param backend: feature-state backend; defaults to a fresh EnvBackend.
    :param logger: logger-like object; defaults to a fresh Logger.
    :rtype: Feature
    """
    return Feature(backend if backend is not None else EnvBackend(),
                   logger if logger is not None else Logger())
|
# The MIT License (MIT) - Single File Copied
#
# Copyright (c) 2014 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Feature support.
"""
def getabsfile(func):
name = func.__name__
# __pragma__ ('skip')
import inspect
name = inspect.getabsfile(func)
# __pragma__ ('noskip')
return name
class Feature(object):
"""
The feature manager.
Usage
=====
Backends
--------
First thing to do is to choose a backend to use.
- EnvBackend() - Environment variable based
- Django Database: db_django
- Google Datastore: google_cloud_datastore
- JSON File: jsonfile
- Local Memory (testing): localmemory
- Memcache: memcached
- MongoDB: db_mongodb
- Redis Key/Value: redis_backend
- SQL Database: db_sqlalchemy
- Togglz File: togglzfile
Crating the feature instance
----------------------------
When creating a feature instance one must pass a backend and an optional logger to use.
.. code-block:: python
from feature import Feature
from flagon.backends.jsonfile import JSONFileBackend
# Make a backend
backend = JSONFileBackend('example/config.json')
# Make the feature instance
feature = Feature(backend)
Using the feature instance
--------------------------
Once a feature instance has been made it then can be used to tag callables as features. To do this one uses the feature instance as a decorator.
.. code-block:: python
@feature('a feature')
def say_something(data):
print(data)
Now that the function ``say_something`` has been tagged with the feature ``a feature`` it will only execute if ``a feature`` is set active.
.. note::
When features are not active they will raise a NameError unless there is a default set.
.. note::
When a feature is unknown it will raise an flagon.errors.UnknownFeatureError
Defaults
~~~~~~~~
Feature instance can also define defaults. Defaults are callables that will call **instead** of the original callable if the feature is off.
.. code-block:: python
import feature
def yell(data):
print(data.upper())
@feature('this feature is off', default=yell)
def say_something(data):
print(data)
@feature('this feature is off', default=feature.skip)
def say_something_else(data):
print(data)
"""
def __init__(self, backend, logger):
"""
Creates the feature manager.
:param backend: the backend to use for storing feature states.
:type backend: flagon.backends.Backend
:param logger: the logger like object to use for logging.
:type logger: logging.Logger
:rtype: Feature
"""
self.backend = backend
self.logger = logger
self.logger.debug(
'The feature decorator for flagon has been created with %s' % (backend.__class__.__name__)
)
def __call__(self, name, default=None):
"""
What acts as a decorator.
:param name: the name of the feature.
:type name: str
:param default: the default callable to fall back to.
:type default: callable or None
:rtype: callable
"""
if not self.backend or not self.backend.exists(name):
self.logger.error('An unknown feature was requested: %s' % name)
raise NotImplementedError('Unknown feature: %s' % name)
def deco(func):
def wrapper(*args, **kwargs):
if self.backend.is_active(name):
self.logger.debug(
'%s func=%s:%s(*%s, **%s)' % (name, getabsfile(func), func.__name__, args, kwargs)
)
return func(*args, **kwargs)
if default:
self.logger.warn('Disabled featured %s was requested.' ' Using default.' % name)
if self.logger.level == 'DEBUG':
self.logger.debug(
'%s default=%s:%s(*%s, **%s)'
% (name, getabsfile(default), default.__name__, args, kwargs)
)
return default(*args, **kwargs)
else:
self.logger.warn('Disabled featured %s was requested' % (name))
raise NameError("name '%s' is not enabled" % name)
val = wrapper
# __pragma__ ('skip')
from functools import wraps
val = wraps(func)(wrapper)
# __pragma__ ('noskip')
return val
return deco
def skip(self, *args, **kwargs):
pass
class Logger:
def debug(*args):
print(*args)
def error(*args):
print(*args)
def warn(*args):
print(*args)
def level(self):
return 'DEBUG'
class EnvBackend:
'''
Environment variable based backend
'''
def exists(self, name):
return True
def is_active(self, name):
active = True
# __pragma__ ('skip')
import os
val = os.getenv('MYTHING_FEATURES', '').strip()
active = val == '' or name in val.split(',')
# __pragma__ ('noskip')
return active
def create(backend=EnvBackend(), logger=Logger()):
return Feature(backend, logger)
|
en
| 0.687628
|
# The MIT License (MIT) - Single File Copied # # Copyright (c) 2014 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. Feature support. # __pragma__ ('skip') # __pragma__ ('noskip') The feature manager. Usage ===== Backends -------- First thing to do is to choose a backend to use. - EnvBackend() - Environment variable based - Django Database: db_django - Google Datastore: google_cloud_datastore - JSON File: jsonfile - Local Memory (testing): localmemory - Memcache: memcached - MongoDB: db_mongodb - Redis Key/Value: redis_backend - SQL Database: db_sqlalchemy - Togglz File: togglzfile Crating the feature instance ---------------------------- When creating a feature instance one must pass a backend and an optional logger to use. .. 
code-block:: python from feature import Feature from flagon.backends.jsonfile import JSONFileBackend # Make a backend backend = JSONFileBackend('example/config.json') # Make the feature instance feature = Feature(backend) Using the feature instance -------------------------- Once a feature instance has been made it then can be used to tag callables as features. To do this one uses the feature instance as a decorator. .. code-block:: python @feature('a feature') def say_something(data): print(data) Now that the function ``say_something`` has been tagged with the feature ``a feature`` it will only execute if ``a feature`` is set active. .. note:: When features are not active they will raise a NameError unless there is a default set. .. note:: When a feature is unknown it will raise an flagon.errors.UnknownFeatureError Defaults ~~~~~~~~ Feature instance can also define defaults. Defaults are callables that will call **instead** of the original callable if the feature is off. .. code-block:: python import feature def yell(data): print(data.upper()) @feature('this feature is off', default=yell) def say_something(data): print(data) @feature('this feature is off', default=feature.skip) def say_something_else(data): print(data) Creates the feature manager. :param backend: the backend to use for storing feature states. :type backend: flagon.backends.Backend :param logger: the logger like object to use for logging. :type logger: logging.Logger :rtype: Feature What acts as a decorator. :param name: the name of the feature. :type name: str :param default: the default callable to fall back to. :type default: callable or None :rtype: callable # __pragma__ ('skip') # __pragma__ ('noskip') Environment variable based backend # __pragma__ ('skip') # __pragma__ ('noskip')
| 1.714139
| 2
|
generate_random_graphs.py
|
ls-cwi/lana
| 0
|
6629202
|
import itertools
import random
n_graphs = 5
n_nodes = 5000
p_edge_graph = 0.0008
p_edge_compatibility = 0.002
data_folder = "data/gen/"
output_folder = "output/"
def generate_random_graph(num_nodes, p_edge, name):
print "Generating graph '%s'." % name
nodes = range(0,num_nodes)
edges = filter(lambda x: x[0] < x[1] and random.random() < p_edge, itertools.product(nodes, nodes))
return (nodes, edges, name)
def generate_compatibility_graph(g1, g2, p_edge):
print "Generating compatibility graph '%s_%s'." % (g1[2], g2[2])
g1_labels = [g1[2] + str(x) for x in g1[0]]
g2_labels = [g2[2] + str(x) for x in g2[0]]
return dict([(x, filter(lambda y: random.random() < p_edge, g2_labels)) for x in g1_labels])
def save_graph(g, file_name):
print "Saving graph '%s'." % file_name
file = open(file_name, "w")
file.write( "graph [\n"
" comment \"%s graph\"\n"
" directed 0\n"
"\n" % g[2])
for node in g[0]:
file.write( " node [\n"
" id %d\n"
" label \"%s%d\"\n"
" ]\n" % (node, g[2], node))
for edge in g[1]:
file.write( " edge [\n"
" source %d\n"
" target %d\n"
" label \"%s%d_%s%d\"\n"
" ]\n" % (edge[0], edge[1], g[2], edge[0], g[2], edge[1]))
file.write("] \n")
file.close()
def save_compatibility_graph(g, file_name):
print "Saving compatibility graph '%s'." % file_name
file = open(file_name, "w")
for (v1, vs2) in g.items():
if vs2:
file.write("%s\t%s\n" % (v1, ' '.join([x for x in vs2])))
def generate_run_script(graphs, data_folder, output_folder, file_name):
print "Generating run script (%s)." % file_name
file = open(file_name, 'w')
for (g1, g2) in itertools.combinations(graphs, 2):
file.write("build/lana -if1 0 -if2 0 -ifm 0 -g1 %s%s.gml -g2 %s%s.gml -gm %s%s_%s.seqSim -freq %s%s_%s-freq.csv $1; \n"
% (data_folder, g1[2], data_folder, g2[2], data_folder, g1[2], g2[2], output_folder, g1[2], g2[2]))
def generate_data(n_graphs, n_nodes, p_edge_graph, p_edge_compatibility, data_folder, output_folder):
if n_graphs > 26:
print "Can't make more than 26 graphs!"
graphs = [generate_random_graph(n_nodes, p_edge_graph, str(chr(ord('a')+x))) for x in range(0, n_graphs)]
for g in graphs:
save_graph(g, "%s%s.gml" % (data_folder, g[2]))
for (g1, g2) in itertools.combinations(graphs, 2):
gc = generate_compatibility_graph(g1, g2, p_edge_compatibility)
save_compatibility_graph(gc, "%s%s_%s.seqSim" % (data_folder, g1[2], g2[2]))
generate_run_script(graphs, data_folder, output_folder, "run_all.sh")
generate_data(n_graphs, n_nodes, p_edge_graph, p_edge_compatibility, data_folder, output_folder)
|
import itertools
import random
n_graphs = 5
n_nodes = 5000
p_edge_graph = 0.0008
p_edge_compatibility = 0.002
data_folder = "data/gen/"
output_folder = "output/"
def generate_random_graph(num_nodes, p_edge, name):
print "Generating graph '%s'." % name
nodes = range(0,num_nodes)
edges = filter(lambda x: x[0] < x[1] and random.random() < p_edge, itertools.product(nodes, nodes))
return (nodes, edges, name)
def generate_compatibility_graph(g1, g2, p_edge):
print "Generating compatibility graph '%s_%s'." % (g1[2], g2[2])
g1_labels = [g1[2] + str(x) for x in g1[0]]
g2_labels = [g2[2] + str(x) for x in g2[0]]
return dict([(x, filter(lambda y: random.random() < p_edge, g2_labels)) for x in g1_labels])
def save_graph(g, file_name):
print "Saving graph '%s'." % file_name
file = open(file_name, "w")
file.write( "graph [\n"
" comment \"%s graph\"\n"
" directed 0\n"
"\n" % g[2])
for node in g[0]:
file.write( " node [\n"
" id %d\n"
" label \"%s%d\"\n"
" ]\n" % (node, g[2], node))
for edge in g[1]:
file.write( " edge [\n"
" source %d\n"
" target %d\n"
" label \"%s%d_%s%d\"\n"
" ]\n" % (edge[0], edge[1], g[2], edge[0], g[2], edge[1]))
file.write("] \n")
file.close()
def save_compatibility_graph(g, file_name):
print "Saving compatibility graph '%s'." % file_name
file = open(file_name, "w")
for (v1, vs2) in g.items():
if vs2:
file.write("%s\t%s\n" % (v1, ' '.join([x for x in vs2])))
def generate_run_script(graphs, data_folder, output_folder, file_name):
print "Generating run script (%s)." % file_name
file = open(file_name, 'w')
for (g1, g2) in itertools.combinations(graphs, 2):
file.write("build/lana -if1 0 -if2 0 -ifm 0 -g1 %s%s.gml -g2 %s%s.gml -gm %s%s_%s.seqSim -freq %s%s_%s-freq.csv $1; \n"
% (data_folder, g1[2], data_folder, g2[2], data_folder, g1[2], g2[2], output_folder, g1[2], g2[2]))
def generate_data(n_graphs, n_nodes, p_edge_graph, p_edge_compatibility, data_folder, output_folder):
if n_graphs > 26:
print "Can't make more than 26 graphs!"
graphs = [generate_random_graph(n_nodes, p_edge_graph, str(chr(ord('a')+x))) for x in range(0, n_graphs)]
for g in graphs:
save_graph(g, "%s%s.gml" % (data_folder, g[2]))
for (g1, g2) in itertools.combinations(graphs, 2):
gc = generate_compatibility_graph(g1, g2, p_edge_compatibility)
save_compatibility_graph(gc, "%s%s_%s.seqSim" % (data_folder, g1[2], g2[2]))
generate_run_script(graphs, data_folder, output_folder, "run_all.sh")
generate_data(n_graphs, n_nodes, p_edge_graph, p_edge_compatibility, data_folder, output_folder)
|
none
| 1
| 3.125388
| 3
|
|
src/gmit/re/com/Test.py
|
JoseIgnacioRetamalThomsen/Thompson-s-construction-
| 1
|
6629203
|
import unittest
import Shunting
import Thomsons
import ThomsonsMap
class Test(unittest.TestCase):
@unittest.skip("feature not implemented")
def test_no_dot_shunting(self):
"""
Test the use of no dot for concatenation on shunting algorithm.
:return: Nothing.
"""
testCases = [ ("ab", "a.b"), ("abc", "a.b.c"), ("a*b*", "a*.b*"), ("(a-z)b?", "(a-z).b?"), (
"a?b+c*(a-z)*t", "a?.b+.c*.(a-z)*.t"), ("(0|(1(01*(00)*0)*1)*)*", "(0|(1.(0.1*.(0.0)*.0)*.1)*)*"),
(
"((a-z)|(A-Z)|(0-9)).((a-z)|(A-Z)|(0-9)|_|/.)*.@.((a-z)|(A-Z)|/.)*./..(((a-z)|(A-Z)).((a-z)|(A-Z)).((a-z)|(A-Z))|((a-z)|(A-Z)).((a-z)|(A-Z)))",
"((a-z)|(A-Z)|(0-9))((a-z)|(A-Z)|(0-9)|_|/.)*@((a-z)|(A-Z)|/.)*/.(((a-z)|(A-Z))((a-z)|(A-Z))((a-z)|(A-Z))|((a-z)|(A-Z))((a-z)|(A-Z)))"),
("abc","abc")]
for case in testCases:
print(case[0])
self.assertEqual(Shunting.Converter().toPofix(case[0]), Shunting.Converter().toPofix(case[1]))
matchtestcases = [ ("a.b.c","",False),
("a.b.c", "abc",True),
("a.b.c","abbc",False),
("a.b.c", "abcc", False),
("a.b.c", "abad", False),
("a.b.c", "abbbc", False),
("a.b.c", "adc", False),
("a.(b|d).c", "", False),
("a.(b|d).c", "abc", True),
("a.(b|d).c", "abbc", False),
("a.(b|d).c", "abcc", False),
("a.(b|d).c", "abad", False),
("a.(b|d).c", "abbbc", False),
("a.(b|d).c", "adc", True),
("a.(b|d)*", "", False),
("a.(b|d)*", "abc", False),
("a.(b|d)*", "abbc", False),
("a.(b|d)*", "abcc", False),
("a.(b|d)*", "abad", False),
("a.(b|d)*", "abbbc", False),
("a.(b|d)*", "adc", False),
("a.(b.b)*.c", "", False),
("a.(b.b)*.c", "abc", False),
("a.(b.b)*.c", "abbc", True),
("a.(b.b)*.c", "abcc", False),
("a.(b.b)*.c", "abad", False),
("a.(b.b)*.c", "abbbc", False),
("a.(b.b)*.c", "adc", False),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "0", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "00", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "11", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "000", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "011", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "110", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "0000", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "0011", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "0110", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "1001", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "00000", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "1", False),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "10", False),
]
# fail becuase overflow for multiple of 3 regex test
@unittest.expectedFailure
def test_thomsons(self):
for case in self.matchtestcases:
self.assertEqual(Thomsons.match(case[0],case[1]),case[2])
# def test_isupper(self):
# self.assertTrue('FOO'.isupper())
# self.assertFalse('Foo'.isupper())
#
# def test_split(self):
# s = 'hello world'
# self.assertEqual(s.split(), ['hello', 'world'])
# # check that s.split fails when the separator is not a string
# with self.assertRaises(TypeError):
# s.split(2)
def test_thomsonsMap(self):
for case in self.matchtestcases:
self.assertEqual(ThomsonsMap.compile(Shunting.Converter().toPofix(case[0])).run(case[1]),case[2])
if __name__ == '__main__':
unittest.main()
|
import unittest
import Shunting
import Thomsons
import ThomsonsMap
class Test(unittest.TestCase):
@unittest.skip("feature not implemented")
def test_no_dot_shunting(self):
"""
Test the use of no dot for concatenation on shunting algorithm.
:return: Nothing.
"""
testCases = [ ("ab", "a.b"), ("abc", "a.b.c"), ("a*b*", "a*.b*"), ("(a-z)b?", "(a-z).b?"), (
"a?b+c*(a-z)*t", "a?.b+.c*.(a-z)*.t"), ("(0|(1(01*(00)*0)*1)*)*", "(0|(1.(0.1*.(0.0)*.0)*.1)*)*"),
(
"((a-z)|(A-Z)|(0-9)).((a-z)|(A-Z)|(0-9)|_|/.)*.@.((a-z)|(A-Z)|/.)*./..(((a-z)|(A-Z)).((a-z)|(A-Z)).((a-z)|(A-Z))|((a-z)|(A-Z)).((a-z)|(A-Z)))",
"((a-z)|(A-Z)|(0-9))((a-z)|(A-Z)|(0-9)|_|/.)*@((a-z)|(A-Z)|/.)*/.(((a-z)|(A-Z))((a-z)|(A-Z))((a-z)|(A-Z))|((a-z)|(A-Z))((a-z)|(A-Z)))"),
("abc","abc")]
for case in testCases:
print(case[0])
self.assertEqual(Shunting.Converter().toPofix(case[0]), Shunting.Converter().toPofix(case[1]))
matchtestcases = [ ("a.b.c","",False),
("a.b.c", "abc",True),
("a.b.c","abbc",False),
("a.b.c", "abcc", False),
("a.b.c", "abad", False),
("a.b.c", "abbbc", False),
("a.b.c", "adc", False),
("a.(b|d).c", "", False),
("a.(b|d).c", "abc", True),
("a.(b|d).c", "abbc", False),
("a.(b|d).c", "abcc", False),
("a.(b|d).c", "abad", False),
("a.(b|d).c", "abbbc", False),
("a.(b|d).c", "adc", True),
("a.(b|d)*", "", False),
("a.(b|d)*", "abc", False),
("a.(b|d)*", "abbc", False),
("a.(b|d)*", "abcc", False),
("a.(b|d)*", "abad", False),
("a.(b|d)*", "abbbc", False),
("a.(b|d)*", "adc", False),
("a.(b.b)*.c", "", False),
("a.(b.b)*.c", "abc", False),
("a.(b.b)*.c", "abbc", True),
("a.(b.b)*.c", "abcc", False),
("a.(b.b)*.c", "abad", False),
("a.(b.b)*.c", "abbbc", False),
("a.(b.b)*.c", "adc", False),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "0", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "00", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "11", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "000", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "011", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "110", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "0000", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "0011", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "0110", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "1001", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "00000", True),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "1", False),
("(0|((1.(0.1*.(0.0)*.0)*.1))*)*", "10", False),
]
# fail becuase overflow for multiple of 3 regex test
@unittest.expectedFailure
def test_thomsons(self):
for case in self.matchtestcases:
self.assertEqual(Thomsons.match(case[0],case[1]),case[2])
# def test_isupper(self):
# self.assertTrue('FOO'.isupper())
# self.assertFalse('Foo'.isupper())
#
# def test_split(self):
# s = 'hello world'
# self.assertEqual(s.split(), ['hello', 'world'])
# # check that s.split fails when the separator is not a string
# with self.assertRaises(TypeError):
# s.split(2)
def test_thomsonsMap(self):
for case in self.matchtestcases:
self.assertEqual(ThomsonsMap.compile(Shunting.Converter().toPofix(case[0])).run(case[1]),case[2])
if __name__ == '__main__':
unittest.main()
|
en
| 0.476878
|
Test the use of no dot for concatenation on shunting algorithm. :return: Nothing. # fail becuase overflow for multiple of 3 regex test # def test_isupper(self): # self.assertTrue('FOO'.isupper()) # self.assertFalse('Foo'.isupper()) # # def test_split(self): # s = 'hello world' # self.assertEqual(s.split(), ['hello', 'world']) # # check that s.split fails when the separator is not a string # with self.assertRaises(TypeError): # s.split(2)
| 3.27235
| 3
|
day_1/loop_control_flow.py
|
anishLearnsToCode/python-workshop-7
| 4
|
6629204
|
# continue
# for i in range(1, 11):
# if i == 4 or i == 8:
# continue
# print(i)
# break
# start = 0
# stop = 5
# step = 1
for number in range(5):
print(number)
if number > 2:
break
print('hello')
print('i am outside loop')
"""
output
0
1
2
3
i am outside loop
"""
|
# continue
# for i in range(1, 11):
# if i == 4 or i == 8:
# continue
# print(i)
# break
# start = 0
# stop = 5
# step = 1
for number in range(5):
print(number)
if number > 2:
break
print('hello')
print('i am outside loop')
"""
output
0
1
2
3
i am outside loop
"""
|
en
| 0.622223
|
# continue # for i in range(1, 11): # if i == 4 or i == 8: # continue # print(i) # break # start = 0 # stop = 5 # step = 1 output 0 1 2 3 i am outside loop
| 4.099514
| 4
|
miniature_potato/models.py
|
katerina7479/miniature-potato
| 0
|
6629205
|
<filename>miniature_potato/models.py
from django.db import models
from django.contrib.auth.models import User
class Todo(models.Model):
text = models.TextField()
createdAt = models.DateTimeField('created_at', auto_now_add=True)
completedAt = models.DateTimeField('completed_at', null=True)
user = models.ForeignKey(User, on_delete=models.CASCADE)
|
<filename>miniature_potato/models.py
from django.db import models
from django.contrib.auth.models import User
class Todo(models.Model):
text = models.TextField()
createdAt = models.DateTimeField('created_at', auto_now_add=True)
completedAt = models.DateTimeField('completed_at', null=True)
user = models.ForeignKey(User, on_delete=models.CASCADE)
|
none
| 1
| 2.162185
| 2
|
|
samples/bccharts_example_2.py
|
Richard-L-Johnson/pyalgotrader
| 3,719
|
6629206
|
from pyalgotrade import bar
from pyalgotrade import strategy
from pyalgotrade import plotter
from pyalgotrade.technical import vwap
from pyalgotrade.barfeed import csvfeed
from pyalgotrade.bitstamp import broker
from pyalgotrade import broker as basebroker
class VWAPMomentum(strategy.BacktestingStrategy):
MIN_TRADE = 5
def __init__(self, feed, brk, instrument, vwapWindowSize, buyThreshold, sellThreshold):
super(VWAPMomentum, self).__init__(feed, brk)
self.__instrument = instrument
self.__vwap = vwap.VWAP(feed[instrument], vwapWindowSize)
self.__buyThreshold = buyThreshold
self.__sellThreshold = sellThreshold
def _getActiveOrders(self):
orders = self.getBroker().getActiveOrders()
buy = [o for o in orders if o.isBuy()]
sell = [o for o in orders if o.isSell()]
return buy, sell
def _cancelOrders(self, orders):
brk = self.getBroker()
for o in orders:
self.info("Canceling order %s" % (o.getId()))
brk.cancelOrder(o)
def _buySignal(self, price):
buyOrders, sellOrders = self._getActiveOrders()
self._cancelOrders(sellOrders)
brk = self.getBroker()
cashAvail = brk.getCash() * 0.98
size = round(cashAvail / price, 3)
if len(buyOrders) == 0 and price*size > VWAPMomentum.MIN_TRADE:
self.info("Buy %s at %s" % (size, price))
try:
self.limitOrder(self.__instrument, price, size)
except Exception as e:
self.error("Failed to buy: %s" % (e))
def _sellSignal(self, price):
buyOrders, sellOrders = self._getActiveOrders()
self._cancelOrders(buyOrders)
brk = self.getBroker()
shares = brk.getShares(self.__instrument)
if len(sellOrders) == 0 and shares > 0:
self.info("Sell %s at %s" % (shares, price))
self.limitOrder(self.__instrument, price, shares*-1)
def getVWAP(self):
return self.__vwap
def onBars(self, bars):
vwap = self.__vwap[-1]
if vwap is None:
return
price = bars[self.__instrument].getClose()
if price > vwap * (1 + self.__buyThreshold):
self._buySignal(price)
elif price < vwap * (1 - self.__sellThreshold):
self._sellSignal(price)
def onOrderUpdated(self, order):
if order.isBuy():
orderType = "Buy"
else:
orderType = "Sell"
exec_info_str = ""
if order.getExecutionInfo():
exec_info_str = " - Price: %s - Amount: %s - Fee: %s" % (
order.getExecutionInfo().getPrice(), order.getExecutionInfo().getQuantity(),
round(order.getExecutionInfo().getCommission(), 2)
)
self.info("%s order %d updated - Status: %s%s" % (
orderType,
order.getId(),
basebroker.Order.State.toString(order.getState()),
exec_info_str
))
def main(plot):
instrument = "BTC"
initialCash = 1000
vwapWindowSize = 100
buyThreshold = 0.02
sellThreshold = 0.01
barFeed = csvfeed.GenericBarFeed(bar.Frequency.MINUTE*30)
barFeed.addBarsFromCSV(instrument, "30min-bitstampUSD.csv")
brk = broker.BacktestingBroker(initialCash, barFeed)
strat = VWAPMomentum(barFeed, brk, instrument, vwapWindowSize, buyThreshold, sellThreshold)
if plot:
plt = plotter.StrategyPlotter(strat)
plt.getInstrumentSubplot(instrument).addDataSeries("VWAP", strat.getVWAP())
strat.run()
if plot:
plt.plot()
if __name__ == "__main__":
main(True)
|
from pyalgotrade import bar
from pyalgotrade import strategy
from pyalgotrade import plotter
from pyalgotrade.technical import vwap
from pyalgotrade.barfeed import csvfeed
from pyalgotrade.bitstamp import broker
from pyalgotrade import broker as basebroker
class VWAPMomentum(strategy.BacktestingStrategy):
MIN_TRADE = 5
def __init__(self, feed, brk, instrument, vwapWindowSize, buyThreshold, sellThreshold):
super(VWAPMomentum, self).__init__(feed, brk)
self.__instrument = instrument
self.__vwap = vwap.VWAP(feed[instrument], vwapWindowSize)
self.__buyThreshold = buyThreshold
self.__sellThreshold = sellThreshold
def _getActiveOrders(self):
orders = self.getBroker().getActiveOrders()
buy = [o for o in orders if o.isBuy()]
sell = [o for o in orders if o.isSell()]
return buy, sell
def _cancelOrders(self, orders):
brk = self.getBroker()
for o in orders:
self.info("Canceling order %s" % (o.getId()))
brk.cancelOrder(o)
def _buySignal(self, price):
buyOrders, sellOrders = self._getActiveOrders()
self._cancelOrders(sellOrders)
brk = self.getBroker()
cashAvail = brk.getCash() * 0.98
size = round(cashAvail / price, 3)
if len(buyOrders) == 0 and price*size > VWAPMomentum.MIN_TRADE:
self.info("Buy %s at %s" % (size, price))
try:
self.limitOrder(self.__instrument, price, size)
except Exception as e:
self.error("Failed to buy: %s" % (e))
def _sellSignal(self, price):
buyOrders, sellOrders = self._getActiveOrders()
self._cancelOrders(buyOrders)
brk = self.getBroker()
shares = brk.getShares(self.__instrument)
if len(sellOrders) == 0 and shares > 0:
self.info("Sell %s at %s" % (shares, price))
self.limitOrder(self.__instrument, price, shares*-1)
def getVWAP(self):
return self.__vwap
def onBars(self, bars):
vwap = self.__vwap[-1]
if vwap is None:
return
price = bars[self.__instrument].getClose()
if price > vwap * (1 + self.__buyThreshold):
self._buySignal(price)
elif price < vwap * (1 - self.__sellThreshold):
self._sellSignal(price)
def onOrderUpdated(self, order):
if order.isBuy():
orderType = "Buy"
else:
orderType = "Sell"
exec_info_str = ""
if order.getExecutionInfo():
exec_info_str = " - Price: %s - Amount: %s - Fee: %s" % (
order.getExecutionInfo().getPrice(), order.getExecutionInfo().getQuantity(),
round(order.getExecutionInfo().getCommission(), 2)
)
self.info("%s order %d updated - Status: %s%s" % (
orderType,
order.getId(),
basebroker.Order.State.toString(order.getState()),
exec_info_str
))
def main(plot):
instrument = "BTC"
initialCash = 1000
vwapWindowSize = 100
buyThreshold = 0.02
sellThreshold = 0.01
barFeed = csvfeed.GenericBarFeed(bar.Frequency.MINUTE*30)
barFeed.addBarsFromCSV(instrument, "30min-bitstampUSD.csv")
brk = broker.BacktestingBroker(initialCash, barFeed)
strat = VWAPMomentum(barFeed, brk, instrument, vwapWindowSize, buyThreshold, sellThreshold)
if plot:
plt = plotter.StrategyPlotter(strat)
plt.getInstrumentSubplot(instrument).addDataSeries("VWAP", strat.getVWAP())
strat.run()
if plot:
plt.plot()
if __name__ == "__main__":
main(True)
|
none
| 1
| 2.305316
| 2
|
|
vantivsdk/pgp_helper.py
|
Devenlabo123/vantiv-sdk-for-python
| 0
|
6629207
|
<reponame>Devenlabo123/vantiv-sdk-for-python
from subprocess import call
from subprocess import check_output
from subprocess import CalledProcessError
import os
from . import (utils)
class PgpHelper(object):
# Encrypt a file.
def encryptFile(self, recipient, toBeEncryptedFilepath, outputFilepath):
# Call gpg command line to encrypt the file.
try:
check_output(["gpg",
"--batch",
"--yes",
"--no-secmem-warning",
"--armor",
"--trust-model", "always",
"--output", outputFilepath,
"--recipient", recipient,
"--encrypt", toBeEncryptedFilepath])
# Check for error code.
print("\"%s\" has been encrypted to \"%s\"." % (toBeEncryptedFilepath, outputFilepath))
except CalledProcessError as err:
raise utils.VantivException("Encrypting the file has failed!\n%s" % err.output)
# Handle gpg encryption when the output filename is the same as the input filename.
def encryptFileSameName(self, recipient, toBeEncryptedFilepath):
try:
temp = 'pgp.vantiv'
self.encryptFile(recipient, toBeEncryptedFilepath, temp)
writer = open(toBeEncryptedFilepath, 'wb')
reader = open(temp, 'rb')
writer.write(reader.read())
writer.close()
reader.close()
os.remove(temp)
except CalledProcessError as err:
raise utils.VantivException("Encrypting the file to the output with the same name has failed!\n%s" % err.output)
# Decrypt an encrypted file.
def decryptFile(self, passphrase, encryptedFilepath, outputFilepath):
# Call gpg command line to decrypt the file.
try:
check_output(["gpg",
"--batch",
"--yes",
"--no-secmem-warning",
"--no-mdc-warning",
"--output", outputFilepath,
"--passphrase", passphrase,
"--decrypt", encryptedFilepath])
# Check for error code.
print("\"%s\" has been decrypted to \"%s\"." % (encryptedFilepath, outputFilepath))
except CalledProcessError as err:
raise utils.VantivException("Decrypting the file has failed!\n%s" % err.output)
# Add Vantiv public key into merchants' keyrings.
def importVantivPublicKey(self, publicKeyFilePath):
# Call gpg command line to import public key.
try:
check_output(["gpg",
"--import", publicKeyFilePath])
#Check for error code.
print("Successfully added Vantiv public key!")
except CalledProcessError as err:
raise utils.VantivException("Adding Vantiv public key has failed with error code is %s.\n" % err.output)
|
from subprocess import call
from subprocess import check_output
from subprocess import CalledProcessError
import os
from . import (utils)
class PgpHelper(object):
# Encrypt a file.
def encryptFile(self, recipient, toBeEncryptedFilepath, outputFilepath):
# Call gpg command line to encrypt the file.
try:
check_output(["gpg",
"--batch",
"--yes",
"--no-secmem-warning",
"--armor",
"--trust-model", "always",
"--output", outputFilepath,
"--recipient", recipient,
"--encrypt", toBeEncryptedFilepath])
# Check for error code.
print("\"%s\" has been encrypted to \"%s\"." % (toBeEncryptedFilepath, outputFilepath))
except CalledProcessError as err:
raise utils.VantivException("Encrypting the file has failed!\n%s" % err.output)
# Handle gpg encryption when the output filename is the same as the input filename.
def encryptFileSameName(self, recipient, toBeEncryptedFilepath):
try:
temp = 'pgp.vantiv'
self.encryptFile(recipient, toBeEncryptedFilepath, temp)
writer = open(toBeEncryptedFilepath, 'wb')
reader = open(temp, 'rb')
writer.write(reader.read())
writer.close()
reader.close()
os.remove(temp)
except CalledProcessError as err:
raise utils.VantivException("Encrypting the file to the output with the same name has failed!\n%s" % err.output)
# Decrypt an encrypted file.
def decryptFile(self, passphrase, encryptedFilepath, outputFilepath):
# Call gpg command line to decrypt the file.
try:
check_output(["gpg",
"--batch",
"--yes",
"--no-secmem-warning",
"--no-mdc-warning",
"--output", outputFilepath,
"--passphrase", passphrase,
"--decrypt", encryptedFilepath])
# Check for error code.
print("\"%s\" has been decrypted to \"%s\"." % (encryptedFilepath, outputFilepath))
except CalledProcessError as err:
raise utils.VantivException("Decrypting the file has failed!\n%s" % err.output)
# Add Vantiv public key into merchants' keyrings.
def importVantivPublicKey(self, publicKeyFilePath):
# Call gpg command line to import public key.
try:
check_output(["gpg",
"--import", publicKeyFilePath])
#Check for error code.
print("Successfully added Vantiv public key!")
except CalledProcessError as err:
raise utils.VantivException("Adding Vantiv public key has failed with error code is %s.\n" % err.output)
|
en
| 0.733389
|
# Encrypt a file. # Call gpg command line to encrypt the file. # Check for error code. # Handle gpg encryption when the output filename is the same as the input filename. # Decrypt an encrypted file. # Call gpg command line to decrypt the file. # Check for error code. # Add Vantiv public key into merchants' keyrings. # Call gpg command line to import public key. #Check for error code.
| 3.004093
| 3
|
main.py
|
Kimxons/passwd-locker
| 0
|
6629208
|
<gh_stars>0
#!/usr/bin/env python3.8
# vim: set fileencoding=<utf-8> :
import string
import random
class User:
'''
This Class contains user methods and attrs
'''
user_info = []
def __init__(self,first,last,password):
"""
Information needed to create a password saving object
"""
self.first = first
self.last = last
self.password = password
def create_user(self):
"""
Create an instance of a new user
"""
self.user_info.append(self)
class Credentials(User):
"""
Class that holds credential information and associated methods eg. add, remove and view creadentials
"""
credentials_info = []
user_cred_info = []
@classmethod
def user_check(cls,first,password):
"""
Checks for matching credentials in user_info
"""
for cred in cls.user_info:
if cred.first == first and cred.password == password:
identity = cred.first
return identity
def __init__(self,name,username,platform,pwd):
"""
Initialize new Credentials object
"""
self.name = name
self.username = username
self.platform = platform
self.pwd = <PASSWORD>
def save_cred(self):
"""
Saves credentials in credentials_info list
"""
Credentials.credentials_info.append(self)
def password_gen(size):
"""
Generate a random string of letters and digits
"""
lettersAndNumbers = string.ascii_letters + string.digits
password = ''.join(random.choice(lettersAndNumbers) for i in range(int(size)))
return password
@classmethod
def show_credentials(cls,username):
"""
Shows the saved credentials
"""
for cred in cls.credentials_info:
if cred.username == username:
cls.user_cred_info.append(cred)
return cls.user_cred_info
@classmethod
def find_platform(cls,platform):
"""
Finds the platform's credentials
"""
for cred in cls.credentials_info:
if cred.platform == platform:
return cred
@classmethod
def del_cred(cls,cred):
"""
Deletes credentials saved, and used together with the find platform method
"""
for credential in cls.credentials_info:
if credential == cred:
del credential
return "Deleted"
|
#!/usr/bin/env python3.8
# vim: set fileencoding=<utf-8> :
import string
import random
class User:
'''
This Class contains user methods and attrs
'''
user_info = []
def __init__(self,first,last,password):
"""
Information needed to create a password saving object
"""
self.first = first
self.last = last
self.password = password
def create_user(self):
"""
Create an instance of a new user
"""
self.user_info.append(self)
class Credentials(User):
"""
Class that holds credential information and associated methods eg. add, remove and view creadentials
"""
credentials_info = []
user_cred_info = []
@classmethod
def user_check(cls,first,password):
"""
Checks for matching credentials in user_info
"""
for cred in cls.user_info:
if cred.first == first and cred.password == password:
identity = cred.first
return identity
def __init__(self,name,username,platform,pwd):
"""
Initialize new Credentials object
"""
self.name = name
self.username = username
self.platform = platform
self.pwd = <PASSWORD>
def save_cred(self):
"""
Saves credentials in credentials_info list
"""
Credentials.credentials_info.append(self)
def password_gen(size):
"""
Generate a random string of letters and digits
"""
lettersAndNumbers = string.ascii_letters + string.digits
password = ''.join(random.choice(lettersAndNumbers) for i in range(int(size)))
return password
@classmethod
def show_credentials(cls,username):
"""
Shows the saved credentials
"""
for cred in cls.credentials_info:
if cred.username == username:
cls.user_cred_info.append(cred)
return cls.user_cred_info
@classmethod
def find_platform(cls,platform):
"""
Finds the platform's credentials
"""
for cred in cls.credentials_info:
if cred.platform == platform:
return cred
@classmethod
def del_cred(cls,cred):
"""
Deletes credentials saved, and used together with the find platform method
"""
for credential in cls.credentials_info:
if credential == cred:
del credential
return "Deleted"
|
en
| 0.792189
|
#!/usr/bin/env python3.8 # vim: set fileencoding=<utf-8> : This Class contains user methods and attrs Information needed to create a password saving object Create an instance of a new user Class that holds credential information and associated methods eg. add, remove and view creadentials Checks for matching credentials in user_info Initialize new Credentials object Saves credentials in credentials_info list Generate a random string of letters and digits Shows the saved credentials Finds the platform's credentials Deletes credentials saved, and used together with the find platform method
| 3.609144
| 4
|
CeV - Gustavo Guanabara/exerc062.py
|
us19861229c/Meu-aprendizado-Python
| 1
|
6629209
|
print("Gerador de PA")
i = int(input("Digite o termo: "))
p = int(input("Digite a razão: "))
termo = i
cont = 1
total = 0
mais = 10
while mais != 0:
total = total + mais
while cont <= total:
print(f"{cont}o. Termo: {termo}...", end="")
termo += p
cont += 1
print('Espera')
mais = int(input("Quantos termos mais? "))
print(f"Foram solicitados {total} termos.")
|
print("Gerador de PA")
i = int(input("Digite o termo: "))
p = int(input("Digite a razão: "))
termo = i
cont = 1
total = 0
mais = 10
while mais != 0:
total = total + mais
while cont <= total:
print(f"{cont}o. Termo: {termo}...", end="")
termo += p
cont += 1
print('Espera')
mais = int(input("Quantos termos mais? "))
print(f"Foram solicitados {total} termos.")
|
none
| 1
| 3.826592
| 4
|
|
Exercicios em python/ex62.py
|
GabrielSantos25/Python
| 0
|
6629210
|
<reponame>GabrielSantos25/Python
#Melhorada progressão aritmética em while
termo = int(input('Informe o primeiro termo: '))
razao = int(input('Informe a razão: '))
primeiro = termo
cont = 1
novo = 10
total = 0
while novo != 0:
total += novo
while cont <= total:
print(f'{primeiro}', end=' -> ')
primeiro += razao
cont += 1
print('Acabou')
novo = int(input('Quer mostrar mais termos? '))
print(f'Progressão finalizada com {total} termos ao todo')
|
#Melhorada progressão aritmética em while
termo = int(input('Informe o primeiro termo: '))
razao = int(input('Informe a razão: '))
primeiro = termo
cont = 1
novo = 10
total = 0
while novo != 0:
total += novo
while cont <= total:
print(f'{primeiro}', end=' -> ')
primeiro += razao
cont += 1
print('Acabou')
novo = int(input('Quer mostrar mais termos? '))
print(f'Progressão finalizada com {total} termos ao todo')
|
pt
| 0.99903
|
#Melhorada progressão aritmética em while
| 3.882782
| 4
|
S4/S4 Library/simulation/venues/chalet_garden/chalet_garden_situation.py
|
NeonOcean/Environment
| 1
|
6629211
|
from sims4.tuning.instances import lock_instance_tunables
from sims4.tuning.tunable import TunableReference
from sims4.tuning.tunable_base import GroupNames
from situations.bouncer.bouncer_types import RequestSpawningOption, BouncerRequestPriority, BouncerExclusivityCategory
from situations.situation import Situation
from situations.situation_complex import TunableSituationJobAndRoleState, SituationComplexCommon, SituationState, SituationStateData
from situations.situation_guest_list import SituationGuestList, SituationGuestInfo
from situations.situation_types import SituationCreationUIOption
import filters
import services
import sims4
logger = sims4.log.Logger('ChaletGardenSituation', default_owner='trevor')
class _GroupState(SituationState):
pass
class ChaletGardenSituation(SituationComplexCommon):
INSTANCE_TUNABLES = {'man_job_and_role': TunableSituationJobAndRoleState(description='\n The job and role state for the man on the Chalet Garden lot.\n '), 'woman_job_and_role': TunableSituationJobAndRoleState(description='\n The job and role state for the man on the Chalet Garden lot.\n '), 'group_filter': TunableReference(description='\n The group filter for these Sims. This filter is what will\n setup the Sims that need to spawn in. They will be added\n to Individual Sim Situations.\n ', manager=services.get_instance_manager(sims4.resources.Types.SIM_FILTER), class_restrictions=filters.tunable.TunableAggregateFilter, tuning_group=GroupNames.ROLES)}
REMOVE_INSTANCE_TUNABLES = Situation.NON_USER_FACING_REMOVE_INSTANCE_TUNABLES
@classmethod
def _states(cls):
return (SituationStateData(1, _GroupState),)
@classmethod
def _get_tuned_job_and_default_role_state_tuples(cls):
return [(cls.man_job_and_role.job, cls.man_job_and_role.role_state), (cls.woman_job_and_role.job, cls.woman_job_and_role.role_state)]
@classmethod
def default_job(cls):
pass
@classmethod
def get_predefined_guest_list(cls):
guest_list = SituationGuestList(invite_only=True)
worker_filter = cls.group_filter if cls.group_filter is not None else cls.default_job().filter
filter_results = services.sim_filter_service().submit_matching_filter(sim_filter=worker_filter, allow_yielding=False, gsi_source_fn=cls.get_sim_filter_gsi_name)
if not filter_results:
logger.error('Failed to find/create any sims for {};', cls)
return guest_list
for result in filter_results:
job = cls.man_job_and_role.job if result.sim_info.is_male else cls.woman_job_and_role.job
guest_list.add_guest_info(SituationGuestInfo(result.sim_info.sim_id, job, RequestSpawningOption.DONT_CARE, BouncerRequestPriority.BACKGROUND_MEDIUM))
return guest_list
def start_situation(self):
super().start_situation()
self._change_state(_GroupState())
lock_instance_tunables(ChaletGardenSituation, exclusivity=BouncerExclusivityCategory.VENUE_EMPLOYEE, creation_ui_option=SituationCreationUIOption.NOT_AVAILABLE, _implies_greeted_status=False)
|
from sims4.tuning.instances import lock_instance_tunables
from sims4.tuning.tunable import TunableReference
from sims4.tuning.tunable_base import GroupNames
from situations.bouncer.bouncer_types import RequestSpawningOption, BouncerRequestPriority, BouncerExclusivityCategory
from situations.situation import Situation
from situations.situation_complex import TunableSituationJobAndRoleState, SituationComplexCommon, SituationState, SituationStateData
from situations.situation_guest_list import SituationGuestList, SituationGuestInfo
from situations.situation_types import SituationCreationUIOption
import filters
import services
import sims4
logger = sims4.log.Logger('ChaletGardenSituation', default_owner='trevor')
class _GroupState(SituationState):
pass
class ChaletGardenSituation(SituationComplexCommon):
INSTANCE_TUNABLES = {'man_job_and_role': TunableSituationJobAndRoleState(description='\n The job and role state for the man on the Chalet Garden lot.\n '), 'woman_job_and_role': TunableSituationJobAndRoleState(description='\n The job and role state for the man on the Chalet Garden lot.\n '), 'group_filter': TunableReference(description='\n The group filter for these Sims. This filter is what will\n setup the Sims that need to spawn in. They will be added\n to Individual Sim Situations.\n ', manager=services.get_instance_manager(sims4.resources.Types.SIM_FILTER), class_restrictions=filters.tunable.TunableAggregateFilter, tuning_group=GroupNames.ROLES)}
REMOVE_INSTANCE_TUNABLES = Situation.NON_USER_FACING_REMOVE_INSTANCE_TUNABLES
@classmethod
def _states(cls):
return (SituationStateData(1, _GroupState),)
@classmethod
def _get_tuned_job_and_default_role_state_tuples(cls):
return [(cls.man_job_and_role.job, cls.man_job_and_role.role_state), (cls.woman_job_and_role.job, cls.woman_job_and_role.role_state)]
@classmethod
def default_job(cls):
pass
@classmethod
def get_predefined_guest_list(cls):
guest_list = SituationGuestList(invite_only=True)
worker_filter = cls.group_filter if cls.group_filter is not None else cls.default_job().filter
filter_results = services.sim_filter_service().submit_matching_filter(sim_filter=worker_filter, allow_yielding=False, gsi_source_fn=cls.get_sim_filter_gsi_name)
if not filter_results:
logger.error('Failed to find/create any sims for {};', cls)
return guest_list
for result in filter_results:
job = cls.man_job_and_role.job if result.sim_info.is_male else cls.woman_job_and_role.job
guest_list.add_guest_info(SituationGuestInfo(result.sim_info.sim_id, job, RequestSpawningOption.DONT_CARE, BouncerRequestPriority.BACKGROUND_MEDIUM))
return guest_list
def start_situation(self):
super().start_situation()
self._change_state(_GroupState())
lock_instance_tunables(ChaletGardenSituation, exclusivity=BouncerExclusivityCategory.VENUE_EMPLOYEE, creation_ui_option=SituationCreationUIOption.NOT_AVAILABLE, _implies_greeted_status=False)
|
none
| 1
| 2.200637
| 2
|
|
checkout/tests/factories.py
|
kevin-ci/janeric2
| 1
|
6629212
|
import factory
from faker import Faker
from factory import lazy_attribute
from products.tests.factories import (
CategoryFactory,
Product_FamilyFactory,
ProductFactory,
)
fake = Faker()
quantity = factory.Faker("random_int", min=1, max=50)
#quantity = factory.LazyAttribute(lambda x: random.randrange(1, 30))
|
import factory
from faker import Faker
from factory import lazy_attribute
from products.tests.factories import (
CategoryFactory,
Product_FamilyFactory,
ProductFactory,
)
fake = Faker()
quantity = factory.Faker("random_int", min=1, max=50)
#quantity = factory.LazyAttribute(lambda x: random.randrange(1, 30))
|
en
| 0.162794
|
#quantity = factory.LazyAttribute(lambda x: random.randrange(1, 30))
| 2.373216
| 2
|
app.py
|
Build-Week-Spotify-Song-Suggester-01/back-end
| 1
|
6629213
|
# import statements
from flask import Flask, render_template, request
from spotipy.oauth2 import SpotifyClientCredentials
import spotipy
import matplotlib.pyplot as plt
import numpy as np
# making and configuring flask app
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['EXPLAIN_TEMPLATE_LOADING'] = True
# token for Spotify API
token = SpotifyClientCredentials(
client_id="<KEY>",
client_secret="4326ec02ca5e4df6b46066bb774006a4")
# connecting to Spotipy using token
sp = spotipy.Spotify(auth_manager=token)
def get_5_recs(song_name):
""" Function to use .recommendations() to
return 5 similar songs """
song = sp.search(q=song_name)
song_id = song['tracks']['items'][0]['id']
song_name = song['tracks']['items'][0]['name']
song_artist = song['tracks']['items'][0]['album']['artists'][0]['name']
# .recommendations returns 5 similar songs
recs = sp.recommendations(seed_tracks=[str(song_id)],
limit=5)['tracks']
track_ids = []
track_titles = []
track_artists = []
track_refs = []
artist_refs = []
# for loop to return track id, title, artist and the url
# for song recommendations
for rec in recs:
track_ids.append(rec['id'])
track_titles.append(rec['name'])
track_artists.append(rec['album']['artists'][0]['name'])
track_refs.append(rec['external_urls']['spotify'])
artist_refs.append(
rec['album']['artists'][0]['external_urls']['spotify'])
return track_ids, track_titles, track_artists, track_refs, artist_refs
def get_audio_features(track_ids, names):
""" Function to get audio features and return
as a dictionary """
features = []
for track in track_ids:
metrics = sp.audio_features(track)[0]
features_ = {k: v for k, v in metrics.items()}
features.append(features_)
return dict(zip(names, features))
def make_graphs(song):
'''makes 6 bar graphs displaying the audio features of
the input song and the 5 suggested songs'''
# get recommended songs
ids, names, artists, t_refs, a_refs = get_5_recs(song)
# get audio features of recommended songs
features = []
for track in ids:
features.append(sp.audio_features(track)[0])
# get features for original song
original_song = sp.search(q=song)
song_id = original_song['tracks']['items'][0]['id']
original_features = sp.audio_features(song_id)
# binary features are between 0 and 1
binary_feats = ['danceability', 'energy', 'speechiness', 'acousticness',
'instrumentalness', 'liveness', 'valence']
# other features are not between 0 and 1 but still numeric
other_feats = ['mode', 'time_signature', 'tempo',
'duration_ms', 'loudness']
# get binary feature values for original song
binary_vals = []
for feat in binary_feats:
binary_vals.append(original_features[0][feat])
# create subplots
fig, ax = plt.subplots(6, 1, sharex='col', sharey='col', figsize=(6, 8))
# configure subplots
plt.subplots_adjust(left=0.125, bottom=0.1, right=0.9,
top=1.5, wspace=0.2, hspace=0.5)
# plot the original song's metrics
y_pos = np.arange(len(binary_feats))
ax[0].barh(y_pos, binary_vals, align='center',
color=['b', 'm', 'g', 'b', 'tab:orange', 'y', 'r'],
edgecolor='k', linewidth=1)
# set up rest of graph
ax[0].set_yticks(y_pos)
ax[0].set_yticklabels(binary_feats)
ax[0].invert_yaxis() # labels read top-to-bottom
ax[0].set_xlabel('Rating')
ax[0].set_title('Original Song Metrics')
# loop through each suggested song
for i in range(len(ids)):
# get audio features for each song
binary_vals = []
for feat in binary_feats:
binary_vals.append(features[i][feat])
# plot each song and label the x-axis and title
ax[i+1].barh(y_pos, binary_vals, align='center',
color=['b', 'm', 'g', 'b', 'tab:orange', 'y', 'r'],
edgecolor='k', linewidth=1)
ax[i+1].set_xlabel('Rating')
ax[i+1].set_title(f'Suggested Song {i+1} Metrics')
plt.show()
@app.route('/', methods=["POST", "GET"])
def home():
""" Home function to return song and audio metrics """
# getting song and assigning to "name"
name = request.form.get('song_name')
artist = ''
artist_href = ''
song_name = ''
song_href = ''
release_date = ''
acoustic = None
dance = None
energy = None
live = None
speech = None
valence = None
instrument = None
titles = []
artists = []
ids = []
track_refs = []
artist_refs = []
if name:
song = sp.search(q=name)
artist = song['tracks']['items'][0]['album']['artists'][0]['name']
artist_href = song['tracks']['items'][0]['artists'][0]\
['external_urls']['spotify']
song_name = song['tracks']['items'][0]['name']
song_href = song['tracks']['items'][0]['external_urls']['spotify']
release_date = song['tracks']['items'][0]['album']['release_date']
# assigning song metrics to "features"
features = sp.audio_features(song_href)
acoustic = features[0]['acousticness']
dance = features[0]['danceability']
energy = features[0]['energy']
live = features[0]['liveness']
speech = features[0]['speechiness']
valence = features[0]['valence']
instrument = features[0]['instrumentalness']
ids, titles, artists, track_refs, artist_refs = get_5_recs(name)
# retuning the template for html encoding
return render_template('home.html', artist=artist, artist_href=artist_href,
song_name=song_name, song_href=song_href,
release_date=release_date, acoustic=acoustic,
dance=dance, energy=energy, live=live,
speech=speech, valence=valence, titles=titles,
artists=artists, track_refs=track_refs,
artist_refs=artist_refs, instrument=instrument)
|
# import statements
from flask import Flask, render_template, request
from spotipy.oauth2 import SpotifyClientCredentials
import spotipy
import matplotlib.pyplot as plt
import numpy as np
# making and configuring flask app
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['EXPLAIN_TEMPLATE_LOADING'] = True
# token for Spotify API
token = SpotifyClientCredentials(
client_id="<KEY>",
client_secret="4326ec02ca5e4df6b46066bb774006a4")
# connecting to Spotipy using token
sp = spotipy.Spotify(auth_manager=token)
def get_5_recs(song_name):
""" Function to use .recommendations() to
return 5 similar songs """
song = sp.search(q=song_name)
song_id = song['tracks']['items'][0]['id']
song_name = song['tracks']['items'][0]['name']
song_artist = song['tracks']['items'][0]['album']['artists'][0]['name']
# .recommendations returns 5 similar songs
recs = sp.recommendations(seed_tracks=[str(song_id)],
limit=5)['tracks']
track_ids = []
track_titles = []
track_artists = []
track_refs = []
artist_refs = []
# for loop to return track id, title, artist and the url
# for song recommendations
for rec in recs:
track_ids.append(rec['id'])
track_titles.append(rec['name'])
track_artists.append(rec['album']['artists'][0]['name'])
track_refs.append(rec['external_urls']['spotify'])
artist_refs.append(
rec['album']['artists'][0]['external_urls']['spotify'])
return track_ids, track_titles, track_artists, track_refs, artist_refs
def get_audio_features(track_ids, names):
""" Function to get audio features and return
as a dictionary """
features = []
for track in track_ids:
metrics = sp.audio_features(track)[0]
features_ = {k: v for k, v in metrics.items()}
features.append(features_)
return dict(zip(names, features))
def make_graphs(song):
'''makes 6 bar graphs displaying the audio features of
the input song and the 5 suggested songs'''
# get recommended songs
ids, names, artists, t_refs, a_refs = get_5_recs(song)
# get audio features of recommended songs
features = []
for track in ids:
features.append(sp.audio_features(track)[0])
# get features for original song
original_song = sp.search(q=song)
song_id = original_song['tracks']['items'][0]['id']
original_features = sp.audio_features(song_id)
# binary features are between 0 and 1
binary_feats = ['danceability', 'energy', 'speechiness', 'acousticness',
'instrumentalness', 'liveness', 'valence']
# other features are not between 0 and 1 but still numeric
other_feats = ['mode', 'time_signature', 'tempo',
'duration_ms', 'loudness']
# get binary feature values for original song
binary_vals = []
for feat in binary_feats:
binary_vals.append(original_features[0][feat])
# create subplots
fig, ax = plt.subplots(6, 1, sharex='col', sharey='col', figsize=(6, 8))
# configure subplots
plt.subplots_adjust(left=0.125, bottom=0.1, right=0.9,
top=1.5, wspace=0.2, hspace=0.5)
# plot the original song's metrics
y_pos = np.arange(len(binary_feats))
ax[0].barh(y_pos, binary_vals, align='center',
color=['b', 'm', 'g', 'b', 'tab:orange', 'y', 'r'],
edgecolor='k', linewidth=1)
# set up rest of graph
ax[0].set_yticks(y_pos)
ax[0].set_yticklabels(binary_feats)
ax[0].invert_yaxis() # labels read top-to-bottom
ax[0].set_xlabel('Rating')
ax[0].set_title('Original Song Metrics')
# loop through each suggested song
for i in range(len(ids)):
# get audio features for each song
binary_vals = []
for feat in binary_feats:
binary_vals.append(features[i][feat])
# plot each song and label the x-axis and title
ax[i+1].barh(y_pos, binary_vals, align='center',
color=['b', 'm', 'g', 'b', 'tab:orange', 'y', 'r'],
edgecolor='k', linewidth=1)
ax[i+1].set_xlabel('Rating')
ax[i+1].set_title(f'Suggested Song {i+1} Metrics')
plt.show()
@app.route('/', methods=["POST", "GET"])
def home():
""" Home function to return song and audio metrics """
# getting song and assigning to "name"
name = request.form.get('song_name')
artist = ''
artist_href = ''
song_name = ''
song_href = ''
release_date = ''
acoustic = None
dance = None
energy = None
live = None
speech = None
valence = None
instrument = None
titles = []
artists = []
ids = []
track_refs = []
artist_refs = []
if name:
song = sp.search(q=name)
artist = song['tracks']['items'][0]['album']['artists'][0]['name']
artist_href = song['tracks']['items'][0]['artists'][0]\
['external_urls']['spotify']
song_name = song['tracks']['items'][0]['name']
song_href = song['tracks']['items'][0]['external_urls']['spotify']
release_date = song['tracks']['items'][0]['album']['release_date']
# assigning song metrics to "features"
features = sp.audio_features(song_href)
acoustic = features[0]['acousticness']
dance = features[0]['danceability']
energy = features[0]['energy']
live = features[0]['liveness']
speech = features[0]['speechiness']
valence = features[0]['valence']
instrument = features[0]['instrumentalness']
ids, titles, artists, track_refs, artist_refs = get_5_recs(name)
# retuning the template for html encoding
return render_template('home.html', artist=artist, artist_href=artist_href,
song_name=song_name, song_href=song_href,
release_date=release_date, acoustic=acoustic,
dance=dance, energy=energy, live=live,
speech=speech, valence=valence, titles=titles,
artists=artists, track_refs=track_refs,
artist_refs=artist_refs, instrument=instrument)
|
en
| 0.877218
|
# import statements # making and configuring flask app # token for Spotify API # connecting to Spotipy using token Function to use .recommendations() to return 5 similar songs # .recommendations returns 5 similar songs # for loop to return track id, title, artist and the url # for song recommendations Function to get audio features and return as a dictionary makes 6 bar graphs displaying the audio features of the input song and the 5 suggested songs # get recommended songs # get audio features of recommended songs # get features for original song # binary features are between 0 and 1 # other features are not between 0 and 1 but still numeric # get binary feature values for original song # create subplots # configure subplots # plot the original song's metrics # set up rest of graph # labels read top-to-bottom # loop through each suggested song # get audio features for each song # plot each song and label the x-axis and title Home function to return song and audio metrics # getting song and assigning to "name" # assigning song metrics to "features" # retuning the template for html encoding
| 3.288523
| 3
|
data/utils.py
|
jlorenze/asl_fixedwing
| 4
|
6629214
|
from os.path import dirname, abspath, join, isfile, isdir
import sys
import rosbag
import numpy as np
def get_data_dir():
return dirname(abspath(__file__))
def get_models_dir():
return join(dirname(dirname(abspath(__file__))), 'models')
def get_utils_dir():
return join(dirname(dirname(abspath(__file__))), 'src/utils')
class pointStream:
def __init__(self):
self.x = []
self.y = []
self.z = []
self.t = []
def add_point(self, t, x, y, z):
self.x.append(x)
self.y.append(y)
self.z.append(z)
self.t.append(t)
class ctrlStream:
def __init__(self):
self.u = [[], [], [], []]
self.t = []
def add_point(self, t, u):
self.t.append(t)
for i in range(4):
self.u[i].append(u[i])
class zStream:
def __init__(self):
self.z = []
self.t = []
def add_point(self, t, z):
self.t.append(t)
if not self.z:
self.z = [[z[i]] for i in range(len(z))]
else:
for i in range(len(z)):
self.z[i].append(z[i])
class planeData:
"""
A class to extract data from Plane topic messages
"""
def __init__(self):
self.pos = pointStream() # inertial pos x_i, y_i, z_i [m]
self.vel = pointStream() # body frame vel u, v, w [m/s]
self.euler = pointStream() # euler angle phi, th, psi [rad]
self.om = pointStream() # body rate p, q, r [rad/s]
self.act = ctrlStream() # thrust [N] and ctrl srf def [rad]
self.nrmlzd_act = ctrlStream() # normalized actuators
# Total velocity
self.vel.V = []
def add_msg(self, topic, msg, t):
"""
Add a piece of data from a ROS message
"""
if topic == 'position':
self.pos.add_point(t, msg.point.x, msg.point.y, msg.point.z)
elif topic == 'velocity':
self.vel.add_point(t, msg.point.x, msg.point.y, msg.point.z)
self.vel.V.append(np.sqrt(msg.point.x**2 + msg.point.y**2 + msg.point.z**2))
elif topic == 'euler':
self.euler.add_point(t, msg.point.x, msg.point.y, msg.point.z)
elif topic == 'bodyrate':
self.om.add_point(t, msg.point.x, msg.point.y, msg.point.z)
elif topic == 'actuators':
self.act.add_point(t, msg.controls)
elif topic == 'target_actuator_control':
self.nrmlzd_act.add_point(t, msg.controls)
class rompcData:
"""
A class to extract data from ROMPC topic messages
Note e_att is either euler angles or axis/angle param depending
on the type of model used.
Note e_attrate is either body rates p,q,r or axis/angle rates depending
on the type of model used.
"""
def __init__(self):
self.e_pos = pointStream() # pos error x_r, y_r, z_r [m]
self.e_vel = pointStream() # body frame vel error [m/s]
self.e_att = pointStream() # attitude error [rad]
self.e_attrate = pointStream() # attitude rate error [rad/s]
self.ubar = ctrlStream() # nominal control minus eq. control
self.u = ctrlStream() # control minus eq. control
self.zbar = zStream()
self.zhat = zStream()
self.u_prev = ctrlStream() # Control used in state estimator
self.y = zStream()
self.qp_solve_time = zStream()
def add_msg(self, topic, msg, t):
"""
Add a piece of data from a ROS message
"""
if topic == 'pos_error':
self.e_pos.add_point(t, msg.point.x, msg.point.y, msg.point.z)
elif topic == 'vel_error':
self.e_vel.add_point(t, msg.point.x, msg.point.y, msg.point.z)
elif topic == 'att_error':
self.e_att.add_point(t, msg.point.x, msg.point.y, msg.point.z)
elif topic == 'attrate_error':
self.e_attrate.add_point(t, msg.point.x, msg.point.y, msg.point.z)
elif topic == 'ubar':
self.ubar.add_point(t, msg.data)
elif topic == 'u':
self.u.add_point(t, msg.data)
elif topic == 'zbar':
self.zbar.add_point(t, msg.data)
elif topic == 'zhat':
self.zhat.add_point(t, msg.data)
elif topic == 'u_prev':
self.u_prev.add_point(t, msg.data)
elif topic == 'y':
self.y.add_point(t, msg.data)
elif topic == 'qp_solve_time':
self.qp_solve_time.add_point(t, [msg.data])
class RosbagData:
"""
This class extracts rosbag data
"""
def __init__(self, fpath):
self.plane = planeData()
self.rompc = rompcData()
self.t0 = None
bag = rosbag.Bag(fpath)
topics = ['/plane/position', '/plane/velocity',
'/plane/euler', '/plane/bodyrate',
'/plane/actuators', '/rompc/pos_error',
'/rompc/vel_error', '/rompc/att_error',
'/rompc/attrate_error',
'/rompc/ubar', '/rompc/u',
'/rompc/zbar', '/rompc/zhat',
'/rompc/u_prev', '/rompc/y',
'/rompc/qp_solve_time',
'/mavros/target_actuator_control']
for topic, msg, t in bag.read_messages(topics=topics):
self.add_msg(msg, topic)
def extract_time(self, msg):
t = msg.header.stamp.secs + msg.header.stamp.nsecs/1e9
if self.t0 is None:
self.t0 = t
return t - self.t0
def add_msg(self, msg, topic):
main, sub = topic.split('/')[1:3]
if sub == 'qp_solve_time':
t = 0
else:
t = self.extract_time(msg)
if main == 'plane' or main == 'mavros':
self.plane.add_msg(sub, msg, t)
elif main == 'rompc':
self.rompc.add_msg(sub, msg, t)
if __name__ == '__main__':
data_dir = get_data_dir()
fpath = join(data_dir, 'rompc.bag')
data = RosbagData(fpath)
|
from os.path import dirname, abspath, join, isfile, isdir
import sys
import rosbag
import numpy as np
def get_data_dir():
return dirname(abspath(__file__))
def get_models_dir():
return join(dirname(dirname(abspath(__file__))), 'models')
def get_utils_dir():
return join(dirname(dirname(abspath(__file__))), 'src/utils')
class pointStream:
def __init__(self):
self.x = []
self.y = []
self.z = []
self.t = []
def add_point(self, t, x, y, z):
self.x.append(x)
self.y.append(y)
self.z.append(z)
self.t.append(t)
class ctrlStream:
def __init__(self):
self.u = [[], [], [], []]
self.t = []
def add_point(self, t, u):
self.t.append(t)
for i in range(4):
self.u[i].append(u[i])
class zStream:
def __init__(self):
self.z = []
self.t = []
def add_point(self, t, z):
self.t.append(t)
if not self.z:
self.z = [[z[i]] for i in range(len(z))]
else:
for i in range(len(z)):
self.z[i].append(z[i])
class planeData:
"""
A class to extract data from Plane topic messages
"""
def __init__(self):
self.pos = pointStream() # inertial pos x_i, y_i, z_i [m]
self.vel = pointStream() # body frame vel u, v, w [m/s]
self.euler = pointStream() # euler angle phi, th, psi [rad]
self.om = pointStream() # body rate p, q, r [rad/s]
self.act = ctrlStream() # thrust [N] and ctrl srf def [rad]
self.nrmlzd_act = ctrlStream() # normalized actuators
# Total velocity
self.vel.V = []
def add_msg(self, topic, msg, t):
"""
Add a piece of data from a ROS message
"""
if topic == 'position':
self.pos.add_point(t, msg.point.x, msg.point.y, msg.point.z)
elif topic == 'velocity':
self.vel.add_point(t, msg.point.x, msg.point.y, msg.point.z)
self.vel.V.append(np.sqrt(msg.point.x**2 + msg.point.y**2 + msg.point.z**2))
elif topic == 'euler':
self.euler.add_point(t, msg.point.x, msg.point.y, msg.point.z)
elif topic == 'bodyrate':
self.om.add_point(t, msg.point.x, msg.point.y, msg.point.z)
elif topic == 'actuators':
self.act.add_point(t, msg.controls)
elif topic == 'target_actuator_control':
self.nrmlzd_act.add_point(t, msg.controls)
class rompcData:
"""
A class to extract data from ROMPC topic messages
Note e_att is either euler angles or axis/angle param depending
on the type of model used.
Note e_attrate is either body rates p,q,r or axis/angle rates depending
on the type of model used.
"""
def __init__(self):
self.e_pos = pointStream() # pos error x_r, y_r, z_r [m]
self.e_vel = pointStream() # body frame vel error [m/s]
self.e_att = pointStream() # attitude error [rad]
self.e_attrate = pointStream() # attitude rate error [rad/s]
self.ubar = ctrlStream() # nominal control minus eq. control
self.u = ctrlStream() # control minus eq. control
self.zbar = zStream()
self.zhat = zStream()
self.u_prev = ctrlStream() # Control used in state estimator
self.y = zStream()
self.qp_solve_time = zStream()
def add_msg(self, topic, msg, t):
"""
Add a piece of data from a ROS message
"""
if topic == 'pos_error':
self.e_pos.add_point(t, msg.point.x, msg.point.y, msg.point.z)
elif topic == 'vel_error':
self.e_vel.add_point(t, msg.point.x, msg.point.y, msg.point.z)
elif topic == 'att_error':
self.e_att.add_point(t, msg.point.x, msg.point.y, msg.point.z)
elif topic == 'attrate_error':
self.e_attrate.add_point(t, msg.point.x, msg.point.y, msg.point.z)
elif topic == 'ubar':
self.ubar.add_point(t, msg.data)
elif topic == 'u':
self.u.add_point(t, msg.data)
elif topic == 'zbar':
self.zbar.add_point(t, msg.data)
elif topic == 'zhat':
self.zhat.add_point(t, msg.data)
elif topic == 'u_prev':
self.u_prev.add_point(t, msg.data)
elif topic == 'y':
self.y.add_point(t, msg.data)
elif topic == 'qp_solve_time':
self.qp_solve_time.add_point(t, [msg.data])
class RosbagData:
"""
This class extracts rosbag data
"""
def __init__(self, fpath):
self.plane = planeData()
self.rompc = rompcData()
self.t0 = None
bag = rosbag.Bag(fpath)
topics = ['/plane/position', '/plane/velocity',
'/plane/euler', '/plane/bodyrate',
'/plane/actuators', '/rompc/pos_error',
'/rompc/vel_error', '/rompc/att_error',
'/rompc/attrate_error',
'/rompc/ubar', '/rompc/u',
'/rompc/zbar', '/rompc/zhat',
'/rompc/u_prev', '/rompc/y',
'/rompc/qp_solve_time',
'/mavros/target_actuator_control']
for topic, msg, t in bag.read_messages(topics=topics):
self.add_msg(msg, topic)
def extract_time(self, msg):
t = msg.header.stamp.secs + msg.header.stamp.nsecs/1e9
if self.t0 is None:
self.t0 = t
return t - self.t0
def add_msg(self, msg, topic):
main, sub = topic.split('/')[1:3]
if sub == 'qp_solve_time':
t = 0
else:
t = self.extract_time(msg)
if main == 'plane' or main == 'mavros':
self.plane.add_msg(sub, msg, t)
elif main == 'rompc':
self.rompc.add_msg(sub, msg, t)
if __name__ == '__main__':
data_dir = get_data_dir()
fpath = join(data_dir, 'rompc.bag')
data = RosbagData(fpath)
|
en
| 0.633108
|
A class to extract data from Plane topic messages # inertial pos x_i, y_i, z_i [m] # body frame vel u, v, w [m/s] # euler angle phi, th, psi [rad] # body rate p, q, r [rad/s] # thrust [N] and ctrl srf def [rad] # normalized actuators # Total velocity Add a piece of data from a ROS message A class to extract data from ROMPC topic messages Note e_att is either euler angles or axis/angle param depending on the type of model used. Note e_attrate is either body rates p,q,r or axis/angle rates depending on the type of model used. # pos error x_r, y_r, z_r [m] # body frame vel error [m/s] # attitude error [rad] # attitude rate error [rad/s] # nominal control minus eq. control # control minus eq. control # Control used in state estimator Add a piece of data from a ROS message This class extracts rosbag data
| 2.765501
| 3
|
6_ObjectOriented/exceptions.py
|
felixdittrich92/Python3
| 1
|
6629215
|
<filename>6_ObjectOriented/exceptions.py
import numbers
import builtins
from math import sqrt
from functools import total_ordering
@total_ordering
class Vector2D:
def __init__(self, x=0, y=0):
if isinstance(x, numbers.Real) and isinstance(y, numbers.Real): # Numbers.Real = reele Zahl
self.x = x
self.y = y
else:
raise TypeError('You must pass in int/float values for x and y!') # welche Exception geworfen werden soll
def __call__(self):
print("Calling the __call__ function!")
return self.__repr__()
def __repr__(self):
return 'vector.Vector2D({}, {})'.format(self.x, self.y)
def __str__(self):
return '({}, {})'.format(self.x, self.y)
def __bool__(self):
return bool(abs(self))
def __abs__(self):
return sqrt(pow(self.x, 2) + pow(self.y, 2))
def check_vector_types(self, vector2):
if not isinstance(self, Vector2D) or not isinstance(vector2, Vector2D):
raise TypeError('You have to pass in two instances of the vector class!')
def __eq__(self, other_vector):
self.check_vector_types(other_vector) # überprüfen ob Vector
if self.x == other_vector.x and self.y == other_vector.y:
return True
else:
return False
def __lt__(self, other_vector):
self.check_vector_types(other_vector) # überprüfen ob Vector
if abs(self) < abs(other_vector):
return True
else:
return False
def __add__(self, other_vector):
self.check_vector_types(other_vector) # überprüfen ob Vector
x = self.x + other_vector.x
y = self.y + other_vector.y
return Vector2D(x, y)
# try (== 1):
# except (>= 1):
# finally (optional):
def __sub__(self, other_vector):
try:
x = self.x - other_vector.x
y = self.y - other_vector.y
return Vector2D(x, y)
except AttributeError as e:
print("AttributeError: {} was raised!".format(e))
return self
except Exception as e:
print("Exception {}: {} was raised!".format(type(e), e))
finally: # wird immer am Schluss ausgeführt
print("finally")
def __mul__(self, other):
if isinstance(other, Vector2D):
return self.x * other.x + self.y * other.y
elif isinstance(other, numbers.Real):
return Vector2D(self.x * other, self.y * other)
else:
raise TypeError('You must pass in a vector instance or an int/float number!')
def __truediv__(self, other):
if isinstance(other, numbers.Real):
if other != 0.0:
return Vector2D(self.x / other, self.y / other)
else:
raise ValueError('You cannot divide by zero!') # Werterror wenn 0
else:
raise TypeError('You must pass in an int/float value!')
# alle Exceptions
builtin_list = [builtin for builtin in dir(builtins) if 'Error' in builtin]
print(builtin_list)
print('\n')
v1 = Vector2D(3, 2)
v2 = Vector2D(1, 2)
print(v1 - 2)
# v3 = Vector2D('S', 'e')
|
<filename>6_ObjectOriented/exceptions.py
import numbers
import builtins
from math import sqrt
from functools import total_ordering
@total_ordering
class Vector2D:
def __init__(self, x=0, y=0):
if isinstance(x, numbers.Real) and isinstance(y, numbers.Real): # Numbers.Real = reele Zahl
self.x = x
self.y = y
else:
raise TypeError('You must pass in int/float values for x and y!') # welche Exception geworfen werden soll
def __call__(self):
print("Calling the __call__ function!")
return self.__repr__()
def __repr__(self):
return 'vector.Vector2D({}, {})'.format(self.x, self.y)
def __str__(self):
return '({}, {})'.format(self.x, self.y)
def __bool__(self):
return bool(abs(self))
def __abs__(self):
return sqrt(pow(self.x, 2) + pow(self.y, 2))
def check_vector_types(self, vector2):
if not isinstance(self, Vector2D) or not isinstance(vector2, Vector2D):
raise TypeError('You have to pass in two instances of the vector class!')
def __eq__(self, other_vector):
self.check_vector_types(other_vector) # überprüfen ob Vector
if self.x == other_vector.x and self.y == other_vector.y:
return True
else:
return False
def __lt__(self, other_vector):
self.check_vector_types(other_vector) # überprüfen ob Vector
if abs(self) < abs(other_vector):
return True
else:
return False
def __add__(self, other_vector):
self.check_vector_types(other_vector) # überprüfen ob Vector
x = self.x + other_vector.x
y = self.y + other_vector.y
return Vector2D(x, y)
# try (== 1):
# except (>= 1):
# finally (optional):
def __sub__(self, other_vector):
try:
x = self.x - other_vector.x
y = self.y - other_vector.y
return Vector2D(x, y)
except AttributeError as e:
print("AttributeError: {} was raised!".format(e))
return self
except Exception as e:
print("Exception {}: {} was raised!".format(type(e), e))
finally: # wird immer am Schluss ausgeführt
print("finally")
def __mul__(self, other):
if isinstance(other, Vector2D):
return self.x * other.x + self.y * other.y
elif isinstance(other, numbers.Real):
return Vector2D(self.x * other, self.y * other)
else:
raise TypeError('You must pass in a vector instance or an int/float number!')
def __truediv__(self, other):
if isinstance(other, numbers.Real):
if other != 0.0:
return Vector2D(self.x / other, self.y / other)
else:
raise ValueError('You cannot divide by zero!') # Werterror wenn 0
else:
raise TypeError('You must pass in an int/float value!')
# alle Exceptions
builtin_list = [builtin for builtin in dir(builtins) if 'Error' in builtin]
print(builtin_list)
print('\n')
v1 = Vector2D(3, 2)
v2 = Vector2D(1, 2)
print(v1 - 2)
# v3 = Vector2D('S', 'e')
|
de
| 0.868178
|
# Numbers.Real = reele Zahl # welche Exception geworfen werden soll # überprüfen ob Vector # überprüfen ob Vector # überprüfen ob Vector # try (== 1): # except (>= 1): # finally (optional): # wird immer am Schluss ausgeführt # Werterror wenn 0 # alle Exceptions # v3 = Vector2D('S', 'e')
| 3.371463
| 3
|
coworker/place/apps.py
|
flybackl/spacesmap
| 0
|
6629216
|
from django.apps import AppConfig
class PlaceConfig(AppConfig):
name = 'coworker.place'
|
from django.apps import AppConfig
class PlaceConfig(AppConfig):
name = 'coworker.place'
|
none
| 1
| 1.17902
| 1
|
|
src/SyntOn_Classifier.py
|
docking-org/SynthI
| 0
|
6629217
|
<gh_stars>0
from rdkit import Chem
from rdkit.Chem.rdMolDescriptors import *
import os, json,sys
srcPath = os.path.split(os.path.realpath(__file__))[0]
sys.path.insert(1, srcPath)
from UsefulFunctions import *
def main(args):
for ind, line in enumerate(open(args.input)):
sline = line.strip()
if sline and ind != 0:
if ind % 1000 == 0:
print(str(ind))
Classes = BBClassifier(molSmiles=sline.split()[0])
if Classes:
for Class in Classes:
with open(Class + ".smi", "a") as out:
out.write(line)
elif Classes == None:
with open("Not_Processed.smi", "a") as out:
out.write(line)
else:
with open("Not_Classified.smi", "a") as out:
out.write(line)
def BBClassifier(molSmiles=None, mol=None):
SMARTSLib = os.path.join(os.path.split(os.path.split(os.path.realpath(__file__))[0])[0], "config" ,"SMARTSLibNew.json")
Classes = []
with open(SMARTSLib) as input:
SmartsLib = json.load(input)
if molSmiles!=None and mol==None:
mol = readMol(molSmiles)
if mol != None:
mol.UpdatePropertyCache()
else:
print(molSmiles + " was not processed by rdkit")
return None
elif molSmiles==None and mol==None:
print("ERROR! Input Smiles or Mol object should be provided")
exit()
for bigClass in SmartsLib:
for subClass in SmartsLib[bigClass]:
if __classChecker(SmartsLib[bigClass][subClass]["ShouldContainAtLeastOne"],
SmartsLib[bigClass][subClass]["ShouldAlsoContain"],
SmartsLib[bigClass][subClass]["shouldNotContain"], mol):
Classes.append(bigClass + "_" + subClass)
return Classes
def __classChecker(ShouldContainAtLeastOne, ShouldAlsoContain, shouldNotContain, mol):
match = False
for query1 in ShouldContainAtLeastOne:
if mol.HasSubstructMatch(Chem.MolFromSmarts(query1)):
match = True
break
if match and ShouldAlsoContain:
for query2 in ShouldAlsoContain:
if not mol.HasSubstructMatch(Chem.MolFromSmarts(query2)):
match = False
break
if match and shouldNotContain:
for query3 in shouldNotContain:
q3 =Chem.MolFromSmarts(query3)
#q3.UpdatePropertyCache()
ttt = mol.HasSubstructMatch(q3)
if ttt:
match = False
return match
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description="Classification of building blocks. Separates provided library into several sublibraries according to the reagents classess.",
epilog="Code implementation: <NAME>, <NAME>\n"
" Laboratoire de Chémoinformatique, Université de Strasbourg.\n\n"
"Knowledge base (SMARTS library): <NAME>, <NAME>, <NAME>, <NAME>\n"
" Institute of Organic Chemistry, National Academy of Sciences of Ukraine\n"
" Kyiv National Taras Shevchenko University\n"
"2021 Strasbourg, Kiev",
prog="SyntOn_Classifier", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-i", "--input", type=str, help="Input SMILES file")
args = parser.parse_args()
main(args)
|
from rdkit import Chem
from rdkit.Chem.rdMolDescriptors import *
import os, json,sys
srcPath = os.path.split(os.path.realpath(__file__))[0]
sys.path.insert(1, srcPath)
from UsefulFunctions import *
def main(args):
for ind, line in enumerate(open(args.input)):
sline = line.strip()
if sline and ind != 0:
if ind % 1000 == 0:
print(str(ind))
Classes = BBClassifier(molSmiles=sline.split()[0])
if Classes:
for Class in Classes:
with open(Class + ".smi", "a") as out:
out.write(line)
elif Classes == None:
with open("Not_Processed.smi", "a") as out:
out.write(line)
else:
with open("Not_Classified.smi", "a") as out:
out.write(line)
def BBClassifier(molSmiles=None, mol=None):
SMARTSLib = os.path.join(os.path.split(os.path.split(os.path.realpath(__file__))[0])[0], "config" ,"SMARTSLibNew.json")
Classes = []
with open(SMARTSLib) as input:
SmartsLib = json.load(input)
if molSmiles!=None and mol==None:
mol = readMol(molSmiles)
if mol != None:
mol.UpdatePropertyCache()
else:
print(molSmiles + " was not processed by rdkit")
return None
elif molSmiles==None and mol==None:
print("ERROR! Input Smiles or Mol object should be provided")
exit()
for bigClass in SmartsLib:
for subClass in SmartsLib[bigClass]:
if __classChecker(SmartsLib[bigClass][subClass]["ShouldContainAtLeastOne"],
SmartsLib[bigClass][subClass]["ShouldAlsoContain"],
SmartsLib[bigClass][subClass]["shouldNotContain"], mol):
Classes.append(bigClass + "_" + subClass)
return Classes
def __classChecker(ShouldContainAtLeastOne, ShouldAlsoContain, shouldNotContain, mol):
match = False
for query1 in ShouldContainAtLeastOne:
if mol.HasSubstructMatch(Chem.MolFromSmarts(query1)):
match = True
break
if match and ShouldAlsoContain:
for query2 in ShouldAlsoContain:
if not mol.HasSubstructMatch(Chem.MolFromSmarts(query2)):
match = False
break
if match and shouldNotContain:
for query3 in shouldNotContain:
q3 =Chem.MolFromSmarts(query3)
#q3.UpdatePropertyCache()
ttt = mol.HasSubstructMatch(q3)
if ttt:
match = False
return match
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description="Classification of building blocks. Separates provided library into several sublibraries according to the reagents classess.",
epilog="Code implementation: <NAME>, <NAME>\n"
" Laboratoire de Chémoinformatique, Université de Strasbourg.\n\n"
"Knowledge base (SMARTS library): <NAME>, <NAME>, <NAME>, <NAME>\n"
" Institute of Organic Chemistry, National Academy of Sciences of Ukraine\n"
" Kyiv National Taras Shevchenko University\n"
"2021 Strasbourg, Kiev",
prog="SyntOn_Classifier", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-i", "--input", type=str, help="Input SMILES file")
args = parser.parse_args()
main(args)
|
zh
| 0.207444
|
#q3.UpdatePropertyCache()
| 2.40993
| 2
|
python/testData/resolve/multiFile/moduleValueCollision/ModuleValueCollision.py
|
jnthn/intellij-community
| 2
|
6629218
|
# we import a value
from boo import BOO
print(BOO)
# <ref>
|
# we import a value
from boo import BOO
print(BOO)
# <ref>
|
es
| 0.241152
|
# we import a value # <ref>
| 1.550741
| 2
|
haystack/sites.py
|
disqus/django-haystack
| 1
|
6629219
|
<reponame>disqus/django-haystack
import copy
from haystack.exceptions import AlreadyRegistered, NotRegistered, SearchFieldError
class SearchSite(object):
"""
Encapsulates all the indexes that should be available.
This allows you to register indexes on models you don't control (reusable
apps, django.contrib, etc.) as well as customize on a per-site basis what
indexes should be available (different indexes for different sites, same
codebase).
A SearchSite instance should be instantiated in your URLconf, since all
models will have been loaded by that point.
The API intentionally follows that of django.contrib.admin's AdminSite as
much as it makes sense to do.
"""
def __init__(self, backend=None):
self._registry = {}
self._cached_field_mapping = None
self.backend = backend
def register(self, model, index_class=None, backend=None):
"""
Registers a model with the site.
The model should be a Model class, not instances.
If no custom index is provided, a generic SearchIndex will be applied
to the model.
"""
if not index_class:
from haystack.indexes import BasicSearchIndex
index_class = BasicSearchIndex
if not hasattr(model, '_meta'):
raise AttributeError('The model being registered must derive from Model.')
if model in self._registry:
raise AlreadyRegistered('The model %s is already registered' % model.__class__)
self._registry[model] = index_class(model, backend or self.backend)
self._setup(model, self._registry[model])
def unregister(self, model):
"""
Unregisters a model from the site.
"""
if model not in self._registry:
raise NotRegistered('The model %s is not registered' % model.__class__)
self._teardown(model, self._registry[model])
del(self._registry[model])
def _setup(self, model, index):
index._setup_save(model)
index._setup_delete(model)
def _teardown(self, model, index):
index._teardown_save(model)
index._teardown_delete(model)
def get_index(self, model):
"""Provide the index that're being used for a particular model."""
if model not in self._registry:
raise NotRegistered('The model %s is not registered' % model.__class__)
return self._registry[model]
def get_indexes(self):
"""Provide a dict of all indexes that're being used."""
return self._registry
def get_indexed_models(self):
"""Provide a list of all models being indexed."""
return self._registry.keys()
def all_searchfields(self):
"""
Builds a dictionary of all fields appearing in any of the `SearchIndex`
instances registered with a site.
This is useful when building a schema for an engine. A dictionary is
returned, with each key being a fieldname (or index_fieldname) and the
value being the `SearchField` class assigned to it.
"""
content_field_name = ''
fields = {}
for model, index in self.get_indexes().items():
for field_name, field_object in index.fields.items():
if field_object.document is True:
if content_field_name != '' and content_field_name != field_object.index_fieldname:
raise SearchFieldError("All SearchIndex fields with 'document=True' must use the same fieldname.")
content_field_name = field_object.index_fieldname
if not field_object.index_fieldname in fields:
fields[field_object.index_fieldname] = field_object
fields[field_object.index_fieldname] = copy.copy(field_object)
else:
# If the field types are different, we can mostly
# safely ignore this. The exception is ``MultiValueField``,
# in which case we'll use it instead, copying over the
# values.
if field_object.is_multivalued == True:
old_field = fields[field_object.index_fieldname]
fields[field_object.index_fieldname] = field_object
fields[field_object.index_fieldname] = copy.copy(field_object)
# Switch it so we don't have to dupe the remaining
# checks.
field_object = old_field
# We've already got this field in the list. Ensure that
# what we hand back is a superset of all options that
# affect the schema.
if field_object.indexed is True:
fields[field_object.index_fieldname].indexed = True
if field_object.stored is True:
fields[field_object.index_fieldname].stored = True
if field_object.faceted is True:
fields[field_object.index_fieldname].faceted = True
if field_object.use_template is True:
fields[field_object.index_fieldname].use_template = True
if field_object.null is True:
fields[field_object.index_fieldname].null = True
return fields
def get_index_fieldname(self, fieldname):
"""
Returns the actual name of the field in the index.
If not found, returns the fieldname provided.
This is useful because it handles the case where a ``index_fieldname``
was provided, allowing the user to use the variable name from their
``SearchIndex`` instead of having to remember & use the overridden
name.
"""
if fieldname in self._field_mapping():
return self._field_mapping()[fieldname]['index_fieldname']
else:
return fieldname
def get_facet_field_name(self, fieldname):
"""
Returns the actual name of the facet field in the index.
If not found, returns the fieldname provided.
"""
facet_fieldname = None
reverse_map = {}
for field, info in self._field_mapping().items():
if info['facet_fieldname'] and info['facet_fieldname'] == fieldname:
return info['index_fieldname']
return self.get_index_fieldname(fieldname)
def _field_mapping(self):
mapping = {}
if self._cached_field_mapping:
return self._cached_field_mapping
for model, index in self.get_indexes().items():
for field_name, field_object in index.fields.items():
if field_name in mapping and field_object.index_fieldname != mapping[field_name]['index_fieldname']:
# We've already seen this field in the list. Raise an exception if index_fieldname differs.
raise SearchFieldError("All uses of the '%s' field need to use the same 'index_fieldname' attribute." % field_name)
facet_fieldname = None
if hasattr(field_object, 'facet_for'):
if field_object.facet_for:
facet_fieldname = field_object.facet_for
else:
facet_fieldname = field_object.instance_name
mapping[field_name] = {
'index_fieldname': field_object.index_fieldname,
'facet_fieldname': facet_fieldname,
}
self._cached_field_mapping = mapping
return mapping
def update_object(self, instance):
"""
Updates the instance's data in the index.
A shortcut for updating on the instance's index. Errors from `get_index`
and `update_object` will be allowed to propogate.
"""
return self.get_index(type(instance)).update_object(instance)
def remove_object(self, instance):
"""
Removes the instance's data in the index.
A shortcut for removing on the instance's index. Errors from `get_index`
and `remove_object` will be allowed to propogate.
"""
return self.get_index(type(instance)).remove_object(instance)
# The common case. Feel free to override/replace/define your own in your URLconfs.
site = SearchSite()
|
import copy
from haystack.exceptions import AlreadyRegistered, NotRegistered, SearchFieldError
class SearchSite(object):
"""
Encapsulates all the indexes that should be available.
This allows you to register indexes on models you don't control (reusable
apps, django.contrib, etc.) as well as customize on a per-site basis what
indexes should be available (different indexes for different sites, same
codebase).
A SearchSite instance should be instantiated in your URLconf, since all
models will have been loaded by that point.
The API intentionally follows that of django.contrib.admin's AdminSite as
much as it makes sense to do.
"""
def __init__(self, backend=None):
self._registry = {}
self._cached_field_mapping = None
self.backend = backend
def register(self, model, index_class=None, backend=None):
"""
Registers a model with the site.
The model should be a Model class, not instances.
If no custom index is provided, a generic SearchIndex will be applied
to the model.
"""
if not index_class:
from haystack.indexes import BasicSearchIndex
index_class = BasicSearchIndex
if not hasattr(model, '_meta'):
raise AttributeError('The model being registered must derive from Model.')
if model in self._registry:
raise AlreadyRegistered('The model %s is already registered' % model.__class__)
self._registry[model] = index_class(model, backend or self.backend)
self._setup(model, self._registry[model])
def unregister(self, model):
"""
Unregisters a model from the site.
"""
if model not in self._registry:
raise NotRegistered('The model %s is not registered' % model.__class__)
self._teardown(model, self._registry[model])
del(self._registry[model])
def _setup(self, model, index):
index._setup_save(model)
index._setup_delete(model)
def _teardown(self, model, index):
index._teardown_save(model)
index._teardown_delete(model)
def get_index(self, model):
"""Provide the index that're being used for a particular model."""
if model not in self._registry:
raise NotRegistered('The model %s is not registered' % model.__class__)
return self._registry[model]
def get_indexes(self):
"""Provide a dict of all indexes that're being used."""
return self._registry
def get_indexed_models(self):
"""Provide a list of all models being indexed."""
return self._registry.keys()
def all_searchfields(self):
"""
Builds a dictionary of all fields appearing in any of the `SearchIndex`
instances registered with a site.
This is useful when building a schema for an engine. A dictionary is
returned, with each key being a fieldname (or index_fieldname) and the
value being the `SearchField` class assigned to it.
"""
content_field_name = ''
fields = {}
for model, index in self.get_indexes().items():
for field_name, field_object in index.fields.items():
if field_object.document is True:
if content_field_name != '' and content_field_name != field_object.index_fieldname:
raise SearchFieldError("All SearchIndex fields with 'document=True' must use the same fieldname.")
content_field_name = field_object.index_fieldname
if not field_object.index_fieldname in fields:
fields[field_object.index_fieldname] = field_object
fields[field_object.index_fieldname] = copy.copy(field_object)
else:
# If the field types are different, we can mostly
# safely ignore this. The exception is ``MultiValueField``,
# in which case we'll use it instead, copying over the
# values.
if field_object.is_multivalued == True:
old_field = fields[field_object.index_fieldname]
fields[field_object.index_fieldname] = field_object
fields[field_object.index_fieldname] = copy.copy(field_object)
# Switch it so we don't have to dupe the remaining
# checks.
field_object = old_field
# We've already got this field in the list. Ensure that
# what we hand back is a superset of all options that
# affect the schema.
if field_object.indexed is True:
fields[field_object.index_fieldname].indexed = True
if field_object.stored is True:
fields[field_object.index_fieldname].stored = True
if field_object.faceted is True:
fields[field_object.index_fieldname].faceted = True
if field_object.use_template is True:
fields[field_object.index_fieldname].use_template = True
if field_object.null is True:
fields[field_object.index_fieldname].null = True
return fields
def get_index_fieldname(self, fieldname):
"""
Returns the actual name of the field in the index.
If not found, returns the fieldname provided.
This is useful because it handles the case where a ``index_fieldname``
was provided, allowing the user to use the variable name from their
``SearchIndex`` instead of having to remember & use the overridden
name.
"""
if fieldname in self._field_mapping():
return self._field_mapping()[fieldname]['index_fieldname']
else:
return fieldname
def get_facet_field_name(self, fieldname):
"""
Returns the actual name of the facet field in the index.
If not found, returns the fieldname provided.
"""
facet_fieldname = None
reverse_map = {}
for field, info in self._field_mapping().items():
if info['facet_fieldname'] and info['facet_fieldname'] == fieldname:
return info['index_fieldname']
return self.get_index_fieldname(fieldname)
def _field_mapping(self):
mapping = {}
if self._cached_field_mapping:
return self._cached_field_mapping
for model, index in self.get_indexes().items():
for field_name, field_object in index.fields.items():
if field_name in mapping and field_object.index_fieldname != mapping[field_name]['index_fieldname']:
# We've already seen this field in the list. Raise an exception if index_fieldname differs.
raise SearchFieldError("All uses of the '%s' field need to use the same 'index_fieldname' attribute." % field_name)
facet_fieldname = None
if hasattr(field_object, 'facet_for'):
if field_object.facet_for:
facet_fieldname = field_object.facet_for
else:
facet_fieldname = field_object.instance_name
mapping[field_name] = {
'index_fieldname': field_object.index_fieldname,
'facet_fieldname': facet_fieldname,
}
self._cached_field_mapping = mapping
return mapping
def update_object(self, instance):
"""
Updates the instance's data in the index.
A shortcut for updating on the instance's index. Errors from `get_index`
and `update_object` will be allowed to propogate.
"""
return self.get_index(type(instance)).update_object(instance)
def remove_object(self, instance):
"""
Removes the instance's data in the index.
A shortcut for removing on the instance's index. Errors from `get_index`
and `remove_object` will be allowed to propogate.
"""
return self.get_index(type(instance)).remove_object(instance)
# The common case. Feel free to override/replace/define your own in your URLconfs.
site = SearchSite()
|
en
| 0.879646
|
Encapsulates all the indexes that should be available. This allows you to register indexes on models you don't control (reusable apps, django.contrib, etc.) as well as customize on a per-site basis what indexes should be available (different indexes for different sites, same codebase). A SearchSite instance should be instantiated in your URLconf, since all models will have been loaded by that point. The API intentionally follows that of django.contrib.admin's AdminSite as much as it makes sense to do. Registers a model with the site. The model should be a Model class, not instances. If no custom index is provided, a generic SearchIndex will be applied to the model. Unregisters a model from the site. Provide the index that're being used for a particular model. Provide a dict of all indexes that're being used. Provide a list of all models being indexed. Builds a dictionary of all fields appearing in any of the `SearchIndex` instances registered with a site. This is useful when building a schema for an engine. A dictionary is returned, with each key being a fieldname (or index_fieldname) and the value being the `SearchField` class assigned to it. # If the field types are different, we can mostly # safely ignore this. The exception is ``MultiValueField``, # in which case we'll use it instead, copying over the # values. # Switch it so we don't have to dupe the remaining # checks. # We've already got this field in the list. Ensure that # what we hand back is a superset of all options that # affect the schema. Returns the actual name of the field in the index. If not found, returns the fieldname provided. This is useful because it handles the case where a ``index_fieldname`` was provided, allowing the user to use the variable name from their ``SearchIndex`` instead of having to remember & use the overridden name. Returns the actual name of the facet field in the index. If not found, returns the fieldname provided. # We've already seen this field in the list. 
Raise an exception if index_fieldname differs. Updates the instance's data in the index. A shortcut for updating on the instance's index. Errors from `get_index` and `update_object` will be allowed to propogate. Removes the instance's data in the index. A shortcut for removing on the instance's index. Errors from `get_index` and `remove_object` will be allowed to propogate. # The common case. Feel free to override/replace/define your own in your URLconfs.
| 2.271499
| 2
|
FictionTools/amitools/test/unit/path_mgr.py
|
polluks/Puddle-BuildTools
| 38
|
6629220
|
<filename>FictionTools/amitools/test/unit/path_mgr.py
import os
import pytest
from amitools.vamos.path import *
from amitools.vamos.cfgcore import ConfigDict
import logging
from amitools.vamos.log import log_path
log_path.setLevel(logging.DEBUG)
def path_mgr_default_test():
pm = PathManager()
assert pm.get_vol_mgr()
assert pm.get_assign_mgr()
assert pm.get_default_env()
def path_mgr_config_test(tmpdir):
vols_base = str(tmpdir.mkdir("volumes"))
tmpdir.join("volumes").mkdir("work")
sys_path = str(tmpdir.mkdir("sys"))
pm = PathManager()
cfg = ConfigDict(
{
"volumes": ["sys:" + sys_path, "work", "home:~"], # local volume
"assigns": ["c:sys:c+home:c", "libs:sys:libs", "devs:sys:devs"],
"path": {
"command": ["c:", "work:c"],
"cwd": "work:",
"vols_base_dir": vols_base,
"auto_assigns": None,
"auto_volumes": None,
},
}
)
assert pm.parse_config(cfg)
assert pm.setup()
assert pm.get_all_volume_names() == ["sys", "work", "home", "root", "ram"]
assert pm.get_all_assign_names() == ["c", "libs", "devs", "s", "t"]
assert pm.get_cwd() == "work:"
assert pm.get_cmd_paths() == ["c:", "work:c"]
pm.shutdown()
def path_mgr_config_auto_empty_test(tmpdir):
vols_base = str(tmpdir.mkdir("volumes"))
tmpdir.join("volumes").mkdir("work")
sys_path = str(tmpdir.mkdir("sys"))
pm = PathManager()
cfg = ConfigDict(
{
"volumes": ["sys:" + sys_path, "work", "home:~"], # local volume
"assigns": ["c:sys:c+home:c", "libs:sys:libs", "devs:sys:devs"],
"path": {
"command": ["c:", "work:c"],
"cwd": "work:",
"vols_base_dir": vols_base,
"auto_assigns": [],
"auto_volumes": [],
},
}
)
assert pm.parse_config(cfg)
assert pm.setup()
assert pm.get_all_volume_names() == ["sys", "work", "home"]
assert pm.get_all_assign_names() == ["c", "libs", "devs"]
assert pm.get_cwd() == "work:"
assert pm.get_cmd_paths() == ["c:", "work:c"]
pm.shutdown()
def path_mgr_config_esc_sys_test(tmpdir):
sys_path = str(tmpdir.mkdir("sys"))
work_path = str(tmpdir.mkdir("work"))
pm = PathManager()
cfg = ConfigDict(
{
"volumes": ["sys:" + sys_path, "work:" + work_path, "home:~"],
"assigns": ["c:sys:c+home:c", "libs:sys:libs", "devs:sys:devs"],
"path": {
"command": ["::" + work_path],
"cwd": "::~",
"vols_base_dir": None,
"auto_assigns": [],
"auto_volumes": [],
},
}
)
assert pm.parse_config(cfg)
assert pm.setup()
assert pm.get_all_volume_names() == ["sys", "work", "home"]
assert pm.get_all_assign_names() == ["c", "libs", "devs"]
assert pm.get_cwd() == "home:"
assert pm.get_cmd_paths() == ["work:"]
pm.shutdown()
def path_mgr_config_empty_test():
pm = PathManager()
cfg = ConfigDict(
{
"volumes": None,
"assigns": None,
"path": {
"command": ["sys:c"],
"cwd": "sys:",
"vols_base_dir": None,
"auto_volumes": [],
"auto_assigns": [],
},
}
)
assert pm.parse_config(cfg)
assert pm.setup()
assert pm.get_all_volume_names() == ["system"]
assert pm.get_all_assign_names() == ["sys"]
pm.shutdown()
def setup_pm(tmpdir):
root_path = str(tmpdir)
sys_path = str(tmpdir.mkdir("sys"))
work_path = str(tmpdir.mkdir("work"))
pm = PathManager(vols_base_dir=str(tmpdir), auto_volumes=[], auto_assigns=[])
env = pm.get_default_env()
env.set_cwd("root:baz")
env.set_cmd_paths(["a:", "c:"])
vm = pm.get_vol_mgr()
am = pm.get_assign_mgr()
vm.add_volume("root:" + root_path)
vm.add_volume("sys:" + sys_path)
vm.add_volume("work:" + work_path)
am.add_assign("a:b:+c:foo")
am.add_assign("b:root:bla")
am.add_assign("c:sys:c")
am.add_assign("d:a:")
assert pm.setup()
return pm
def path_mgr_valid_prefix_volume_assign_test(tmpdir):
pm = setup_pm(tmpdir)
pv = pm.is_prefix_valid
vp = pm.is_volume_path
ap = pm.is_assign_path
iv = pm.is_valid
# prefix
assert pv(AmiPath("a:"))
assert pv(AmiPath("root:"))
assert not pv(AmiPath("foo:"))
with pytest.raises(AmiPathError):
pv(AmiPath("rel"))
# volume
assert not vp(AmiPath("a:"))
assert vp(AmiPath("root:"))
assert not vp(AmiPath("foo:"))
with pytest.raises(AmiPathError):
vp(AmiPath("rel"))
# assign
assert ap(AmiPath("a:"))
assert not ap(AmiPath("root:"))
assert not ap(AmiPath("foo:"))
with pytest.raises(AmiPathError):
ap(AmiPath("rel"))
# valid
assert iv(AmiPath("a:"))
assert iv(AmiPath("root:"))
assert not iv(AmiPath("foo:"))
assert iv(AmiPath("rel"))
# shutdown
pm.shutdown()
def path_mgr_multi_assigns_test(tmpdir):
pm = setup_pm(tmpdir)
im = pm.is_multi_assign_path
assert im(AmiPath("A:"))
assert not im(AmiPath("b:"))
with pytest.raises(AmiPathError):
im(AmiPath("rel"))
assert not im(AmiPath("root:"))
# recursive
assert im(AmiPath("D:"))
# shutdown
pm.shutdown()
def path_mgr_abspath_test(tmpdir):
pm = setup_pm(tmpdir)
ap = pm.abspath
# abspath of abs
p = AmiPath("foo:bar")
assert ap(p) is p
# abspath of rel
cur_dir = pm.get_default_env().get_cwd()
assert ap("") == cur_dir
assert ap("baz") == cur_dir.join(AmiPath("baz"))
assert ap("/baz") == cur_dir.join(AmiPath("/baz"))
# invalid rel
with pytest.raises(AmiPathError):
env = AmiPathEnv(cwd="foo:")
ap("/", env=env)
# assign
env = AmiPathEnv(cwd="blub:")
assert ap(AmiPath("rel"), env=env) == env.get_cwd().join(AmiPath("rel"))
# other volpath
env = AmiPathEnv(cwd="work:blub")
assert ap("baz", env=env) == env.get_cwd().join(AmiPath("baz"))
# shutdown
pm.shutdown()
def path_mgr_volpath_test(tmpdir):
pm = setup_pm(tmpdir)
env = pm.get_default_env()
cur_dir = env.get_cwd()
vp = pm.volpath
# relpath
assert vp(AmiPath()) == cur_dir
assert vp(AmiPath("foo")) == cur_dir.join(AmiPath("foo"))
# relpath own env
cwd = AmiPath("work:bar")
env = AmiPathEnv(cwd=cwd)
assert vp(AmiPath(), env=env) == cwd
assert vp(AmiPath("foo"), env=env) == cwd.join(AmiPath("foo"))
# invalid relpath
with pytest.raises(AmiPathError):
env = AmiPathEnv(cwd="foo:")
assert vp("/", env=env) == cwd
# volpath
assert vp(AmiPath("work:bla")) == AmiPath("work:bla")
# multi assign
with pytest.raises(AmiPathError):
vp(AmiPath("a:bla"))
# assign
assert vp(AmiPath("b:foo")) == AmiPath("root:bla/foo")
# unknown prefix
assert vp(AmiPath("what:is/this")) is None
# strict: unknown prefix
with pytest.raises(AmiPathError):
vp("what:is/this", strict=True)
# shutdown
pm.shutdown()
def path_mgr_volpaths_test(tmpdir):
pm = setup_pm(tmpdir)
env = pm.get_default_env()
cur_dir = env.get_cwd()
vp = pm.volpaths
# relpath
assert vp(AmiPath()) == [cur_dir]
assert vp("foo") == [cur_dir.join(AmiPath("foo"))]
# relpath own env
cwd = AmiPath("work:bar")
env = AmiPathEnv(cwd=cwd)
assert vp(AmiPath(), env=env) == [cwd]
assert vp(AmiPath("foo"), env=env) == [cwd.join(AmiPath("foo"))]
# invalid relpath
with pytest.raises(AmiPathError):
env = AmiPathEnv(cwd="foo:")
assert vp("/", env=env) == cwd
# volpath
assert vp(AmiPath("work:bla")) == [AmiPath("work:bla")]
# multi assign
assert vp(AmiPath("a:bla")) == [AmiPath("root:bla/bla"), AmiPath("sys:c/foo/bla")]
# assign
assert vp(AmiPath("b:foo")) == [AmiPath("root:bla/foo")]
# unknown prefix
assert vp("what:is/this") == []
# strict: unknown prefix
with pytest.raises(AmiPathError):
vp("what:is/this", strict=True)
# shutdown
pm.shutdown()
def path_mgr_resolve_assigns_test(tmpdir):
pm = setup_pm(tmpdir)
env = pm.get_default_env()
ra = pm.resolve_assigns
# relpath
assert ra(AmiPath()) == AmiPath()
assert ra(AmiPath("foo")) == AmiPath("foo")
# volpath
assert ra(AmiPath("work:bla")) == AmiPath("work:bla")
# multi assign - non recursive
assert ra(AmiPath("a:bla")) == [AmiPath("b:bla"), AmiPath("c:foo/bla")]
# multi assign - recursive
assert ra(AmiPath("a:bla"), True) == [
AmiPath("root:bla/bla"),
AmiPath("sys:c/foo/bla"),
]
# assign
assert ra(AmiPath("b:foo")) == AmiPath("root:bla/foo")
assert ra(AmiPath("d:baz")) == AmiPath("a:baz")
# assign recursive
assert ra(AmiPath("d:baz"), True) == [
AmiPath("root:bla/baz"),
AmiPath("sys:c/foo/baz"),
]
# shutdown
pm.shutdown()
def path_mgr_cmdpaths_test(tmpdir):
pm = setup_pm(tmpdir)
env = pm.get_default_env()
cp = pm.cmdpaths
cur_dir = env.get_cwd()
# relpath
with pytest.raises(AmiPathError):
cp(AmiPath())
p = AmiPath("bla/blub")
assert cp(p) == [cur_dir.join(p)]
assert cp(p, make_volpaths=False) == [p]
# invalid command path
p = AmiPath("bla/blub/")
with pytest.raises(AmiPathError):
cp(p)
# abspath
with pytest.raises(AmiPathError):
cp(AmiPath("foo:"))
with pytest.raises(AmiPathError):
cp(AmiPath("foo:bla/"))
p = AmiPath("root:cmd")
assert cp(p) == [p]
assert cp(p, make_volpaths=False) == [p]
# name only
p = AmiPath("cmd")
assert cp(p) == [
AmiPath("root:baz/cmd"),
AmiPath("root:bla/cmd"),
AmiPath("sys:c/foo/cmd"),
AmiPath("sys:c/cmd"),
]
assert cp(p, prepend_cur_dir=False) == [
AmiPath("root:bla/cmd"),
AmiPath("sys:c/foo/cmd"),
AmiPath("sys:c/cmd"),
]
assert cp(p, make_volpaths=False) == [
AmiPath("root:baz/cmd"),
AmiPath("a:cmd"),
AmiPath("c:cmd"),
]
# shutdown
pm.shutdown()
def get_volume_sys_path(pm, vol_name):
vol = pm.get_volume(vol_name)
return vol.get_path()
def path_mgr_to_sys_path_test(tmpdir):
pm = setup_pm(tmpdir)
tsp = pm.to_sys_path
sys_sys_path = get_volume_sys_path(pm, "sys")
sys_root_path = get_volume_sys_path(pm, "root")
# vol path
assert tsp("sys:") == sys_sys_path
# assign path
assert tsp("c:") == os.path.join(sys_sys_path, "c")
# relpath
assert tsp("") == os.path.join(sys_root_path, "baz")
assert tsp("what/next") == os.path.join(sys_root_path, "baz", "what", "next")
# relpath env
env = AmiPathEnv(cwd="sys:")
assert tsp("", env=env) == os.path.join(sys_sys_path)
assert tsp("foo/bar", env=env) == os.path.join(sys_sys_path, "foo", "bar")
# invalid relpath
with pytest.raises(AmiPathError):
tsp("/", env=env)
# unknown prefix
assert tsp("unknown:") is None
with pytest.raises(AmiPathError):
tsp("unknown:", strict=True)
# shutdown
pm.shutdown()
def path_mgr_from_sys_path_test(tmpdir):
pm = setup_pm(tmpdir)
fsp = pm.from_sys_path
sys_sys_path = get_volume_sys_path(pm, "sys")
sys_root_path = get_volume_sys_path(pm, "root")
assert pm.get_vol_mgr().add_volume("cwd:.")
sys_cwd_path = get_volume_sys_path(pm, "cwd")
# abs sys path
assert fsp(sys_sys_path) == "sys:"
assert fsp(sys_root_path) == "root:"
assert fsp(sys_cwd_path) == "cwd:"
assert fsp(os.path.join(sys_sys_path, "my", "Path")) == "sys:my/Path"
# rel sys path
assert fsp(".") == "cwd:"
assert fsp("my/Path") == "cwd:my/Path"
# can't map
assert fsp("..") is None
with pytest.raises(SysPathError):
fsp("..", strict=True)
# shutdown
pm.shutdown()
def path_mgr_resolve_esc_sys_path_test(tmpdir):
pm = setup_pm(tmpdir)
sys_sys_path = get_volume_sys_path(pm, "sys")
assert pm.get_vol_mgr().add_volume("cwd:.")
sys_cwd_path = get_volume_sys_path(pm, "cwd")
resp = pm.resolve_esc_sys_path
# ami path
assert resp("bla:") == AmiPath("bla:")
assert resp("rel") == AmiPath("rel")
assert resp("") == AmiPath()
# esc sys path
# invalid empty
with pytest.raises(AmiPathError):
resp("::")
# valid abs
assert resp("::" + sys_sys_path) == "sys:"
# valid rel
assert resp("::.") == "cwd:"
# invalid sys
assert resp("::..") is None
with pytest.raises(SysPathError):
resp("::..", strict=True)
# shutdown
pm.shutdown()
def path_mgr_create_env_test(tmpdir):
pm = setup_pm(tmpdir)
sys_sys_path = get_volume_sys_path(pm, "sys")
assert pm.get_vol_mgr().add_volume("cwd:.")
sys_cwd_path = get_volume_sys_path(pm, "cwd")
def_env = pm.get_default_env()
# create clone of default env
env = pm.create_env()
assert env == def_env
# set cwd
env = pm.create_env(cwd="work:")
assert env.get_cwd() == "work:"
env.set_cwd("root:")
assert env.get_cwd() == "root:"
assert env.get_cmd_paths() == def_env.get_cmd_paths()
# set cmd_paths
env = pm.create_env(cmd_paths=["b:"])
assert env.get_cwd() == "root:baz"
assert env.get_cmd_paths() == ["b:"]
# set both
env = pm.create_env(cwd="work:bla", cmd_paths=["d:"])
assert env.get_cwd() == "work:bla"
assert env.get_cmd_paths() == ["d:"]
# shutdown
pm.shutdown()
def path_mgr_auto_volume_assign_test(tmpdir):
pm = PathManager(vols_base_dir=str(tmpdir))
assert pm.setup()
vm = pm.get_vol_mgr()
assert vm.is_volume("system")
assert vm.is_volume("root")
assert vm.is_volume("ram")
am = pm.get_assign_mgr()
assert am.is_assign("sys")
assert am.is_assign("c")
assert am.is_assign("t")
assert am.is_assign("s")
assert am.is_assign("devs")
assert am.is_assign("libs")
pm.shutdown()
|
<filename>FictionTools/amitools/test/unit/path_mgr.py
import os
import pytest
from amitools.vamos.path import *
from amitools.vamos.cfgcore import ConfigDict
import logging
from amitools.vamos.log import log_path
log_path.setLevel(logging.DEBUG)
def path_mgr_default_test():
    """A freshly constructed PathManager exposes its sub-managers and env."""
    pm = PathManager()
    assert pm.get_vol_mgr()
    assert pm.get_assign_mgr()
    assert pm.get_default_env()
def path_mgr_config_test(tmpdir):
    """Full config parse/setup: volumes, assigns, cwd and command paths.

    With ``auto_assigns``/``auto_volumes`` set to None the defaults
    (root, ram, s, t) are added on top of the configured entries.
    """
    vols_base = str(tmpdir.mkdir("volumes"))
    tmpdir.join("volumes").mkdir("work")
    sys_path = str(tmpdir.mkdir("sys"))
    pm = PathManager()
    cfg = ConfigDict(
        {
            "volumes": ["sys:" + sys_path, "work", "home:~"],  # local volume
            "assigns": ["c:sys:c+home:c", "libs:sys:libs", "devs:sys:devs"],
            "path": {
                "command": ["c:", "work:c"],
                "cwd": "work:",
                "vols_base_dir": vols_base,
                "auto_assigns": None,
                "auto_volumes": None,
            },
        }
    )
    assert pm.parse_config(cfg)
    assert pm.setup()
    assert pm.get_all_volume_names() == ["sys", "work", "home", "root", "ram"]
    assert pm.get_all_assign_names() == ["c", "libs", "devs", "s", "t"]
    assert pm.get_cwd() == "work:"
    assert pm.get_cmd_paths() == ["c:", "work:c"]
    pm.shutdown()
def path_mgr_config_auto_empty_test(tmpdir):
    """With empty auto_volumes/auto_assigns no default entries are added."""
    vols_base = str(tmpdir.mkdir("volumes"))
    tmpdir.join("volumes").mkdir("work")
    sys_path = str(tmpdir.mkdir("sys"))
    pm = PathManager()
    cfg = ConfigDict(
        {
            "volumes": ["sys:" + sys_path, "work", "home:~"],  # local volume
            "assigns": ["c:sys:c+home:c", "libs:sys:libs", "devs:sys:devs"],
            "path": {
                "command": ["c:", "work:c"],
                "cwd": "work:",
                "vols_base_dir": vols_base,
                "auto_assigns": [],
                "auto_volumes": [],
            },
        }
    )
    assert pm.parse_config(cfg)
    assert pm.setup()
    assert pm.get_all_volume_names() == ["sys", "work", "home"]
    assert pm.get_all_assign_names() == ["c", "libs", "devs"]
    assert pm.get_cwd() == "work:"
    assert pm.get_cmd_paths() == ["c:", "work:c"]
    pm.shutdown()
def path_mgr_config_esc_sys_test(tmpdir):
    """'::<host path>' escape syntax in cwd/command resolves to volumes."""
    sys_path = str(tmpdir.mkdir("sys"))
    work_path = str(tmpdir.mkdir("work"))
    pm = PathManager()
    cfg = ConfigDict(
        {
            "volumes": ["sys:" + sys_path, "work:" + work_path, "home:~"],
            "assigns": ["c:sys:c+home:c", "libs:sys:libs", "devs:sys:devs"],
            "path": {
                "command": ["::" + work_path],
                "cwd": "::~",
                "vols_base_dir": None,
                "auto_assigns": [],
                "auto_volumes": [],
            },
        }
    )
    assert pm.parse_config(cfg)
    assert pm.setup()
    assert pm.get_all_volume_names() == ["sys", "work", "home"]
    assert pm.get_all_assign_names() == ["c", "libs", "devs"]
    assert pm.get_cwd() == "home:"
    assert pm.get_cmd_paths() == ["work:"]
    pm.shutdown()
def path_mgr_config_empty_test():
    """With volumes/assigns unset only the implicit defaults are created."""
    mgr = PathManager()
    config = ConfigDict(
        {
            "volumes": None,
            "assigns": None,
            "path": {
                "command": ["sys:c"],
                "cwd": "sys:",
                "vols_base_dir": None,
                "auto_volumes": [],
                "auto_assigns": [],
            },
        }
    )
    assert mgr.parse_config(config)
    assert mgr.setup()
    assert mgr.get_all_volume_names() == ["system"]
    assert mgr.get_all_assign_names() == ["sys"]
    mgr.shutdown()
def setup_pm(tmpdir):
    """Build a PathManager fixture with root/sys/work volumes.

    Assigns: ``a`` is a multi assign (``b:`` + ``c:foo``), ``b``/``c``
    map into root/sys, and ``d`` points at ``a`` (recursive assign).
    cwd is ``root:baz`` and the command path is ``[a:, c:]``.
    """
    root_path = str(tmpdir)
    sys_path = str(tmpdir.mkdir("sys"))
    work_path = str(tmpdir.mkdir("work"))
    pm = PathManager(vols_base_dir=str(tmpdir), auto_volumes=[], auto_assigns=[])
    env = pm.get_default_env()
    env.set_cwd("root:baz")
    env.set_cmd_paths(["a:", "c:"])
    vm = pm.get_vol_mgr()
    am = pm.get_assign_mgr()
    vm.add_volume("root:" + root_path)
    vm.add_volume("sys:" + sys_path)
    vm.add_volume("work:" + work_path)
    am.add_assign("a:b:+c:foo")
    am.add_assign("b:root:bla")
    am.add_assign("c:sys:c")
    am.add_assign("d:a:")
    assert pm.setup()
    return pm
def path_mgr_valid_prefix_volume_assign_test(tmpdir):
    """Classify prefixes as volume vs assign vs unknown; rel paths raise."""
    pm = setup_pm(tmpdir)
    pv = pm.is_prefix_valid
    vp = pm.is_volume_path
    ap = pm.is_assign_path
    iv = pm.is_valid
    # prefix
    assert pv(AmiPath("a:"))
    assert pv(AmiPath("root:"))
    assert not pv(AmiPath("foo:"))
    with pytest.raises(AmiPathError):
        pv(AmiPath("rel"))
    # volume
    assert not vp(AmiPath("a:"))
    assert vp(AmiPath("root:"))
    assert not vp(AmiPath("foo:"))
    with pytest.raises(AmiPathError):
        vp(AmiPath("rel"))
    # assign
    assert ap(AmiPath("a:"))
    assert not ap(AmiPath("root:"))
    assert not ap(AmiPath("foo:"))
    with pytest.raises(AmiPathError):
        ap(AmiPath("rel"))
    # valid
    assert iv(AmiPath("a:"))
    assert iv(AmiPath("root:"))
    assert not iv(AmiPath("foo:"))
    assert iv(AmiPath("rel"))
    # shutdown
    pm.shutdown()
def path_mgr_multi_assigns_test(tmpdir):
    """is_multi_assign_path detects direct and recursive multi assigns."""
    mgr = setup_pm(tmpdir)
    is_multi = mgr.is_multi_assign_path
    assert is_multi(AmiPath("A:"))
    assert not is_multi(AmiPath("b:"))
    with pytest.raises(AmiPathError):
        is_multi(AmiPath("rel"))
    assert not is_multi(AmiPath("root:"))
    # "d:" resolves to "a:" which itself is a multi assign
    assert is_multi(AmiPath("D:"))
    mgr.shutdown()
def path_mgr_abspath_test(tmpdir):
    """abspath resolves relative paths against the (given) env's cwd."""
    pm = setup_pm(tmpdir)
    ap = pm.abspath
    # abspath of abs: returned unchanged (same object)
    p = AmiPath("foo:bar")
    assert ap(p) is p
    # abspath of rel
    cur_dir = pm.get_default_env().get_cwd()
    assert ap("") == cur_dir
    assert ap("baz") == cur_dir.join(AmiPath("baz"))
    assert ap("/baz") == cur_dir.join(AmiPath("/baz"))
    # invalid rel: parent of a prefix-only cwd
    with pytest.raises(AmiPathError):
        env = AmiPathEnv(cwd="foo:")
        ap("/", env=env)
    # assign
    env = AmiPathEnv(cwd="blub:")
    assert ap(AmiPath("rel"), env=env) == env.get_cwd().join(AmiPath("rel"))
    # other volpath
    env = AmiPathEnv(cwd="work:blub")
    assert ap("baz", env=env) == env.get_cwd().join(AmiPath("baz"))
    # shutdown
    pm.shutdown()
def path_mgr_volpath_test(tmpdir):
    """volpath maps rel/assign paths to a single volume path."""
    pm = setup_pm(tmpdir)
    env = pm.get_default_env()
    cur_dir = env.get_cwd()
    vp = pm.volpath
    # relpath
    assert vp(AmiPath()) == cur_dir
    assert vp(AmiPath("foo")) == cur_dir.join(AmiPath("foo"))
    # relpath own env
    cwd = AmiPath("work:bar")
    env = AmiPathEnv(cwd=cwd)
    assert vp(AmiPath(), env=env) == cwd
    assert vp(AmiPath("foo"), env=env) == cwd.join(AmiPath("foo"))
    # invalid relpath
    with pytest.raises(AmiPathError):
        env = AmiPathEnv(cwd="foo:")
        assert vp("/", env=env) == cwd  # NOTE(review): comparison unreached; vp raises first
    # volpath
    assert vp(AmiPath("work:bla")) == AmiPath("work:bla")
    # multi assign: has no single volume path
    with pytest.raises(AmiPathError):
        vp(AmiPath("a:bla"))
    # assign
    assert vp(AmiPath("b:foo")) == AmiPath("root:bla/foo")
    # unknown prefix
    assert vp(AmiPath("what:is/this")) is None
    # strict: unknown prefix
    with pytest.raises(AmiPathError):
        vp("what:is/this", strict=True)
    # shutdown
    pm.shutdown()
def path_mgr_volpaths_test(tmpdir):
    """volpaths returns the list of volume paths (multi assigns expand)."""
    pm = setup_pm(tmpdir)
    env = pm.get_default_env()
    cur_dir = env.get_cwd()
    vp = pm.volpaths
    # relpath
    assert vp(AmiPath()) == [cur_dir]
    assert vp("foo") == [cur_dir.join(AmiPath("foo"))]
    # relpath own env
    cwd = AmiPath("work:bar")
    env = AmiPathEnv(cwd=cwd)
    assert vp(AmiPath(), env=env) == [cwd]
    assert vp(AmiPath("foo"), env=env) == [cwd.join(AmiPath("foo"))]
    # invalid relpath
    with pytest.raises(AmiPathError):
        env = AmiPathEnv(cwd="foo:")
        assert vp("/", env=env) == cwd  # NOTE(review): comparison unreached; vp raises first
    # volpath
    assert vp(AmiPath("work:bla")) == [AmiPath("work:bla")]
    # multi assign expands to all targets
    assert vp(AmiPath("a:bla")) == [AmiPath("root:bla/bla"), AmiPath("sys:c/foo/bla")]
    # assign
    assert vp(AmiPath("b:foo")) == [AmiPath("root:bla/foo")]
    # unknown prefix
    assert vp("what:is/this") == []
    # strict: unknown prefix
    with pytest.raises(AmiPathError):
        vp("what:is/this", strict=True)
    # shutdown
    pm.shutdown()
def path_mgr_resolve_assigns_test(tmpdir):
    """resolve_assigns expands assigns one level or (recursive=True) fully."""
    pm = setup_pm(tmpdir)
    env = pm.get_default_env()
    ra = pm.resolve_assigns
    # relpath: passed through unchanged
    assert ra(AmiPath()) == AmiPath()
    assert ra(AmiPath("foo")) == AmiPath("foo")
    # volpath
    assert ra(AmiPath("work:bla")) == AmiPath("work:bla")
    # multi assign - non recursive
    assert ra(AmiPath("a:bla")) == [AmiPath("b:bla"), AmiPath("c:foo/bla")]
    # multi assign - recursive
    assert ra(AmiPath("a:bla"), True) == [
        AmiPath("root:bla/bla"),
        AmiPath("sys:c/foo/bla"),
    ]
    # assign
    assert ra(AmiPath("b:foo")) == AmiPath("root:bla/foo")
    assert ra(AmiPath("d:baz")) == AmiPath("a:baz")
    # assign recursive
    assert ra(AmiPath("d:baz"), True) == [
        AmiPath("root:bla/baz"),
        AmiPath("sys:c/foo/baz"),
    ]
    # shutdown
    pm.shutdown()
def path_mgr_cmdpaths_test(tmpdir):
    """cmdpaths builds command lookup paths from cwd and the cmd path list."""
    pm = setup_pm(tmpdir)
    env = pm.get_default_env()
    cp = pm.cmdpaths
    cur_dir = env.get_cwd()
    # relpath: an empty path is not a command
    with pytest.raises(AmiPathError):
        cp(AmiPath())
    p = AmiPath("bla/blub")
    assert cp(p) == [cur_dir.join(p)]
    assert cp(p, make_volpaths=False) == [p]
    # invalid command path (trailing slash)
    p = AmiPath("bla/blub/")
    with pytest.raises(AmiPathError):
        cp(p)
    # abspath: prefix-only or trailing-slash paths are not commands
    with pytest.raises(AmiPathError):
        cp(AmiPath("foo:"))
    with pytest.raises(AmiPathError):
        cp(AmiPath("foo:bla/"))
    p = AmiPath("root:cmd")
    assert cp(p) == [p]
    assert cp(p, make_volpaths=False) == [p]
    # name only: searched in cwd plus all command path entries
    p = AmiPath("cmd")
    assert cp(p) == [
        AmiPath("root:baz/cmd"),
        AmiPath("root:bla/cmd"),
        AmiPath("sys:c/foo/cmd"),
        AmiPath("sys:c/cmd"),
    ]
    assert cp(p, prepend_cur_dir=False) == [
        AmiPath("root:bla/cmd"),
        AmiPath("sys:c/foo/cmd"),
        AmiPath("sys:c/cmd"),
    ]
    assert cp(p, make_volpaths=False) == [
        AmiPath("root:baz/cmd"),
        AmiPath("a:cmd"),
        AmiPath("c:cmd"),
    ]
    # shutdown
    pm.shutdown()
def get_volume_sys_path(pm, vol_name):
    """Return the host filesystem path backing the named volume."""
    return pm.get_volume(vol_name).get_path()
def path_mgr_to_sys_path_test(tmpdir):
    """to_sys_path maps Amiga paths to host filesystem paths."""
    pm = setup_pm(tmpdir)
    tsp = pm.to_sys_path
    sys_sys_path = get_volume_sys_path(pm, "sys")
    sys_root_path = get_volume_sys_path(pm, "root")
    # vol path
    assert tsp("sys:") == sys_sys_path
    # assign path
    assert tsp("c:") == os.path.join(sys_sys_path, "c")
    # relpath resolved against default cwd (root:baz)
    assert tsp("") == os.path.join(sys_root_path, "baz")
    assert tsp("what/next") == os.path.join(sys_root_path, "baz", "what", "next")
    # relpath env
    env = AmiPathEnv(cwd="sys:")
    assert tsp("", env=env) == os.path.join(sys_sys_path)
    assert tsp("foo/bar", env=env) == os.path.join(sys_sys_path, "foo", "bar")
    # invalid relpath
    with pytest.raises(AmiPathError):
        tsp("/", env=env)
    # unknown prefix
    assert tsp("unknown:") is None
    with pytest.raises(AmiPathError):
        tsp("unknown:", strict=True)
    # shutdown
    pm.shutdown()
def path_mgr_from_sys_path_test(tmpdir):
    """from_sys_path maps host paths back to Amiga volume paths."""
    pm = setup_pm(tmpdir)
    fsp = pm.from_sys_path
    sys_sys_path = get_volume_sys_path(pm, "sys")
    sys_root_path = get_volume_sys_path(pm, "root")
    assert pm.get_vol_mgr().add_volume("cwd:.")
    sys_cwd_path = get_volume_sys_path(pm, "cwd")
    # abs sys path
    assert fsp(sys_sys_path) == "sys:"
    assert fsp(sys_root_path) == "root:"
    assert fsp(sys_cwd_path) == "cwd:"
    assert fsp(os.path.join(sys_sys_path, "my", "Path")) == "sys:my/Path"
    # rel sys path: resolved against the cwd: volume
    assert fsp(".") == "cwd:"
    assert fsp("my/Path") == "cwd:my/Path"
    # can't map: escapes all volumes
    assert fsp("..") is None
    with pytest.raises(SysPathError):
        fsp("..", strict=True)
    # shutdown
    pm.shutdown()
def path_mgr_resolve_esc_sys_path_test(tmpdir):
    """resolve_esc_sys_path handles '::' escaped host paths; Amiga paths pass."""
    pm = setup_pm(tmpdir)
    sys_sys_path = get_volume_sys_path(pm, "sys")
    assert pm.get_vol_mgr().add_volume("cwd:.")
    sys_cwd_path = get_volume_sys_path(pm, "cwd")  # NOTE(review): unused below
    resp = pm.resolve_esc_sys_path
    # ami path: passed through unchanged
    assert resp("bla:") == AmiPath("bla:")
    assert resp("rel") == AmiPath("rel")
    assert resp("") == AmiPath()
    # esc sys path
    # invalid empty
    with pytest.raises(AmiPathError):
        resp("::")
    # valid abs
    assert resp("::" + sys_sys_path) == "sys:"
    # valid rel
    assert resp("::.") == "cwd:"
    # invalid sys: escapes all volumes
    assert resp("::..") is None
    with pytest.raises(SysPathError):
        resp("::..", strict=True)
    # shutdown
    pm.shutdown()
def path_mgr_create_env_test(tmpdir):
    """create_env clones the default env, overriding cwd and/or cmd_paths."""
    pm = setup_pm(tmpdir)
    sys_sys_path = get_volume_sys_path(pm, "sys")  # NOTE(review): unused below
    assert pm.get_vol_mgr().add_volume("cwd:.")
    sys_cwd_path = get_volume_sys_path(pm, "cwd")  # NOTE(review): unused below
    def_env = pm.get_default_env()
    # create clone of default env
    env = pm.create_env()
    assert env == def_env
    # set cwd
    env = pm.create_env(cwd="work:")
    assert env.get_cwd() == "work:"
    env.set_cwd("root:")
    assert env.get_cwd() == "root:"
    assert env.get_cmd_paths() == def_env.get_cmd_paths()
    # set cmd_paths
    env = pm.create_env(cmd_paths=["b:"])
    assert env.get_cwd() == "root:baz"
    assert env.get_cmd_paths() == ["b:"]
    # set both
    env = pm.create_env(cwd="work:bla", cmd_paths=["d:"])
    assert env.get_cwd() == "work:bla"
    assert env.get_cmd_paths() == ["d:"]
    # shutdown
    pm.shutdown()
def path_mgr_auto_volume_assign_test(tmpdir):
    """Default auto volumes/assigns are created when none are configured."""
    pm = PathManager(vols_base_dir=str(tmpdir))
    assert pm.setup()
    vm = pm.get_vol_mgr()
    assert vm.is_volume("system")
    assert vm.is_volume("root")
    assert vm.is_volume("ram")
    am = pm.get_assign_mgr()
    assert am.is_assign("sys")
    assert am.is_assign("c")
    assert am.is_assign("t")
    assert am.is_assign("s")
    assert am.is_assign("devs")
    assert am.is_assign("libs")
    pm.shutdown()
|
en
| 0.554457
|
# local volume # local volume # prefix # volume # assign # valid # shutdown # recursive # shutdown # abspath of abs # abspath of rel # invalid rel # assign # other volpath # shutdown # relpath # relpath own env # invalid relpath # volpath # multi assign # assign # unknown prefix # strict: unknown prefix # shutdown # relpath # relpath own env # invalid relpath # volpath # multi assign # assign # unknown prefix # strict: unknown prefix # shutdown # relpath # volpath # multi assign - non recursive # multi assign - recursive # assign # assign recursive # shutdown # relpath # invalid command path # abspath # name only # shutdown # vol path # assign path # relpath # relpath env # invalid relpath # unknown prefix # shutdown # abs sys path # rel sys path # can't map # shutdown # ami path # esc sys path # invalid empty # valid abs # valid rel # invalid sys # shutdown # create clone of default env # set cwd # set cmd_paths # set both # shutdown
| 2.002767
| 2
|
tools/populate_default.py
|
jalvinronnie/SU-Portal
| 0
|
6629221
|
# Seed script: create the default Project/Priority/Status rows and an
# initial superuser account for the SimpleTicket app.
from app.models import User
from simpleticket.models import Priority
from simpleticket.models import Project
from simpleticket.models import Status

# default project for new tickets
project = Project.objects.create(name='default_project')
project.is_default = True
project.save()

# default priority with the lowest value
priority = Priority.objects.create(name='default_priority')
priority.is_default = True
priority.value = '1'
priority.save()

# three statuses: the default visible-open, a hidden-open, and closed
status = Status.objects.create(name='default_status')
status.name = 'open: Visible'
status.is_default = True
status.save()
status = Status.objects.create(name='default_status')
status.name = 'open: Hidden'
status.is_default = False
status.save()
status = Status.objects.create(name='default_status')
status.name = 'closed'
status.is_default = False
status.save()

# bootstrap superuser account
user, created = User.objects.get_or_create(username='pseudo')
# NOTE(review): '<PASSWORD>' and '<EMAIL>' look like redaction placeholders
# from a data dump — replace with real values before running this script.
user.set_password('<PASSWORD>')
user.phone = 7738367222
user.email = '<EMAIL>'
user.first_name = 'pseudo'
user.last_name = 'saab'
user.is_superuser = True
user.is_staff = True
user.save()
|
from app.models import User
from simpleticket.models import Priority
from simpleticket.models import Project
from simpleticket.models import Status
project = Project.objects.create(name='default_project')
project.is_default = True
project.save()
priority = Priority.objects.create(name='default_priority')
priority.is_default = True
priority.value = '1'
priority.save()
status = Status.objects.create(name='default_status')
status.name = 'open: Visible'
status.is_default = True
status.save()
status = Status.objects.create(name='default_status')
status.name = 'open: Hidden'
status.is_default = False
status.save()
status = Status.objects.create(name='default_status')
status.name = 'closed'
status.is_default = False
status.save()
user, created = User.objects.get_or_create(username='pseudo')
user.set_password('<PASSWORD>')
user.phone = 7738367222
user.email = '<EMAIL>'
user.first_name = 'pseudo'
user.last_name = 'saab'
user.is_superuser = True
user.is_staff = True
user.save()
|
none
| 1
| 2.189544
| 2
|
|
gammapy/irf/psf_table.py
|
watsonjj/gammapy
| 0
|
6629222
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
import numpy as np
from astropy.io import fits
from astropy import units as u
from astropy.coordinates import Angle
from astropy.utils import lazyproperty
from scipy.integrate import cumtrapz
from ..utils.interpolation import ScaledRegularGridInterpolator
from ..utils.gauss import Gauss2DPDF
from ..utils.scripts import make_path
from ..utils.array import array_stats_str
from ..utils.energy import Energy
__all__ = ["TablePSF", "EnergyDependentTablePSF"]
log = logging.getLogger(__name__)
class TablePSF:
    r"""Radially-symmetric table PSF.

    Parameters
    ----------
    rad : `~astropy.units.Quantity` with angle units
        Offset wrt source position
    psf_value : `~astropy.units.Quantity` with sr^-1 units
        PSF value array
    interp_kwargs : dict
        Keyword arguments passed to `ScaledRegularGridInterpolator`
    """

    def __init__(self, rad, psf_value, interp_kwargs=None):
        self.rad = Angle(rad).to("rad")
        self.psf_value = u.Quantity(psf_value).to("sr^-1")
        self._interp_kwargs = interp_kwargs or {}

    @lazyproperty
    def _interpolate(self):
        # Cached 1-d interpolator of the PSF value over the radial offset.
        points = (self.rad.value,)
        return ScaledRegularGridInterpolator(
            points=points, values=self.psf_value, **self._interp_kwargs
        )

    @lazyproperty
    def _interpolate_containment(self):
        # Containment via cumulative integral of 2*pi*r*PSF(r); prepend r=0
        # so the integral starts at zero containment.
        if self.rad[0] > 0:
            rad = self.rad.insert(0, 0)
        else:
            rad = self.rad

        rad_drad = 2 * np.pi * rad * self.evaluate(rad)
        values = cumtrapz(rad_drad.to_value("rad-1"), rad.to_value("rad"), initial=0)
        # NOTE(review): the points are a Quantity here while ``_interpolate``
        # uses plain values — confirm the interpolator supports both.
        return ScaledRegularGridInterpolator(points=(rad,), values=values, fill_value=1)

    @classmethod
    def from_shape(cls, shape, width, rad):
        """Make TablePSF objects with commonly used shapes.

        This function is mostly useful for examples and testing.

        Parameters
        ----------
        shape : {'disk', 'gauss'}
            PSF shape.
        width : `~astropy.units.Quantity` with angle units
            PSF width angle (radius for disk, sigma for Gauss).
        rad : `~astropy.units.Quantity` with angle units
            Offset angle

        Returns
        -------
        psf : `TablePSF`
            Table PSF

        Examples
        --------
        >>> import numpy as np
        >>> from astropy.coordinates import Angle
        >>> from gammapy.irf import TablePSF
        >>> TablePSF.from_shape(shape='gauss', width='0.2 deg',
        ...                     rad=Angle(np.linspace(0, 0.7, 100), 'deg'))
        """
        width = Angle(width)
        rad = Angle(rad)

        if shape == "disk":
            # flat disk: constant amplitude inside ``width``, zero outside
            amplitude = 1 / (np.pi * width.radian ** 2)
            psf_value = np.where(rad < width, amplitude, 0)
        elif shape == "gauss":
            gauss2d_pdf = Gauss2DPDF(sigma=width.radian)
            psf_value = gauss2d_pdf(rad.radian)
        else:
            raise ValueError("Invalid shape: {}".format(shape))

        psf_value = u.Quantity(psf_value, "sr^-1")
        return cls(rad, psf_value)

    def info(self):
        """Return a basic info string (axis stats, integral, containment)."""
        ss = array_stats_str(self.rad.deg, "offset")
        # Fix: the original called the undefined ``self.integral()``; the
        # total integral is the containment at the outermost radius.
        ss += "integral = {}\n".format(self.containment(self.rad[-1]))

        for containment in [68, 80, 95]:
            radius = self.containment_radius(0.01 * containment)
            ss += "containment radius {} deg for {}%\n".format(radius.deg, containment)

        return ss

    def evaluate(self, rad):
        r"""Evaluate PSF.

        The following PSF quantities are available:

        * 'dp_domega': PDF per 2-dim solid angle :math:`\Omega` in sr^-1

            .. math:: \frac{dP}{d\Omega}

        Parameters
        ----------
        rad : `~astropy.coordinates.Angle`
            Offset wrt source position

        Returns
        -------
        psf_value : `~astropy.units.Quantity`
            PSF value
        """
        rad = np.atleast_1d(u.Quantity(rad, "rad").value)
        return self._interpolate((rad,))

    def containment(self, rad_max):
        """Compute PSF containment fraction.

        Parameters
        ----------
        rad_max : `~astropy.units.Quantity`
            Offset angle range

        Returns
        -------
        integral : float
            PSF integral
        """
        rad = np.atleast_1d(u.Quantity(rad_max, "rad").value)
        return self._interpolate_containment((rad,))

    def containment_radius(self, fraction):
        """Containment radius.

        Parameters
        ----------
        fraction : array_like
            Containment fraction (range 0 .. 1)

        Returns
        -------
        rad : `~astropy.coordinates.Angle`
            Containment radius angle
        """
        # upsample the radial axis for better precision
        rad_max = Angle(np.linspace(0, self.rad[-1].value, 10 * len(self.rad)), "rad")
        containment = self.containment(rad_max=rad_max)

        fraction = np.atleast_1d(fraction)
        # nearest containment value per requested fraction
        fraction_idx = np.argmin(np.abs(containment - fraction[:, np.newaxis]), axis=1)
        return rad_max[fraction_idx].to("deg")

    def normalize(self):
        """Normalize PSF to unit integral.

        Computes the total PSF integral via the :math:`dP / dr` spline
        and then divides the :math:`dP / dr` array.
        """
        integral = self.containment(self.rad[-1])
        self.psf_value /= integral

    def broaden(self, factor, normalize=True):
        r"""Broaden PSF by scaling the offset array.

        For a broadening factor :math:`f` and the offset
        array :math:`r`, the offset array scaled
        in the following way:

        .. math::
            r_{new} = f \times r_{old}
            \frac{dP}{dr}(r_{new}) = \frac{dP}{dr}(r_{old})

        Parameters
        ----------
        factor : float
            Broadening factor
        normalize : bool
            Normalize PSF after broadening
        """
        self.rad *= factor
        # Fix: drop the cached lazyproperty interpolators so they are
        # rebuilt with the scaled ``rad`` axis. (The original called the
        # undefined ``self._setup_interpolators()``, which would raise
        # AttributeError.) ``lazyproperty`` caches in the instance dict.
        for name in ("_interpolate", "_interpolate_containment"):
            self.__dict__.pop(name, None)
        if normalize:
            self.normalize()

    def plot_psf_vs_rad(self, ax=None, **kwargs):
        """Plot PSF vs radius.

        Parameters
        ----------
        ax : `~matplotlib.axes.Axes` or None
            Axis to draw on (defaults to the current axis).
        kwargs : dict
            Keyword arguments passed to `matplotlib.pyplot.plot`
        """
        import matplotlib.pyplot as plt

        ax = plt.gca() if ax is None else ax
        ax.plot(self.rad.to_value("deg"), self.psf_value.to_value("sr-1"), **kwargs)
        ax.set_yscale("log")
        ax.set_xlabel("Radius (deg)")
        ax.set_ylabel("PSF (sr-1)")
class EnergyDependentTablePSF:
"""Energy-dependent radially-symmetric table PSF (``gtpsf`` format).
TODO: add references and explanations.
Parameters
----------
energy : `~astropy.units.Quantity`
Energy (1-dim)
rad : `~astropy.units.Quantity` with angle units
Offset angle wrt source position (1-dim)
exposure : `~astropy.units.Quantity`
Exposure (1-dim)
psf_value : `~astropy.units.Quantity`
PSF (2-dim with axes: psf[energy_index, offset_index]
interp_kwargs : dict
Interpolation keyword arguments pass to `ScaledRegularGridInterpolator`.
"""
def __init__(self, energy, rad, exposure=None, psf_value=None, interp_kwargs=None):
self.energy = u.Quantity(energy).to("GeV")
self.rad = u.Quantity(rad).to("radian")
if exposure is None:
self.exposure = u.Quantity(np.ones(len(energy)), "cm^2 s")
else:
self.exposure = u.Quantity(exposure).to("cm^2 s")
if psf_value is None:
self.psf_value = u.Quantity(np.zeros(len(energy), len(rad)), "sr^-1")
else:
self.psf_value = u.Quantity(psf_value).to("sr^-1")
self._interp_kwargs = interp_kwargs or {}
    @lazyproperty
    def _interpolate(self):
        # Cached 2-d interpolator over the (energy, rad) grid; built lazily
        # on first access and stored in the instance dict by ``lazyproperty``.
        points = (self.energy.value, self.rad.value)
        return ScaledRegularGridInterpolator(
            points=points, values=self.psf_value, **self._interp_kwargs
        )
    @lazyproperty
    def _interpolate_containment(self):
        # Containment fraction interpolator: cumulative integral of
        # 2*pi*r*PSF(r) along the radial axis, per energy row.
        if self.rad[0] > 0:
            rad = self.rad.insert(0, 0)  # ensure the integral starts at r=0
        else:
            rad = self.rad

        rad_drad = 2 * np.pi * rad * self.evaluate(energy=self.energy, rad=rad)
        values = cumtrapz(
            rad_drad.to_value("rad-1"), rad.to_value("rad"), initial=0, axis=1
        )
        # NOTE(review): ``rad`` is passed as a Quantity while the energy points
        # are plain values — confirm the interpolator accepts this mix.
        points = (self.energy.value, rad)
        return ScaledRegularGridInterpolator(points=points, values=values, fill_value=1)
    def __str__(self):
        """Summary string: axis stats plus example containment radii."""
        ss = "EnergyDependentTablePSF\n"
        ss += "-----------------------\n"
        ss += "\nAxis info:\n"
        ss += "  " + array_stats_str(self.rad.to("deg"), "rad")
        ss += "  " + array_stats_str(self.energy, "energy")
        ss += "\nContainment info:\n"
        # Print some example containment radii
        fractions = [0.68, 0.95]
        energies = u.Quantity([10, 100], "GeV")
        for fraction in fractions:
            rads = self.containment_radius(energy=energies, fraction=fraction)
            for energy, rad in zip(energies, rads):
                ss += "  " + "{}% containment radius at {:3.0f}: {:.2f}\n".format(
                    100 * fraction, energy, rad
                )
        return ss
    @classmethod
    def from_fits(cls, hdu_list):
        """Create `EnergyDependentTablePSF` from ``gtpsf`` format HDU list.

        Parameters
        ----------
        hdu_list : `~astropy.io.fits.HDUList`
            HDU list with ``THETA`` and ``PSF`` extensions.

        Returns
        -------
        psf : `EnergyDependentTablePSF`
            Energy-dependent table PSF.
        """
        rad = Angle(hdu_list["THETA"].data["Theta"], "deg")
        energy = u.Quantity(hdu_list["PSF"].data["Energy"], "MeV")
        exposure = u.Quantity(hdu_list["PSF"].data["Exposure"], "cm^2 s")
        psf_value = u.Quantity(hdu_list["PSF"].data["PSF"], "sr^-1")
        return cls(energy, rad, exposure, psf_value)
    def to_fits(self):
        """Convert to FITS HDU list format.

        Returns
        -------
        hdu_list : `~astropy.io.fits.HDUList`
            PSF in HDU list format.
        """
        # TODO: write HEADER keywords as gtpsf
        # NOTE(review): ``BinTableHDU(data=...)`` is given raw Quantity
        # arrays/lists here — confirm astropy builds the intended columns.
        data = self.rad
        theta_hdu = fits.BinTableHDU(data=data, name="Theta")

        data = [self.energy, self.exposure, self.psf_value]
        psf_hdu = fits.BinTableHDU(data=data, name="PSF")

        hdu_list = fits.HDUList([theta_hdu, psf_hdu])
        return hdu_list
@classmethod
def read(cls, filename):
"""Create `EnergyDependentTablePSF` from ``gtpsf``-format FITS file.
Parameters
----------
filename : str
File name
"""
filename = str(make_path(filename))
with fits.open(filename, memmap=False) as hdulist:
psf = cls.from_fits(hdulist)
return psf
    def write(self, *args, **kwargs):
        """Write to FITS file.

        Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.
        """
        # thin convenience wrapper around to_fits() + writeto()
        self.to_fits().writeto(*args, **kwargs)
    def evaluate(self, energy=None, rad=None, method="linear"):
        """Evaluate the PSF at a given energy and offset

        Parameters
        ----------
        energy : `~astropy.units.Quantity`
            Energy value
        rad : `~astropy.coordinates.Angle`
            Offset wrt source position
        method : {"linear", "nearest"}
            Linear or nearest neighbour interpolation.

        Returns
        -------
        values : `~astropy.units.Quantity`
            Interpolated value
        """
        if energy is None:
            energy = self.energy

        if rad is None:
            rad = self.rad

        # Broadcast energy as a column vector against the rad row so the
        # result has shape (n_energy, n_rad).
        energy = np.atleast_1d(u.Quantity(energy, "GeV").value)[:, np.newaxis]
        rad = np.atleast_1d(u.Quantity(rad, "rad").value)
        return self._interpolate((energy, rad), clip=True, method=method)
def table_psf_at_energy(self, energy, method="linear", **kwargs):
"""Create `~gammapy.irf.TablePSF` at one given energy.
Parameters
----------
energy : `~astropy.units.Quantity`
Energy
method : {"linear", "nearest"}
Linear or nearest neighbour interpolation.
Returns
-------
psf : `~gammapy.irf.TablePSF`
Table PSF
"""
psf_value = self.evaluate(energy=energy, method=method)[0, :]
return TablePSF(self.rad, psf_value, **kwargs)
def table_psf_in_energy_band(self, energy_band, spectrum=None, n_bins=11, **kwargs):
"""Average PSF in a given energy band.
Expected counts in sub energy bands given the given exposure
and spectrum are used as weights.
Parameters
----------
energy_band : `~astropy.units.Quantity`
Energy band
spectrum : `SpectralModel`
Spectral model used for weighting the PSF. Default is a power law
with index=2.
n_bins : int
Number of energy points in the energy band, used to compute the
weigthed PSF.
Returns
-------
psf : `TablePSF`
Table PSF
"""
from ..spectrum.models import PowerLaw, TableModel
if spectrum is None:
spectrum = PowerLaw()
exposure = TableModel(self.energy, self.exposure)
e_min, e_max = energy_band
energy = Energy.equal_log_spacing(emin=e_min, emax=e_max, nbins=n_bins)
weights = (spectrum * exposure)(energy)
weights /= weights.sum()
psf_value = self.evaluate(energy=energy)
psf_value_weighted = weights[:, np.newaxis] * psf_value
return TablePSF(self.rad, psf_value_weighted.sum(axis=0), **kwargs)
def containment_radius(self, energy, fraction=0.68):
"""Containment radius.
Parameters
----------
energy : `~astropy.units.Quantity`
Energy
fraction : float
Containment fraction.
Returns
-------
rad : `~astropy.units.Quantity`
Containment radius in deg
"""
# upsamle for better precision
rad_max = Angle(np.linspace(0, self.rad[-1].value, 10 * len(self.rad)), "rad")
containment = self.containment(energy=energy, rad_max=rad_max)
# find nearest containment value
fraction_idx = np.argmin(np.abs(containment - fraction), axis=1)
return rad_max[fraction_idx].to("deg")
def containment(self, energy, rad_max):
"""Compute containment of the PSF.
Parameters
----------
energy : `~astropy.units.Quantity`
Energy
rad_max : `~astropy.coordinates.Angle`
Maximum offset angle.
Returns
-------
fraction : array_like
Containment fraction (in range 0 .. 1)
"""
energy = np.atleast_1d(u.Quantity(energy, "GeV").value)[:, np.newaxis]
rad_max = np.atleast_1d(u.Quantity(rad_max, "rad").value)
return self._interpolate_containment((energy, rad_max))
def info(self):
"""Print basic info"""
print(str(self))
def plot_psf_vs_rad(self, energies=None, ax=None, **kwargs):
"""Plot PSF vs radius.
Parameters
----------
energy : `~astropy.units.Quantity`
Energies where to plot the PSF.
**kwargs : dict
Keyword arguments pass to `~matplotlib.pyplot.plot`.
"""
import matplotlib.pyplot as plt
if energies is None:
energies = [100, 1000, 10000] * u.GeV
ax = plt.gca() if ax is None else ax
for energy in energies:
psf_value = np.squeeze(self.evaluate(energy=energy))
label = "{:.0f}".format(energy)
ax.plot(
self.rad.to_value("deg"),
psf_value.to_value("sr-1"),
label=label,
**kwargs
)
ax.set_yscale("log")
ax.set_xlabel("Offset (deg)")
ax.set_ylabel("PSF (1 / sr)")
plt.legend()
return ax
def plot_containment_vs_energy(
self, ax=None, fractions=[0.68, 0.8, 0.95], **kwargs
):
"""Plot containment versus energy."""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
for fraction in fractions:
rad = self.containment_radius(self.energy, fraction)
label = "{:.1f}% Containment".format(100 * fraction)
ax.plot(self.energy.value, rad.value, label=label, **kwargs)
ax.semilogx()
ax.legend(loc="best")
ax.set_xlabel("Energy (GeV)")
ax.set_ylabel("Containment radius (deg)")
def plot_exposure_vs_energy(self):
"""Plot exposure versus energy."""
import matplotlib.pyplot as plt
plt.figure(figsize=(4, 3))
plt.plot(self.energy, self.exposure, color="black", lw=3)
plt.semilogx()
plt.xlabel("Energy (MeV)")
plt.ylabel("Exposure (cm^2 s)")
plt.xlim(1e4 / 1.3, 1.3 * 1e6)
plt.ylim(0, 1.5e11)
plt.tight_layout()
def stack(self, psf):
"""Stack two EnergyDependentTablePSF objects.s
Parameters
----------
psf : `EnergyDependentTablePSF`
PSF to stack.
Returns
-------
stacked_psf : `EnergyDependentTablePSF`
Stacked PSF.
"""
exposure = self.exposure + psf.exposure
psf_value = self.psf_value.T * self.exposure + psf.psf_value.T * psf.exposure
with np.errstate(invalid="ignore"):
# exposure can be zero
psf_value = np.nan_to_num(psf_value / exposure)
return self.__class__(
energy=self.energy, rad=self.rad, psf_value=psf_value.T, exposure=exposure
)
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
import numpy as np
from astropy.io import fits
from astropy import units as u
from astropy.coordinates import Angle
from astropy.utils import lazyproperty
from scipy.integrate import cumtrapz
from ..utils.interpolation import ScaledRegularGridInterpolator
from ..utils.gauss import Gauss2DPDF
from ..utils.scripts import make_path
from ..utils.array import array_stats_str
from ..utils.energy import Energy
__all__ = ["TablePSF", "EnergyDependentTablePSF"]
log = logging.getLogger(__name__)
class TablePSF:
r"""Radially-symmetric table PSF.
Parameters
----------
rad : `~astropy.units.Quantity` with angle units
Offset wrt source position
psf_value : `~astropy.units.Quantity` with sr^-1 units
PSF value array
interp_kwargs : dict
Keyword arguments passed to `ScaledRegularGridInterpolator`
"""
def __init__(self, rad, psf_value, interp_kwargs=None):
self.rad = Angle(rad).to("rad")
self.psf_value = u.Quantity(psf_value).to("sr^-1")
self._interp_kwargs = interp_kwargs or {}
@lazyproperty
def _interpolate(self):
points = (self.rad.value,)
return ScaledRegularGridInterpolator(
points=points, values=self.psf_value, **self._interp_kwargs
)
@lazyproperty
def _interpolate_containment(self):
if self.rad[0] > 0:
rad = self.rad.insert(0, 0)
else:
rad = self.rad
rad_drad = 2 * np.pi * rad * self.evaluate(rad)
values = cumtrapz(rad_drad.to_value("rad-1"), rad.to_value("rad"), initial=0)
return ScaledRegularGridInterpolator(points=(rad,), values=values, fill_value=1)
@classmethod
def from_shape(cls, shape, width, rad):
"""Make TablePSF objects with commonly used shapes.
This function is mostly useful for examples and testing.
Parameters
----------
shape : {'disk', 'gauss'}
PSF shape.
width : `~astropy.units.Quantity` with angle units
PSF width angle (radius for disk, sigma for Gauss).
rad : `~astropy.units.Quantity` with angle units
Offset angle
Returns
-------
psf : `TablePSF`
Table PSF
Examples
--------
>>> import numpy as np
>>> from astropy.coordinates import Angle
>>> from gammapy.irf import TablePSF
>>> TablePSF.from_shape(shape='gauss', width='0.2 deg',
... rad=Angle(np.linspace(0, 0.7, 100), 'deg'))
"""
width = Angle(width)
rad = Angle(rad)
if shape == "disk":
amplitude = 1 / (np.pi * width.radian ** 2)
psf_value = np.where(rad < width, amplitude, 0)
elif shape == "gauss":
gauss2d_pdf = Gauss2DPDF(sigma=width.radian)
psf_value = gauss2d_pdf(rad.radian)
else:
raise ValueError("Invalid shape: {}".format(shape))
psf_value = u.Quantity(psf_value, "sr^-1")
return cls(rad, psf_value)
def info(self):
"""Print basic info."""
ss = array_stats_str(self.rad.deg, "offset")
ss += "integral = {}\n".format(self.integral())
for containment in [68, 80, 95]:
radius = self.containment_radius(0.01 * containment)
ss += "containment radius {} deg for {}%\n".format(radius.deg, containment)
return ss
def evaluate(self, rad):
r"""Evaluate PSF.
The following PSF quantities are available:
* 'dp_domega': PDF per 2-dim solid angle :math:`\Omega` in sr^-1
.. math:: \frac{dP}{d\Omega}
Parameters
----------
rad : `~astropy.coordinates.Angle`
Offset wrt source position
Returns
-------
psf_value : `~astropy.units.Quantity`
PSF value
"""
rad = np.atleast_1d(u.Quantity(rad, "rad").value)
return self._interpolate((rad,))
def containment(self, rad_max):
"""Compute PSF containment fraction.
Parameters
----------
rad_max : `~astropy.units.Quantity`
Offset angle range
Returns
-------
integral : float
PSF integral
"""
rad = np.atleast_1d(u.Quantity(rad_max, "rad").value)
return self._interpolate_containment((rad,))
def containment_radius(self, fraction):
"""Containment radius.
Parameters
----------
fraction : array_like
Containment fraction (range 0 .. 1)
Returns
-------
rad : `~astropy.coordinates.Angle`
Containment radius angle
"""
rad_max = Angle(np.linspace(0, self.rad[-1].value, 10 * len(self.rad)), "rad")
containment = self.containment(rad_max=rad_max)
fraction = np.atleast_1d(fraction)
fraction_idx = np.argmin(np.abs(containment - fraction[:, np.newaxis]), axis=1)
return rad_max[fraction_idx].to("deg")
def normalize(self):
"""Normalize PSF to unit integral.
Computes the total PSF integral via the :math:`dP / dr` spline
and then divides the :math:`dP / dr` array.
"""
integral = self.containment(self.rad[-1])
self.psf_value /= integral
def broaden(self, factor, normalize=True):
r"""Broaden PSF by scaling the offset array.
For a broadening factor :math:`f` and the offset
array :math:`r`, the offset array scaled
in the following way:
.. math::
r_{new} = f \times r_{old}
\frac{dP}{dr}(r_{new}) = \frac{dP}{dr}(r_{old})
Parameters
----------
factor : float
Broadening factor
normalize : bool
Normalize PSF after broadening
"""
self.rad *= factor
self._setup_interpolators()
if normalize:
self.normalize()
def plot_psf_vs_rad(self, ax=None, **kwargs):
"""Plot PSF vs radius.
Parameters
----------
ax : ``
kwargs : dict
Keyword arguments passed to `matplotlib.pyplot.plot`
"""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
ax.plot(self.rad.to_value("deg"), self.psf_value.to_value("sr-1"), **kwargs)
ax.set_yscale("log")
ax.set_xlabel("Radius (deg)")
ax.set_ylabel("PSF (sr-1)")
class EnergyDependentTablePSF:
"""Energy-dependent radially-symmetric table PSF (``gtpsf`` format).
TODO: add references and explanations.
Parameters
----------
energy : `~astropy.units.Quantity`
Energy (1-dim)
rad : `~astropy.units.Quantity` with angle units
Offset angle wrt source position (1-dim)
exposure : `~astropy.units.Quantity`
Exposure (1-dim)
psf_value : `~astropy.units.Quantity`
PSF (2-dim with axes: psf[energy_index, offset_index]
interp_kwargs : dict
Interpolation keyword arguments pass to `ScaledRegularGridInterpolator`.
"""
def __init__(self, energy, rad, exposure=None, psf_value=None, interp_kwargs=None):
self.energy = u.Quantity(energy).to("GeV")
self.rad = u.Quantity(rad).to("radian")
if exposure is None:
self.exposure = u.Quantity(np.ones(len(energy)), "cm^2 s")
else:
self.exposure = u.Quantity(exposure).to("cm^2 s")
if psf_value is None:
self.psf_value = u.Quantity(np.zeros(len(energy), len(rad)), "sr^-1")
else:
self.psf_value = u.Quantity(psf_value).to("sr^-1")
self._interp_kwargs = interp_kwargs or {}
@lazyproperty
def _interpolate(self):
points = (self.energy.value, self.rad.value)
return ScaledRegularGridInterpolator(
points=points, values=self.psf_value, **self._interp_kwargs
)
@lazyproperty
def _interpolate_containment(self):
if self.rad[0] > 0:
rad = self.rad.insert(0, 0)
else:
rad = self.rad
rad_drad = 2 * np.pi * rad * self.evaluate(energy=self.energy, rad=rad)
values = cumtrapz(
rad_drad.to_value("rad-1"), rad.to_value("rad"), initial=0, axis=1
)
points = (self.energy.value, rad)
return ScaledRegularGridInterpolator(points=points, values=values, fill_value=1)
def __str__(self):
ss = "EnergyDependentTablePSF\n"
ss += "-----------------------\n"
ss += "\nAxis info:\n"
ss += " " + array_stats_str(self.rad.to("deg"), "rad")
ss += " " + array_stats_str(self.energy, "energy")
ss += "\nContainment info:\n"
# Print some example containment radii
fractions = [0.68, 0.95]
energies = u.Quantity([10, 100], "GeV")
for fraction in fractions:
rads = self.containment_radius(energy=energies, fraction=fraction)
for energy, rad in zip(energies, rads):
ss += " " + "{}% containment radius at {:3.0f}: {:.2f}\n".format(
100 * fraction, energy, rad
)
return ss
@classmethod
def from_fits(cls, hdu_list):
"""Create `EnergyDependentTablePSF` from ``gtpsf`` format HDU list.
Parameters
----------
hdu_list : `~astropy.io.fits.HDUList`
HDU list with ``THETA`` and ``PSF`` extensions.
"""
rad = Angle(hdu_list["THETA"].data["Theta"], "deg")
energy = u.Quantity(hdu_list["PSF"].data["Energy"], "MeV")
exposure = u.Quantity(hdu_list["PSF"].data["Exposure"], "cm^2 s")
psf_value = u.Quantity(hdu_list["PSF"].data["PSF"], "sr^-1")
return cls(energy, rad, exposure, psf_value)
def to_fits(self):
"""Convert to FITS HDU list format.
Returns
-------
hdu_list : `~astropy.io.fits.HDUList`
PSF in HDU list format.
"""
# TODO: write HEADER keywords as gtpsf
data = self.rad
theta_hdu = fits.BinTableHDU(data=data, name="Theta")
data = [self.energy, self.exposure, self.psf_value]
psf_hdu = fits.BinTableHDU(data=data, name="PSF")
hdu_list = fits.HDUList([theta_hdu, psf_hdu])
return hdu_list
@classmethod
def read(cls, filename):
"""Create `EnergyDependentTablePSF` from ``gtpsf``-format FITS file.
Parameters
----------
filename : str
File name
"""
filename = str(make_path(filename))
with fits.open(filename, memmap=False) as hdulist:
psf = cls.from_fits(hdulist)
return psf
def write(self, *args, **kwargs):
"""Write to FITS file.
Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.
"""
self.to_fits().writeto(*args, **kwargs)
def evaluate(self, energy=None, rad=None, method="linear"):
"""Evaluate the PSF at a given energy and offset
Parameters
----------
energy : `~astropy.units.Quantity`
Energy value
rad : `~astropy.coordinates.Angle`
Offset wrt source position
method : {"linear", "nearest"}
Linear or nearest neighbour interpolation.
Returns
-------
values : `~astropy.units.Quantity`
Interpolated value
"""
if energy is None:
energy = self.energy
if rad is None:
rad = self.rad
energy = np.atleast_1d(u.Quantity(energy, "GeV").value)[:, np.newaxis]
rad = np.atleast_1d(u.Quantity(rad, "rad").value)
return self._interpolate((energy, rad), clip=True, method=method)
def table_psf_at_energy(self, energy, method="linear", **kwargs):
"""Create `~gammapy.irf.TablePSF` at one given energy.
Parameters
----------
energy : `~astropy.units.Quantity`
Energy
method : {"linear", "nearest"}
Linear or nearest neighbour interpolation.
Returns
-------
psf : `~gammapy.irf.TablePSF`
Table PSF
"""
psf_value = self.evaluate(energy=energy, method=method)[0, :]
return TablePSF(self.rad, psf_value, **kwargs)
def table_psf_in_energy_band(self, energy_band, spectrum=None, n_bins=11, **kwargs):
"""Average PSF in a given energy band.
Expected counts in sub energy bands given the given exposure
and spectrum are used as weights.
Parameters
----------
energy_band : `~astropy.units.Quantity`
Energy band
spectrum : `SpectralModel`
Spectral model used for weighting the PSF. Default is a power law
with index=2.
n_bins : int
Number of energy points in the energy band, used to compute the
weigthed PSF.
Returns
-------
psf : `TablePSF`
Table PSF
"""
from ..spectrum.models import PowerLaw, TableModel
if spectrum is None:
spectrum = PowerLaw()
exposure = TableModel(self.energy, self.exposure)
e_min, e_max = energy_band
energy = Energy.equal_log_spacing(emin=e_min, emax=e_max, nbins=n_bins)
weights = (spectrum * exposure)(energy)
weights /= weights.sum()
psf_value = self.evaluate(energy=energy)
psf_value_weighted = weights[:, np.newaxis] * psf_value
return TablePSF(self.rad, psf_value_weighted.sum(axis=0), **kwargs)
def containment_radius(self, energy, fraction=0.68):
"""Containment radius.
Parameters
----------
energy : `~astropy.units.Quantity`
Energy
fraction : float
Containment fraction.
Returns
-------
rad : `~astropy.units.Quantity`
Containment radius in deg
"""
# upsamle for better precision
rad_max = Angle(np.linspace(0, self.rad[-1].value, 10 * len(self.rad)), "rad")
containment = self.containment(energy=energy, rad_max=rad_max)
# find nearest containment value
fraction_idx = np.argmin(np.abs(containment - fraction), axis=1)
return rad_max[fraction_idx].to("deg")
def containment(self, energy, rad_max):
"""Compute containment of the PSF.
Parameters
----------
energy : `~astropy.units.Quantity`
Energy
rad_max : `~astropy.coordinates.Angle`
Maximum offset angle.
Returns
-------
fraction : array_like
Containment fraction (in range 0 .. 1)
"""
energy = np.atleast_1d(u.Quantity(energy, "GeV").value)[:, np.newaxis]
rad_max = np.atleast_1d(u.Quantity(rad_max, "rad").value)
return self._interpolate_containment((energy, rad_max))
def info(self):
"""Print basic info"""
print(str(self))
def plot_psf_vs_rad(self, energies=None, ax=None, **kwargs):
"""Plot PSF vs radius.
Parameters
----------
energy : `~astropy.units.Quantity`
Energies where to plot the PSF.
**kwargs : dict
Keyword arguments pass to `~matplotlib.pyplot.plot`.
"""
import matplotlib.pyplot as plt
if energies is None:
energies = [100, 1000, 10000] * u.GeV
ax = plt.gca() if ax is None else ax
for energy in energies:
psf_value = np.squeeze(self.evaluate(energy=energy))
label = "{:.0f}".format(energy)
ax.plot(
self.rad.to_value("deg"),
psf_value.to_value("sr-1"),
label=label,
**kwargs
)
ax.set_yscale("log")
ax.set_xlabel("Offset (deg)")
ax.set_ylabel("PSF (1 / sr)")
plt.legend()
return ax
def plot_containment_vs_energy(
self, ax=None, fractions=[0.68, 0.8, 0.95], **kwargs
):
"""Plot containment versus energy."""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
for fraction in fractions:
rad = self.containment_radius(self.energy, fraction)
label = "{:.1f}% Containment".format(100 * fraction)
ax.plot(self.energy.value, rad.value, label=label, **kwargs)
ax.semilogx()
ax.legend(loc="best")
ax.set_xlabel("Energy (GeV)")
ax.set_ylabel("Containment radius (deg)")
def plot_exposure_vs_energy(self):
"""Plot exposure versus energy."""
import matplotlib.pyplot as plt
plt.figure(figsize=(4, 3))
plt.plot(self.energy, self.exposure, color="black", lw=3)
plt.semilogx()
plt.xlabel("Energy (MeV)")
plt.ylabel("Exposure (cm^2 s)")
plt.xlim(1e4 / 1.3, 1.3 * 1e6)
plt.ylim(0, 1.5e11)
plt.tight_layout()
def stack(self, psf):
"""Stack two EnergyDependentTablePSF objects.s
Parameters
----------
psf : `EnergyDependentTablePSF`
PSF to stack.
Returns
-------
stacked_psf : `EnergyDependentTablePSF`
Stacked PSF.
"""
exposure = self.exposure + psf.exposure
psf_value = self.psf_value.T * self.exposure + psf.psf_value.T * psf.exposure
with np.errstate(invalid="ignore"):
# exposure can be zero
psf_value = np.nan_to_num(psf_value / exposure)
return self.__class__(
energy=self.energy, rad=self.rad, psf_value=psf_value.T, exposure=exposure
)
|
en
| 0.410042
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst Radially-symmetric table PSF. Parameters ---------- rad : `~astropy.units.Quantity` with angle units Offset wrt source position psf_value : `~astropy.units.Quantity` with sr^-1 units PSF value array interp_kwargs : dict Keyword arguments passed to `ScaledRegularGridInterpolator` Make TablePSF objects with commonly used shapes. This function is mostly useful for examples and testing. Parameters ---------- shape : {'disk', 'gauss'} PSF shape. width : `~astropy.units.Quantity` with angle units PSF width angle (radius for disk, sigma for Gauss). rad : `~astropy.units.Quantity` with angle units Offset angle Returns ------- psf : `TablePSF` Table PSF Examples -------- >>> import numpy as np >>> from astropy.coordinates import Angle >>> from gammapy.irf import TablePSF >>> TablePSF.from_shape(shape='gauss', width='0.2 deg', ... rad=Angle(np.linspace(0, 0.7, 100), 'deg')) Print basic info. Evaluate PSF. The following PSF quantities are available: * 'dp_domega': PDF per 2-dim solid angle :math:`\Omega` in sr^-1 .. math:: \frac{dP}{d\Omega} Parameters ---------- rad : `~astropy.coordinates.Angle` Offset wrt source position Returns ------- psf_value : `~astropy.units.Quantity` PSF value Compute PSF containment fraction. Parameters ---------- rad_max : `~astropy.units.Quantity` Offset angle range Returns ------- integral : float PSF integral Containment radius. Parameters ---------- fraction : array_like Containment fraction (range 0 .. 1) Returns ------- rad : `~astropy.coordinates.Angle` Containment radius angle Normalize PSF to unit integral. Computes the total PSF integral via the :math:`dP / dr` spline and then divides the :math:`dP / dr` array. Broaden PSF by scaling the offset array. For a broadening factor :math:`f` and the offset array :math:`r`, the offset array scaled in the following way: .. 
math:: r_{new} = f \times r_{old} \frac{dP}{dr}(r_{new}) = \frac{dP}{dr}(r_{old}) Parameters ---------- factor : float Broadening factor normalize : bool Normalize PSF after broadening Plot PSF vs radius. Parameters ---------- ax : `` kwargs : dict Keyword arguments passed to `matplotlib.pyplot.plot` Energy-dependent radially-symmetric table PSF (``gtpsf`` format). TODO: add references and explanations. Parameters ---------- energy : `~astropy.units.Quantity` Energy (1-dim) rad : `~astropy.units.Quantity` with angle units Offset angle wrt source position (1-dim) exposure : `~astropy.units.Quantity` Exposure (1-dim) psf_value : `~astropy.units.Quantity` PSF (2-dim with axes: psf[energy_index, offset_index] interp_kwargs : dict Interpolation keyword arguments pass to `ScaledRegularGridInterpolator`. # Print some example containment radii Create `EnergyDependentTablePSF` from ``gtpsf`` format HDU list. Parameters ---------- hdu_list : `~astropy.io.fits.HDUList` HDU list with ``THETA`` and ``PSF`` extensions. Convert to FITS HDU list format. Returns ------- hdu_list : `~astropy.io.fits.HDUList` PSF in HDU list format. # TODO: write HEADER keywords as gtpsf Create `EnergyDependentTablePSF` from ``gtpsf``-format FITS file. Parameters ---------- filename : str File name Write to FITS file. Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments. Evaluate the PSF at a given energy and offset Parameters ---------- energy : `~astropy.units.Quantity` Energy value rad : `~astropy.coordinates.Angle` Offset wrt source position method : {"linear", "nearest"} Linear or nearest neighbour interpolation. Returns ------- values : `~astropy.units.Quantity` Interpolated value Create `~gammapy.irf.TablePSF` at one given energy. Parameters ---------- energy : `~astropy.units.Quantity` Energy method : {"linear", "nearest"} Linear or nearest neighbour interpolation. Returns ------- psf : `~gammapy.irf.TablePSF` Table PSF Average PSF in a given energy band. 
Expected counts in sub energy bands given the given exposure and spectrum are used as weights. Parameters ---------- energy_band : `~astropy.units.Quantity` Energy band spectrum : `SpectralModel` Spectral model used for weighting the PSF. Default is a power law with index=2. n_bins : int Number of energy points in the energy band, used to compute the weigthed PSF. Returns ------- psf : `TablePSF` Table PSF Containment radius. Parameters ---------- energy : `~astropy.units.Quantity` Energy fraction : float Containment fraction. Returns ------- rad : `~astropy.units.Quantity` Containment radius in deg # upsamle for better precision # find nearest containment value Compute containment of the PSF. Parameters ---------- energy : `~astropy.units.Quantity` Energy rad_max : `~astropy.coordinates.Angle` Maximum offset angle. Returns ------- fraction : array_like Containment fraction (in range 0 .. 1) Print basic info Plot PSF vs radius. Parameters ---------- energy : `~astropy.units.Quantity` Energies where to plot the PSF. **kwargs : dict Keyword arguments pass to `~matplotlib.pyplot.plot`. Plot containment versus energy. Plot exposure versus energy. Stack two EnergyDependentTablePSF objects.s Parameters ---------- psf : `EnergyDependentTablePSF` PSF to stack. Returns ------- stacked_psf : `EnergyDependentTablePSF` Stacked PSF. # exposure can be zero
| 2.132038
| 2
|
JumpScale9Lib/clients/etcd/EtcdFactory.py
|
Jumpscale/lib9
| 2
|
6629223
|
<reponame>Jumpscale/lib9
from js9 import j
from .EtcdClient import EtcdClient
JSConfigFactoryBase = j.tools.configmanager.base_class_configs
class EtcdFactory(JSConfigFactoryBase):
def __init__(self):
self.__jslocation__ = "j.clients.etcd"
super().__init__(child_class=EtcdClient)
|
from js9 import j
from .EtcdClient import EtcdClient
JSConfigFactoryBase = j.tools.configmanager.base_class_configs
class EtcdFactory(JSConfigFactoryBase):
def __init__(self):
self.__jslocation__ = "j.clients.etcd"
super().__init__(child_class=EtcdClient)
|
none
| 1
| 1.710135
| 2
|
|
landlord/migrations/0009_auto_20210608_0749.py
|
manishwins/Greenline
| 0
|
6629224
|
<gh_stars>0
# Generated by Django 3.1.7 on 2021-06-08 07:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('landlord', '0008_auto_20210607_1050'),
]
operations = [
migrations.AddField(
model_name='properties',
name='address',
field=models.TextField(blank=True, null=True),
),
migrations.AlterModelTable(
name='propertyimage',
table='property_image',
),
]
|
# Generated by Django 3.1.7 on 2021-06-08 07:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('landlord', '0008_auto_20210607_1050'),
]
operations = [
migrations.AddField(
model_name='properties',
name='address',
field=models.TextField(blank=True, null=True),
),
migrations.AlterModelTable(
name='propertyimage',
table='property_image',
),
]
|
en
| 0.781074
|
# Generated by Django 3.1.7 on 2021-06-08 07:49
| 1.54682
| 2
|
kattis/kemija.py
|
terror/Solutions
| 2
|
6629225
|
<reponame>terror/Solutions
s = input()
s = s.replace('apa', 'a').replace('epe', 'e').replace('ipi', 'i').replace('opo', 'o').replace('upu', 'u')
print(s)
|
s = input()
s = s.replace('apa', 'a').replace('epe', 'e').replace('ipi', 'i').replace('opo', 'o').replace('upu', 'u')
print(s)
|
none
| 1
| 3.529792
| 4
|
|
src/OV7670FIFO.py
|
ESPuPy/ESP32WiFiCAM-OV7670
| 3
|
6629226
|
<reponame>ESPuPy/ESP32WiFiCAM-OV7670
#-------------------------------------------
#
# ESP32 WiFi Camera (OV7670 FIFO Version)
# ESP32WiFiCAM-OV7670
#
# file:OV7670FIFO.py
#
from machine import Pin
import utime
class OV7670FIFO():
"""OV7670 FIFO Conrol class"""
def __init__(self, spi, vsync, rdclk, we, rrst, wrst, pl):
self.spi = spi
self.vsync = Pin(vsync, Pin.IN) # OV VSYNC No39
self.rd_clk = Pin(rdclk, Pin.OUT) # FIFO ReadClock No0
self.we = Pin(we, Pin.OUT) # FIFO Write Enable No26
self.rrst = Pin(rrst, Pin.OUT) # FIFO ReadReset No21
self.wrst = Pin(wrst, Pin.OUT) # FIFO Write Reset No22
self.pl = Pin(pl, Pin.OUT) # TTL PL No25
self.pl.on()
self.rd_clk.on() # set RD CLK H
#
# from AL422 Data Sheets(Revision V1.1)
#
def readReset(self):
"""Reset FIFO Read Address"""
self.rrst.on() # set ReadReset:H
self.rd_clk.on()
self.rrst.off() # set ReadReset:L
self.rd_clk.off()
self.rd_clk.on()
self.rrst.on() # set ReadReset:H
def FIFOWriteReset(self):
"""Reset FIFO Write Address"""
self.wrst.on()
self.wrst.off()
self.wrst.on()
def FIFOWriteEnable(self):
self.we.on()
def FIFOWriteDisable(self):
self.we.off()
def readClockOneShot(self): # RD_CLK:H->L->H
self.rd_clk.on()
self.rd_clk.off()
self.rd_clk.on()
def dumpFIFO(self, yy=16):
for y in range(yy):
for x in range(16):
self.rd_clk.on()
self.pl.off()
self.pl.on()
pixel = self.spi.read(1)
print('{:02x} '.format(pixel[0]), end='')
self.rd_clk.off()
print('')
def takePicture(self, verb=False):
prevLevel=0
weLevel=0
takePicture=False
self.FIFOWriteDisable()
self.FIFOWriteReset()
while not takePicture:
nowLevel = self.vsync.value()
if nowLevel == 1: # Holizontal Sync
if prevLevel == 0: # Holizonal Sync L->H Edge
#print('\nL->H', end='')
if weLevel == 0:
self.FIFOWriteEnable()
weLevel=1
if verb:
print('Shutter On')
else:
self.FIFOWriteDisable()
weLevel=0
takePicture=True
if verb:
print('Shutter Off')
prevLevel=nowLevel
if takePicture:
print('OK! take picture')
else:
print('fail to take picture')
self.FIFOWriteDisable()
self.FIFOWriteReset()
#
# buf.append() = self.spi.read(1)[0] takes 4.57
#
def getPixelFromFIFO(self, buf, size, readReset=False):
tmpBuf=bytearray(1)
if readReset:
self.readReset()
for i in range(size):
self.rd_clk.on()
self.pl.off()
self.pl.on()
self.spi.readinto(tmpBuf)
buf[i]=tmpBuf[0]
self.rd_clk.off()
def getImageAndSave(self, imageSize, fileName):
BUFSIZE=512
self.readReset()
self.pl.on()
print(fileName)
s = utime.ticks_ms()
f = open(fileName, mode='wb')
scale=20 # indicator max value is 20
gaugeScale = int(imageSize/scale)
buf=bytearray(BUFSIZE)
for x in range(0, imageSize, BUFSIZE):
self.getPixelFromFIFO(buf, BUFSIZE, False)
f.write(buf)
if (x % gaugeScale) == 0 :
print('{:02d} '.format(scale - int(x/gaugeScale)), end='')
f.close()
print("\n")
e = utime.ticks_ms()
print("save time:{:d}(ms)".format(utime.ticks_diff(e, s)))
return True
|
#-------------------------------------------
#
# ESP32 WiFi Camera (OV7670 FIFO Version)
# ESP32WiFiCAM-OV7670
#
# file:OV7670FIFO.py
#
from machine import Pin
import utime
class OV7670FIFO():
"""OV7670 FIFO Conrol class"""
def __init__(self, spi, vsync, rdclk, we, rrst, wrst, pl):
self.spi = spi
self.vsync = Pin(vsync, Pin.IN) # OV VSYNC No39
self.rd_clk = Pin(rdclk, Pin.OUT) # FIFO ReadClock No0
self.we = Pin(we, Pin.OUT) # FIFO Write Enable No26
self.rrst = Pin(rrst, Pin.OUT) # FIFO ReadReset No21
self.wrst = Pin(wrst, Pin.OUT) # FIFO Write Reset No22
self.pl = Pin(pl, Pin.OUT) # TTL PL No25
self.pl.on()
self.rd_clk.on() # set RD CLK H
#
# from AL422 Data Sheets(Revision V1.1)
#
def readReset(self):
"""Reset FIFO Read Address"""
self.rrst.on() # set ReadReset:H
self.rd_clk.on()
self.rrst.off() # set ReadReset:L
self.rd_clk.off()
self.rd_clk.on()
self.rrst.on() # set ReadReset:H
def FIFOWriteReset(self):
"""Reset FIFO Write Address"""
self.wrst.on()
self.wrst.off()
self.wrst.on()
def FIFOWriteEnable(self):
self.we.on()
def FIFOWriteDisable(self):
self.we.off()
def readClockOneShot(self): # RD_CLK:H->L->H
self.rd_clk.on()
self.rd_clk.off()
self.rd_clk.on()
def dumpFIFO(self, yy=16):
for y in range(yy):
for x in range(16):
self.rd_clk.on()
self.pl.off()
self.pl.on()
pixel = self.spi.read(1)
print('{:02x} '.format(pixel[0]), end='')
self.rd_clk.off()
print('')
def takePicture(self, verb=False):
prevLevel=0
weLevel=0
takePicture=False
self.FIFOWriteDisable()
self.FIFOWriteReset()
while not takePicture:
nowLevel = self.vsync.value()
if nowLevel == 1: # Holizontal Sync
if prevLevel == 0: # Holizonal Sync L->H Edge
#print('\nL->H', end='')
if weLevel == 0:
self.FIFOWriteEnable()
weLevel=1
if verb:
print('Shutter On')
else:
self.FIFOWriteDisable()
weLevel=0
takePicture=True
if verb:
print('Shutter Off')
prevLevel=nowLevel
if takePicture:
print('OK! take picture')
else:
print('fail to take picture')
self.FIFOWriteDisable()
self.FIFOWriteReset()
#
# buf.append() = self.spi.read(1)[0] takes 4.57
#
def getPixelFromFIFO(self, buf, size, readReset=False):
tmpBuf=bytearray(1)
if readReset:
self.readReset()
for i in range(size):
self.rd_clk.on()
self.pl.off()
self.pl.on()
self.spi.readinto(tmpBuf)
buf[i]=tmpBuf[0]
self.rd_clk.off()
def getImageAndSave(self, imageSize, fileName):
BUFSIZE=512
self.readReset()
self.pl.on()
print(fileName)
s = utime.ticks_ms()
f = open(fileName, mode='wb')
scale=20 # indicator max value is 20
gaugeScale = int(imageSize/scale)
buf=bytearray(BUFSIZE)
for x in range(0, imageSize, BUFSIZE):
self.getPixelFromFIFO(buf, BUFSIZE, False)
f.write(buf)
if (x % gaugeScale) == 0 :
print('{:02d} '.format(scale - int(x/gaugeScale)), end='')
f.close()
print("\n")
e = utime.ticks_ms()
print("save time:{:d}(ms)".format(utime.ticks_diff(e, s)))
return True
|
en
| 0.307006
|
#------------------------------------------- # # ESP32 WiFi Camera (OV7670 FIFO Version) # ESP32WiFiCAM-OV7670 # # file:OV7670FIFO.py # OV7670 FIFO Conrol class # OV VSYNC No39 # FIFO ReadClock No0 # FIFO Write Enable No26 # FIFO ReadReset No21 # FIFO Write Reset No22 # TTL PL No25 # set RD CLK H # # from AL422 Data Sheets(Revision V1.1) # Reset FIFO Read Address # set ReadReset:H # set ReadReset:L # set ReadReset:H Reset FIFO Write Address # RD_CLK:H->L->H # Holizontal Sync # Holizonal Sync L->H Edge #print('\nL->H', end='') # # buf.append() = self.spi.read(1)[0] takes 4.57 # # indicator max value is 20
| 2.59782
| 3
|
src/ipyradiant/__init__.py
|
RhythmSyed/ipyradiant
| 0
|
6629227
|
""" ipyradiant main file
"""
# Copyright (c) 2020 ipyradiant contributors.
# Distributed under the terms of the Modified BSD License.
from ._version import __version__
from .basic_tools import (
CustomURIRef,
MultiPanelSelect,
PredicateMultiselectApp,
collapse_predicates,
)
from .loader import FileManager, PathLoader, UpLoader
from .query import QueryWidget, service_patch_rdflib
from .visualization import CytoscapeVisualizer, DatashaderVisualizer, LayoutSelector
__all__ = [
"__version__",
"CytoscapeVisualizer",
"DatashaderVisualizer",
"FileManager",
"LayoutSelector",
"PathLoader",
"QueryWidget",
"UpLoader",
"MultiPanelSelect",
"service_patch_rdflib",
"CustomURIRef",
"PredicateMultiselectApp",
"collapse_predicates",
]
|
""" ipyradiant main file
"""
# Copyright (c) 2020 ipyradiant contributors.
# Distributed under the terms of the Modified BSD License.
from ._version import __version__
from .basic_tools import (
CustomURIRef,
MultiPanelSelect,
PredicateMultiselectApp,
collapse_predicates,
)
from .loader import FileManager, PathLoader, UpLoader
from .query import QueryWidget, service_patch_rdflib
from .visualization import CytoscapeVisualizer, DatashaderVisualizer, LayoutSelector
__all__ = [
"__version__",
"CytoscapeVisualizer",
"DatashaderVisualizer",
"FileManager",
"LayoutSelector",
"PathLoader",
"QueryWidget",
"UpLoader",
"MultiPanelSelect",
"service_patch_rdflib",
"CustomURIRef",
"PredicateMultiselectApp",
"collapse_predicates",
]
|
en
| 0.757013
|
ipyradiant main file # Copyright (c) 2020 ipyradiant contributors. # Distributed under the terms of the Modified BSD License.
| 1.055114
| 1
|
roblox/abc.py
|
jpatrickdill/roblox.py
| 1
|
6629228
|
# ABC classes for Roblox
from __future__ import annotations
from abc import ABCMeta, abstractmethod
from datetime import datetime
from typing import AsyncGenerator, Optional, List, Union
from roblox.enums import AssetType
class User(metaclass=ABCMeta):
"""An ABC that details common operations on a Roblox user."""
# @classmethod
# def __subclasshook__(cls, C):
# if cls is User:
# mro = C.__mro__
# for attr in ("username", "id", "description", "status", "created_at", "banned"):
# for base in mro:
# if attr in base.__dict__:
# break
# else:
# return NotImplemented
# return True
# return NotImplemented
@property
@abstractmethod
async def id(self) -> int:
"""
Async property that returns the User's ID.
"""
raise NotImplemented
@property
@abstractmethod
async def username(self) -> str:
"""
Async property that returns the User's username.
"""
raise NotImplemented
@property
@abstractmethod
async def url(self) -> str:
"""
Async property that returns the User's profile URL.
"""
raise NotImplemented
@property
@abstractmethod
async def created_at(self) -> datetime:
"""
Async property that returns the datetime at which the user was created.
"""
raise NotImplemented
@property
@abstractmethod
async def description(self) -> str:
"""
Async property that returns the User's profile description.
"""
raise NotImplemented
@abstractmethod
async def status(self) -> str:
"""
Returns the User's current status.
"""
raise NotImplemented
@property
@abstractmethod
async def is_banned(self) -> bool:
"""
Async property that returns whether the user is banned.
"""
raise NotImplemented
@property
@abstractmethod
async def is_premium(self) -> bool:
"""
Async property that returns whether the user has a premium subscription.
"""
raise NotImplemented
@abstractmethod
async def friends(self) -> AsyncGenerator[User]:
"""
Async Generator yielding the user's friends.
"""
raise NotImplemented
@abstractmethod
async def is_friends(self, other: Optional[User] = None) -> bool:
"""
Checks whether this user is friends with another user or the client user.
"""
raise NotImplemented
@property
@abstractmethod
def followers(self):
"""
Property that returns FollowerList for this user.
"""
raise NotImplemented
@property
@abstractmethod
def followings(self):
"""
Property that returns FollowingsList for this user.
"""
raise NotImplemented
@property
@abstractmethod
def inventory(self):
"""
Property that returns Inventory for this user.
"""
raise NotImplemented
@abstractmethod
def games(self) -> AsyncGenerator[Universe, None]:
"""
Async Generator that yields the user's games.
"""
raise NotImplemented
class ClientUser(metaclass=ABCMeta):
"""An ABC that details operations on the client user."""
@abstractmethod
async def set_status(self, status: str) -> str:
"""
Sets the client user's status.
:param status: New status.
:return: Moderated status.
"""
raise NotImplemented
@property
@abstractmethod
async def robux(self) -> int:
"""
Returns the client user's amount of currency.
"""
class OtherUser(metaclass=ABCMeta):
"""An ABC that details operations on non-client users."""
async def follow(self):
"""Follows this user from the client user."""
raise NotImplemented
async def unfollow(self):
"""Unfollows this user from the client user."""
raise NotImplemented
async def request_friendship(self):
"""Sends a friend request to this user."""
raise NotImplemented
async def unfriend(self):
"""Unfriends this user.."""
raise NotImplemented
class DisplayPage(metaclass=ABCMeta):
"""An ABC that details an object with a display page, such as an asset, place, or universe."""
@property
@abstractmethod
async def id(self) -> int:
"""
Async property that returns the object's ID.
"""
raise NotImplemented
@property
@abstractmethod
async def name(self) -> str:
"""
Async property that returns the object's name.
"""
raise NotImplemented
@property
@abstractmethod
async def description(self) -> str:
"""
Async property that returns the object's description.
"""
raise NotImplemented
@property
@abstractmethod
async def url(self) -> str:
"""
Async property that returns the object's URL.
"""
raise NotImplemented
@property
@abstractmethod
async def created_at(self) -> datetime:
"""
Async property that returns when the object was created.
"""
raise NotImplemented
@property
@abstractmethod
async def updated_at(self) -> datetime:
"""
Async property that returns when the object was last updated.
"""
raise NotImplemented
class Votable(metaclass=ABCMeta):
"""ABC that represents on object that can be voted on, e.g., favorites, thumbs-up, thumbs-down"""
@property
@abstractmethod
async def favorites(self) -> int:
"""
Async property that returns the asset's current number of favorites.
"""
raise NotImplemented
@property
@abstractmethod
async def is_favorited(self) -> bool:
"""
Async property that returns whether the asset is favorited by the client.
"""
raise NotImplemented
@abstractmethod
async def favorite(self):
"""
Favorites the asset for the client user.
"""
raise NotImplemented
@abstractmethod
async def unfavorite(self):
"""
Unfavorites the asset for the client user.
"""
raise NotImplemented
class Asset(DisplayPage, Votable, metaclass=ABCMeta):
"""An ABC that details common operations on a Roblox asset."""
@property
@abstractmethod
async def type(self) -> AssetType:
"""
Async property that returns the Asset's type.
"""
raise NotImplemented
@property
@abstractmethod
async def price(self) -> int:
"""
Async property that returns the asset's current price in Robux.
"""
raise NotImplemented
@property
@abstractmethod
async def for_sale(self) -> bool:
"""
Async property that returns whether the asset can be purchased.
"""
raise NotImplemented
@property
@abstractmethod
async def creator(self) -> User:
"""
Async property that returns the creator of the asset.
"""
raise NotImplemented
@abstractmethod
async def purchase(self, expected_price: Optional[int] = None):
"""
Purchases the asset for the client user. If `expected_price` is specified, the asset will not be
purchased unless the `expected_price` matches the current price.
"""
raise NotImplemented
@abstractmethod
async def delete(self):
"""
Deletes asset from the client user's inventory.
"""
class Place(Asset, metaclass=ABCMeta):
"""An ABC that details operations on a Roblox Place asset."""
@property
@abstractmethod
async def universe(self) -> Universe:
"""Async property that returns the Universe the place belongs to."""
raise NotImplemented
class Universe(DisplayPage, Votable, metaclass=ABCMeta):
"""An ABC that details common operations on a Roblox Universe (Game)."""
@property
@abstractmethod
async def visits(self) -> int:
"""Async property that returns the number of visits to this game."""
raise NotImplemented
@property
@abstractmethod
async def playing(self) -> int:
"""Async property that returns the number of players in this game."""
raise NotImplemented
@property
@abstractmethod
async def max_players(self) -> int:
"""Async property that returns the max players per server in this game."""
raise NotImplemented
@property
@abstractmethod
async def root_place(self) -> Place:
"""Async property that returns the universe's root place."""
raise NotImplemented
class Group(DisplayPage, metaclass=ABCMeta):
"""ABC detailing operations on a Roblox Group."""
@property
@abstractmethod
async def owner(self) -> Optional[User]:
"""Async property that returns the group's current owner, if it has one."""
raise NotImplemented
@property
@abstractmethod
async def shout(self) -> Optional[Shout]:
"""Async property that returns the group's current shout."""
raise NotImplemented
@property
@abstractmethod
async def members(self) -> AsyncGenerator[GroupMember, None]:
"""Async generator that yields the group's members."""
raise NotImplemented
@abstractmethod
async def get_member(self, user: Union[User, str, int]) -> GroupMember:
"""Tries to find a group member given a username, user ID, or User object."""
raise NotImplemented
@property
@abstractmethod
async def is_public(self) -> bool:
"""Async property that returns whether the group allows public entry."""
raise NotImplemented
@property
@abstractmethod
async def roles(self) -> List[Role]:
"""Async property that returns a list of the group's roles"""
raise NotImplemented
class GroupMember(User, metaclass=ABCMeta):
"""ABC describing operations on a Group Member."""
@property
@abstractmethod
async def role(self) -> Role:
"""Async property that eturns the member's group role."""
raise NotImplemented
@property
@abstractmethod
async def rank(self) -> int:
"""Shortcut for the numerical rank of the member's role."""
raise NotImplemented
class Role(metaclass=ABCMeta):
"""ABC describing a group roleset."""
@property
@abstractmethod
async def id(self) -> int:
"""Async property that returns the role's ID."""
raise NotImplemented
@property
@abstractmethod
async def name(self) -> str:
"""Async property that returns the role's name."""
raise NotImplemented
@property
@abstractmethod
async def description(self) -> str:
"""Async property that returns the role's description."""
raise NotImplemented
@property
@abstractmethod
async def rank(self) -> int:
"""Async property that returns the role's numerical rank."""
raise NotImplemented
@property
@abstractmethod
async def member_count(self) -> int:
"""Async property that returns the number of members with this role."""
raise NotImplemented
class Shout(metaclass=ABCMeta):
"""ABC describing a group shout."""
@property
@abstractmethod
def body(self) -> str:
"""Returns the shout's body."""
raise NotImplemented
@property
@abstractmethod
def created_at(self) -> datetime:
"""Returns the time the shout was created at."""
raise NotImplemented
@property
@abstractmethod
async def poster(self) -> User:
"""Returns the user who posted the shout."""
raise NotImplemented
|
# ABC classes for Roblox
from __future__ import annotations
from abc import ABCMeta, abstractmethod
from datetime import datetime
from typing import AsyncGenerator, Optional, List, Union
from roblox.enums import AssetType
class User(metaclass=ABCMeta):
"""An ABC that details common operations on a Roblox user."""
# @classmethod
# def __subclasshook__(cls, C):
# if cls is User:
# mro = C.__mro__
# for attr in ("username", "id", "description", "status", "created_at", "banned"):
# for base in mro:
# if attr in base.__dict__:
# break
# else:
# return NotImplemented
# return True
# return NotImplemented
@property
@abstractmethod
async def id(self) -> int:
"""
Async property that returns the User's ID.
"""
raise NotImplemented
@property
@abstractmethod
async def username(self) -> str:
"""
Async property that returns the User's username.
"""
raise NotImplemented
@property
@abstractmethod
async def url(self) -> str:
"""
Async property that returns the User's profile URL.
"""
raise NotImplemented
@property
@abstractmethod
async def created_at(self) -> datetime:
"""
Async property that returns the datetime at which the user was created.
"""
raise NotImplemented
@property
@abstractmethod
async def description(self) -> str:
"""
Async property that returns the User's profile description.
"""
raise NotImplemented
@abstractmethod
async def status(self) -> str:
"""
Returns the User's current status.
"""
raise NotImplemented
@property
@abstractmethod
async def is_banned(self) -> bool:
"""
Async property that returns whether the user is banned.
"""
raise NotImplemented
@property
@abstractmethod
async def is_premium(self) -> bool:
"""
Async property that returns whether the user has a premium subscription.
"""
raise NotImplemented
@abstractmethod
async def friends(self) -> AsyncGenerator[User]:
"""
Async Generator yielding the user's friends.
"""
raise NotImplemented
@abstractmethod
async def is_friends(self, other: Optional[User] = None) -> bool:
"""
Checks whether this user is friends with another user or the client user.
"""
raise NotImplemented
@property
@abstractmethod
def followers(self):
"""
Property that returns FollowerList for this user.
"""
raise NotImplemented
@property
@abstractmethod
def followings(self):
"""
Property that returns FollowingsList for this user.
"""
raise NotImplemented
@property
@abstractmethod
def inventory(self):
"""
Property that returns Inventory for this user.
"""
raise NotImplemented
@abstractmethod
def games(self) -> AsyncGenerator[Universe, None]:
"""
Async Generator that yields the user's games.
"""
raise NotImplemented
class ClientUser(metaclass=ABCMeta):
"""An ABC that details operations on the client user."""
@abstractmethod
async def set_status(self, status: str) -> str:
"""
Sets the client user's status.
:param status: New status.
:return: Moderated status.
"""
raise NotImplemented
@property
@abstractmethod
async def robux(self) -> int:
"""
Returns the client user's amount of currency.
"""
class OtherUser(metaclass=ABCMeta):
"""An ABC that details operations on non-client users."""
async def follow(self):
"""Follows this user from the client user."""
raise NotImplemented
async def unfollow(self):
"""Unfollows this user from the client user."""
raise NotImplemented
async def request_friendship(self):
"""Sends a friend request to this user."""
raise NotImplemented
async def unfriend(self):
"""Unfriends this user.."""
raise NotImplemented
class DisplayPage(metaclass=ABCMeta):
"""An ABC that details an object with a display page, such as an asset, place, or universe."""
@property
@abstractmethod
async def id(self) -> int:
"""
Async property that returns the object's ID.
"""
raise NotImplemented
@property
@abstractmethod
async def name(self) -> str:
"""
Async property that returns the object's name.
"""
raise NotImplemented
@property
@abstractmethod
async def description(self) -> str:
"""
Async property that returns the object's description.
"""
raise NotImplemented
@property
@abstractmethod
async def url(self) -> str:
"""
Async property that returns the object's URL.
"""
raise NotImplemented
@property
@abstractmethod
async def created_at(self) -> datetime:
"""
Async property that returns when the object was created.
"""
raise NotImplemented
@property
@abstractmethod
async def updated_at(self) -> datetime:
"""
Async property that returns when the object was last updated.
"""
raise NotImplemented
class Votable(metaclass=ABCMeta):
"""ABC that represents on object that can be voted on, e.g., favorites, thumbs-up, thumbs-down"""
@property
@abstractmethod
async def favorites(self) -> int:
"""
Async property that returns the asset's current number of favorites.
"""
raise NotImplemented
@property
@abstractmethod
async def is_favorited(self) -> bool:
"""
Async property that returns whether the asset is favorited by the client.
"""
raise NotImplemented
@abstractmethod
async def favorite(self):
"""
Favorites the asset for the client user.
"""
raise NotImplemented
@abstractmethod
async def unfavorite(self):
"""
Unfavorites the asset for the client user.
"""
raise NotImplemented
class Asset(DisplayPage, Votable, metaclass=ABCMeta):
"""An ABC that details common operations on a Roblox asset."""
@property
@abstractmethod
async def type(self) -> AssetType:
"""
Async property that returns the Asset's type.
"""
raise NotImplemented
@property
@abstractmethod
async def price(self) -> int:
"""
Async property that returns the asset's current price in Robux.
"""
raise NotImplemented
@property
@abstractmethod
async def for_sale(self) -> bool:
"""
Async property that returns whether the asset can be purchased.
"""
raise NotImplemented
@property
@abstractmethod
async def creator(self) -> User:
"""
Async property that returns the creator of the asset.
"""
raise NotImplemented
@abstractmethod
async def purchase(self, expected_price: Optional[int] = None):
"""
Purchases the asset for the client user. If `expected_price` is specified, the asset will not be
purchased unless the `expected_price` matches the current price.
"""
raise NotImplemented
@abstractmethod
async def delete(self):
"""
Deletes asset from the client user's inventory.
"""
class Place(Asset, metaclass=ABCMeta):
"""An ABC that details operations on a Roblox Place asset."""
@property
@abstractmethod
async def universe(self) -> Universe:
"""Async property that returns the Universe the place belongs to."""
raise NotImplemented
class Universe(DisplayPage, Votable, metaclass=ABCMeta):
"""An ABC that details common operations on a Roblox Universe (Game)."""
@property
@abstractmethod
async def visits(self) -> int:
"""Async property that returns the number of visits to this game."""
raise NotImplemented
@property
@abstractmethod
async def playing(self) -> int:
"""Async property that returns the number of players in this game."""
raise NotImplemented
@property
@abstractmethod
async def max_players(self) -> int:
"""Async property that returns the max players per server in this game."""
raise NotImplemented
@property
@abstractmethod
async def root_place(self) -> Place:
"""Async property that returns the universe's root place."""
raise NotImplemented
class Group(DisplayPage, metaclass=ABCMeta):
"""ABC detailing operations on a Roblox Group."""
@property
@abstractmethod
async def owner(self) -> Optional[User]:
"""Async property that returns the group's current owner, if it has one."""
raise NotImplemented
@property
@abstractmethod
async def shout(self) -> Optional[Shout]:
"""Async property that returns the group's current shout."""
raise NotImplemented
@property
@abstractmethod
async def members(self) -> AsyncGenerator[GroupMember, None]:
"""Async generator that yields the group's members."""
raise NotImplemented
@abstractmethod
async def get_member(self, user: Union[User, str, int]) -> GroupMember:
"""Tries to find a group member given a username, user ID, or User object."""
raise NotImplemented
@property
@abstractmethod
async def is_public(self) -> bool:
"""Async property that returns whether the group allows public entry."""
raise NotImplemented
@property
@abstractmethod
async def roles(self) -> List[Role]:
"""Async property that returns a list of the group's roles"""
raise NotImplemented
class GroupMember(User, metaclass=ABCMeta):
"""ABC describing operations on a Group Member."""
@property
@abstractmethod
async def role(self) -> Role:
"""Async property that eturns the member's group role."""
raise NotImplemented
@property
@abstractmethod
async def rank(self) -> int:
"""Shortcut for the numerical rank of the member's role."""
raise NotImplemented
class Role(metaclass=ABCMeta):
"""ABC describing a group roleset."""
@property
@abstractmethod
async def id(self) -> int:
"""Async property that returns the role's ID."""
raise NotImplemented
@property
@abstractmethod
async def name(self) -> str:
"""Async property that returns the role's name."""
raise NotImplemented
@property
@abstractmethod
async def description(self) -> str:
"""Async property that returns the role's description."""
raise NotImplemented
@property
@abstractmethod
async def rank(self) -> int:
"""Async property that returns the role's numerical rank."""
raise NotImplemented
@property
@abstractmethod
async def member_count(self) -> int:
"""Async property that returns the number of members with this role."""
raise NotImplemented
class Shout(metaclass=ABCMeta):
"""ABC describing a group shout."""
@property
@abstractmethod
def body(self) -> str:
"""Returns the shout's body."""
raise NotImplemented
@property
@abstractmethod
def created_at(self) -> datetime:
"""Returns the time the shout was created at."""
raise NotImplemented
@property
@abstractmethod
async def poster(self) -> User:
"""Returns the user who posted the shout."""
raise NotImplemented
|
en
| 0.84417
|
# ABC classes for Roblox An ABC that details common operations on a Roblox user. # @classmethod # def __subclasshook__(cls, C): # if cls is User: # mro = C.__mro__ # for attr in ("username", "id", "description", "status", "created_at", "banned"): # for base in mro: # if attr in base.__dict__: # break # else: # return NotImplemented # return True # return NotImplemented Async property that returns the User's ID. Async property that returns the User's username. Async property that returns the User's profile URL. Async property that returns the datetime at which the user was created. Async property that returns the User's profile description. Returns the User's current status. Async property that returns whether the user is banned. Async property that returns whether the user has a premium subscription. Async Generator yielding the user's friends. Checks whether this user is friends with another user or the client user. Property that returns FollowerList for this user. Property that returns FollowingsList for this user. Property that returns Inventory for this user. Async Generator that yields the user's games. An ABC that details operations on the client user. Sets the client user's status. :param status: New status. :return: Moderated status. Returns the client user's amount of currency. An ABC that details operations on non-client users. Follows this user from the client user. Unfollows this user from the client user. Sends a friend request to this user. Unfriends this user.. An ABC that details an object with a display page, such as an asset, place, or universe. Async property that returns the object's ID. Async property that returns the object's name. Async property that returns the object's description. Async property that returns the object's URL. Async property that returns when the object was created. Async property that returns when the object was last updated. 
ABC that represents on object that can be voted on, e.g., favorites, thumbs-up, thumbs-down Async property that returns the asset's current number of favorites. Async property that returns whether the asset is favorited by the client. Favorites the asset for the client user. Unfavorites the asset for the client user. An ABC that details common operations on a Roblox asset. Async property that returns the Asset's type. Async property that returns the asset's current price in Robux. Async property that returns whether the asset can be purchased. Async property that returns the creator of the asset. Purchases the asset for the client user. If `expected_price` is specified, the asset will not be purchased unless the `expected_price` matches the current price. Deletes asset from the client user's inventory. An ABC that details operations on a Roblox Place asset. Async property that returns the Universe the place belongs to. An ABC that details common operations on a Roblox Universe (Game). Async property that returns the number of visits to this game. Async property that returns the number of players in this game. Async property that returns the max players per server in this game. Async property that returns the universe's root place. ABC detailing operations on a Roblox Group. Async property that returns the group's current owner, if it has one. Async property that returns the group's current shout. Async generator that yields the group's members. Tries to find a group member given a username, user ID, or User object. Async property that returns whether the group allows public entry. Async property that returns a list of the group's roles ABC describing operations on a Group Member. Async property that eturns the member's group role. Shortcut for the numerical rank of the member's role. ABC describing a group roleset. Async property that returns the role's ID. Async property that returns the role's name. Async property that returns the role's description. 
Async property that returns the role's numerical rank. Async property that returns the number of members with this role. ABC describing a group shout. Returns the shout's body. Returns the time the shout was created at. Returns the user who posted the shout.
| 2.825297
| 3
|
11/11a.py
|
atnguyen1/Adventofcode2020
| 0
|
6629229
|
#!/usr/bin/env python3
import argparse
import numpy as np
from collections import Counter
import sys
np.set_printoptions(threshold=sys.maxsize)
class gameoflife:
def __init__(self, input_grid):
# . = ground == 0
# L = seat == 1
# # = occupied == 2
self.raw_input = input_grid
self.state = list()
self.ybound = len(input_grid)
self.xbound = len(input_grid)
# Construct grid that has 1x padding on all sides to avoid bounds
# Top Row
self.state.append([-1 for x in range(self.xbound + 2)])
for r in input_grid:
r = [int(x) for x in r.replace('.', '0').replace('L', '1')]
r = [-1] + r + [-1]
self.state.append(r)
# Bottom Row
self.state.append([-1 for x in range(self.xbound + 2)])
self.state = np.array(self.state)
self.previous_state = np.array([0])
def iterate_once(self):
# Use a new copy to fill seats, swap at the end
new_state_matrix = self.state.copy()
for (y, x), s in np.ndenumerate(self.state):
# Ignore cells on border
if y == 0:
continue
if x == 0:
continue
if y == len(self.state) - 1:
continue
if x == len(self.state[0]) - 1:
continue
if self.state[y, x] == 0:
# Ground
continue
seat_counts = self.free(y, x)
if seat_counts[False] == 0:
new_state_matrix[y, x] = 2
elif seat_counts[False] >= 4:
new_state_matrix[y, x] = 1
self.previous_state = self.state
self.state = new_state_matrix
def iterate_stable(self):
current_seats = self.get_filled_seats()
previous_seats = -1
while current_seats != previous_seats:
self.iterate_once()
previous_seats = current_seats
current_seats = self.get_filled_seats()
def free(self, y, x):
# Check the following cells
# 1 2 3
# 4 5
# 6 7 8
# Check in numerical order
seats_free = list()
for j in [-1, 0, 1]:
for i in [-1, 0 , 1]:
# Don't check our x, y
if i == 0 and j == 0:
continue
if self.state[y + j, x + i] < 2:
seats_free.append(True)
else:
seats_free.append(False)
seat_counts = Counter(seats_free)
return seat_counts
def get_filled_seats(self):
filled = 0
for (y, x), val in np.ndenumerate(self.state):
if val == 2:
filled += 1
return filled
def print(self):
for row in self.state:
row2 = list()
for r in row:
if r == -1:
row2.append(0)
else:
row2.append(r)
row = [str(x) for x in row2]
row = ''.join(row)
row = row.replace('0', '.').replace('1', 'L').replace('2', '#')
print(row)
def main(args):
"""
"""
seats = list()
with open(args.input, 'r') as fh:
seats = fh.read().split('\n')
b = gameoflife(seats)
b.iterate_stable()
b.print()
print(b.get_filled_seats())
if __name__ == '__main__':
desc = 'Advent 11a'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('--input', type=str, help='Puzzle Input')
args = parser.parse_args()
main(args)
|
#!/usr/bin/env python3
import argparse
import numpy as np
from collections import Counter
import sys
np.set_printoptions(threshold=sys.maxsize)
class gameoflife:
def __init__(self, input_grid):
# . = ground == 0
# L = seat == 1
# # = occupied == 2
self.raw_input = input_grid
self.state = list()
self.ybound = len(input_grid)
self.xbound = len(input_grid)
# Construct grid that has 1x padding on all sides to avoid bounds
# Top Row
self.state.append([-1 for x in range(self.xbound + 2)])
for r in input_grid:
r = [int(x) for x in r.replace('.', '0').replace('L', '1')]
r = [-1] + r + [-1]
self.state.append(r)
# Bottom Row
self.state.append([-1 for x in range(self.xbound + 2)])
self.state = np.array(self.state)
self.previous_state = np.array([0])
def iterate_once(self):
# Use a new copy to fill seats, swap at the end
new_state_matrix = self.state.copy()
for (y, x), s in np.ndenumerate(self.state):
# Ignore cells on border
if y == 0:
continue
if x == 0:
continue
if y == len(self.state) - 1:
continue
if x == len(self.state[0]) - 1:
continue
if self.state[y, x] == 0:
# Ground
continue
seat_counts = self.free(y, x)
if seat_counts[False] == 0:
new_state_matrix[y, x] = 2
elif seat_counts[False] >= 4:
new_state_matrix[y, x] = 1
self.previous_state = self.state
self.state = new_state_matrix
def iterate_stable(self):
current_seats = self.get_filled_seats()
previous_seats = -1
while current_seats != previous_seats:
self.iterate_once()
previous_seats = current_seats
current_seats = self.get_filled_seats()
def free(self, y, x):
# Check the following cells
# 1 2 3
# 4 5
# 6 7 8
# Check in numerical order
seats_free = list()
for j in [-1, 0, 1]:
for i in [-1, 0 , 1]:
# Don't check our x, y
if i == 0 and j == 0:
continue
if self.state[y + j, x + i] < 2:
seats_free.append(True)
else:
seats_free.append(False)
seat_counts = Counter(seats_free)
return seat_counts
def get_filled_seats(self):
filled = 0
for (y, x), val in np.ndenumerate(self.state):
if val == 2:
filled += 1
return filled
def print(self):
for row in self.state:
row2 = list()
for r in row:
if r == -1:
row2.append(0)
else:
row2.append(r)
row = [str(x) for x in row2]
row = ''.join(row)
row = row.replace('0', '.').replace('1', 'L').replace('2', '#')
print(row)
def main(args):
"""
"""
seats = list()
with open(args.input, 'r') as fh:
seats = fh.read().split('\n')
b = gameoflife(seats)
b.iterate_stable()
b.print()
print(b.get_filled_seats())
if __name__ == '__main__':
desc = 'Advent 11a'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('--input', type=str, help='Puzzle Input')
args = parser.parse_args()
main(args)
|
en
| 0.778948
|
#!/usr/bin/env python3 # . = ground == 0 # L = seat == 1 # # = occupied == 2 # Construct grid that has 1x padding on all sides to avoid bounds # Top Row # Bottom Row # Use a new copy to fill seats, swap at the end # Ignore cells on border # Ground # Check the following cells # 1 2 3 # 4 5 # 6 7 8 # Check in numerical order # Don't check our x, y
| 3.010137
| 3
|
CursoSolyd/ex_aula_9_orientacao_a_objetos/main.py
|
cirino/python
| 1
|
6629230
|
from banco import Conta
print('''
Exercício 09 de Python
Curso da Solyd
Day 22 Code Python - 21/05/2018
''')
cliente1 = Conta(12345, 'dag', 29)
print(cliente1.cpf)
print(cliente1.nome)
print(cliente1.idade)
print(cliente1.saldo)
cliente1.deposito(2200)
print(cliente1.saldo)
cliente1.sacar(1300)
print(cliente1.saldo)
print('')
cliente2 = Conta(12233, 'mih', 24)
print(cliente2.cpf)
print(cliente2.nome)
print(cliente2.idade)
print(cliente2.saldo)
cliente2.deposito(1400)
print(cliente2.saldo)
cliente2.sacar(500)
print(cliente2.saldo)
|
from banco import Conta
print('''
Exercício 09 de Python
Curso da Solyd
Day 22 Code Python - 21/05/2018
''')
cliente1 = Conta(12345, 'dag', 29)
print(cliente1.cpf)
print(cliente1.nome)
print(cliente1.idade)
print(cliente1.saldo)
cliente1.deposito(2200)
print(cliente1.saldo)
cliente1.sacar(1300)
print(cliente1.saldo)
print('')
cliente2 = Conta(12233, 'mih', 24)
print(cliente2.cpf)
print(cliente2.nome)
print(cliente2.idade)
print(cliente2.saldo)
cliente2.deposito(1400)
print(cliente2.saldo)
cliente2.sacar(500)
print(cliente2.saldo)
|
pt
| 0.537495
|
Exercício 09 de Python Curso da Solyd Day 22 Code Python - 21/05/2018
| 3.070263
| 3
|
tests/flow/test_ts_madd.py
|
elena-kolevska/RedisTimeSeries
| 643
|
6629231
|
<reponame>elena-kolevska/RedisTimeSeries
import time
from RLTest import Env
def test_madd():
sample_len = 1024
Env().skipOnCluster()
with Env().getConnection() as r:
r.execute_command("ts.create", 'test_key1')
r.execute_command("ts.create", 'test_key2')
r.execute_command("ts.create", 'test_key3')
for i in range(sample_len):
assert [i + 1000, i + 3000, i + 6000] == r.execute_command("ts.madd", 'test_key1', i + 1000, i,
'test_key2', i + 3000, i, 'test_key3',
i + 6000, i, )
res = r.execute_command('ts.range', 'test_key1', 1000, 1000 + sample_len)
i = 0
for sample in res:
assert sample == [1000 + i, str(i).encode('ascii')]
i += 1
res = r.execute_command('ts.range', 'test_key2', 3000, 3000 + sample_len)
i = 0
for sample in res:
assert sample == [3000 + i, str(i).encode('ascii')]
i += 1
res = r.execute_command('ts.range', 'test_key3', 6000, 6000 + sample_len)
i = 0
for sample in res:
assert sample == [6000 + i, str(i).encode('ascii')]
i += 1
def test_ooo_madd():
sample_len = 100
start_ts = 1600204334000
with Env().getClusterConnectionIfNeeded() as r:
r.execute_command("ts.create", 'test_key1')
last_sample = None
samples = []
for i in range(0, sample_len, 3):
assert [start_ts + (i * 1000 + 2000), start_ts + (i * 1000 + 1000),
start_ts + (i * 1000)] == r.execute_command("ts.madd", 'test_key1', start_ts + (i * 1000 + 2000), i,
'test_key1', start_ts + i * 1000 + 1000, i, 'test_key1',
start_ts + i * 1000, i)
samples.append([start_ts + (i * 1000), str(i).encode('ascii')])
samples.append([start_ts + (i * 1000 + 1000), str(i).encode('ascii')])
samples.append([start_ts + (i * 1000 + 2000), str(i).encode('ascii')])
last_sample = [start_ts + (i * 1000 + 2000), str(i).encode('ascii')]
assert r.execute_command('ts.get', 'test_key1') == last_sample
assert r.execute_command('ts.range', 'test_key1', '-', '+') == samples
def test_partial_madd():
Env().skipOnCluster()
with Env().getConnection() as r:
r.execute_command("ts.create", 'test_key1')
r.execute_command("ts.create", 'test_key2')
r.execute_command("ts.create", 'test_key3')
now = int(time.time() * 1000)
res = r.execute_command("ts.madd", 'test_key1', "*", 10, 'test_key2', 2000, 20, 'test_key3', 3000, 30)
assert now <= res[0]
assert 2000 == res[1]
assert 3000 == res[2]
res = r.execute_command("ts.madd", 'test_key1', now + 1000, 10, 'test_key2', 1000, 20, 'test_key3', 3001, 30)
assert (now + 1000, 1000, 3001) == (res[0], res[1], res[2])
assert len(r.execute_command('ts.range', 'test_key1', "-", "+")) == 2
assert len(r.execute_command('ts.range', 'test_key2', "-", "+")) == 2
assert len(r.execute_command('ts.range', 'test_key3', "-", "+")) == 2
def test_extensive_ts_madd():
Env().skipOnCluster()
with Env(decodeResponses=True).getConnection() as r:
r.execute_command("ts.create", 'test_key1')
r.execute_command("ts.create", 'test_key2')
pos = 1
lines = []
float_lines = []
with open("lemire_canada.txt","r") as file:
lines = file.readlines()
for line in lines:
float_v = float(line.strip())
res = r.execute_command("ts.madd", 'test_key1', pos, float_v, 'test_key2', pos, float_v)
assert res == [pos,pos]
pos=pos+1
float_lines.append(float_v)
returned_floats = r.execute_command('ts.range', 'test_key1', "-", "+")
assert len(returned_floats) == len(float_lines)
for pos,datapoint in enumerate(returned_floats,start=1):
assert pos == datapoint[0]
assert float_lines[pos-1] == float(datapoint[1])
|
import time
from RLTest import Env
def test_madd():
sample_len = 1024
Env().skipOnCluster()
with Env().getConnection() as r:
r.execute_command("ts.create", 'test_key1')
r.execute_command("ts.create", 'test_key2')
r.execute_command("ts.create", 'test_key3')
for i in range(sample_len):
assert [i + 1000, i + 3000, i + 6000] == r.execute_command("ts.madd", 'test_key1', i + 1000, i,
'test_key2', i + 3000, i, 'test_key3',
i + 6000, i, )
res = r.execute_command('ts.range', 'test_key1', 1000, 1000 + sample_len)
i = 0
for sample in res:
assert sample == [1000 + i, str(i).encode('ascii')]
i += 1
res = r.execute_command('ts.range', 'test_key2', 3000, 3000 + sample_len)
i = 0
for sample in res:
assert sample == [3000 + i, str(i).encode('ascii')]
i += 1
res = r.execute_command('ts.range', 'test_key3', 6000, 6000 + sample_len)
i = 0
for sample in res:
assert sample == [6000 + i, str(i).encode('ascii')]
i += 1
def test_ooo_madd():
sample_len = 100
start_ts = 1600204334000
with Env().getClusterConnectionIfNeeded() as r:
r.execute_command("ts.create", 'test_key1')
last_sample = None
samples = []
for i in range(0, sample_len, 3):
assert [start_ts + (i * 1000 + 2000), start_ts + (i * 1000 + 1000),
start_ts + (i * 1000)] == r.execute_command("ts.madd", 'test_key1', start_ts + (i * 1000 + 2000), i,
'test_key1', start_ts + i * 1000 + 1000, i, 'test_key1',
start_ts + i * 1000, i)
samples.append([start_ts + (i * 1000), str(i).encode('ascii')])
samples.append([start_ts + (i * 1000 + 1000), str(i).encode('ascii')])
samples.append([start_ts + (i * 1000 + 2000), str(i).encode('ascii')])
last_sample = [start_ts + (i * 1000 + 2000), str(i).encode('ascii')]
assert r.execute_command('ts.get', 'test_key1') == last_sample
assert r.execute_command('ts.range', 'test_key1', '-', '+') == samples
def test_partial_madd():
Env().skipOnCluster()
with Env().getConnection() as r:
r.execute_command("ts.create", 'test_key1')
r.execute_command("ts.create", 'test_key2')
r.execute_command("ts.create", 'test_key3')
now = int(time.time() * 1000)
res = r.execute_command("ts.madd", 'test_key1', "*", 10, 'test_key2', 2000, 20, 'test_key3', 3000, 30)
assert now <= res[0]
assert 2000 == res[1]
assert 3000 == res[2]
res = r.execute_command("ts.madd", 'test_key1', now + 1000, 10, 'test_key2', 1000, 20, 'test_key3', 3001, 30)
assert (now + 1000, 1000, 3001) == (res[0], res[1], res[2])
assert len(r.execute_command('ts.range', 'test_key1', "-", "+")) == 2
assert len(r.execute_command('ts.range', 'test_key2', "-", "+")) == 2
assert len(r.execute_command('ts.range', 'test_key3', "-", "+")) == 2
def test_extensive_ts_madd():
Env().skipOnCluster()
with Env(decodeResponses=True).getConnection() as r:
r.execute_command("ts.create", 'test_key1')
r.execute_command("ts.create", 'test_key2')
pos = 1
lines = []
float_lines = []
with open("lemire_canada.txt","r") as file:
lines = file.readlines()
for line in lines:
float_v = float(line.strip())
res = r.execute_command("ts.madd", 'test_key1', pos, float_v, 'test_key2', pos, float_v)
assert res == [pos,pos]
pos=pos+1
float_lines.append(float_v)
returned_floats = r.execute_command('ts.range', 'test_key1', "-", "+")
assert len(returned_floats) == len(float_lines)
for pos,datapoint in enumerate(returned_floats,start=1):
assert pos == datapoint[0]
assert float_lines[pos-1] == float(datapoint[1])
|
none
| 1
| 1.962278
| 2
|
|
raspberry/IoT-workshop-BR_BSB-20150803/blink_led_simple.py
|
mtulio/kb
| 3
|
6629232
|
#!/usr/bin/python
import RPi.GPIO as Portas
Portas.setmode(Portas.BOARD);
Portas.setup(24,Portas.OUT);
#Portas.output(24,True);
Portas.output(24,False);
|
#!/usr/bin/python
import RPi.GPIO as Portas
Portas.setmode(Portas.BOARD);
Portas.setup(24,Portas.OUT);
#Portas.output(24,True);
Portas.output(24,False);
|
ru
| 0.128539
|
#!/usr/bin/python #Portas.output(24,True);
| 1.967765
| 2
|
examples/python-guide/dask/multiclass-classification.py
|
PyVCEchecker/LightGBM
| 8,890
|
6629233
|
import dask.array as da
from distributed import Client, LocalCluster
from sklearn.datasets import make_blobs
import lightgbm as lgb
if __name__ == "__main__":
print("loading data")
X, y = make_blobs(n_samples=1000, n_features=50, centers=3)
print("initializing a Dask cluster")
cluster = LocalCluster(n_workers=2)
client = Client(cluster)
print("created a Dask LocalCluster")
print("distributing training data on the Dask cluster")
dX = da.from_array(X, chunks=(100, 50))
dy = da.from_array(y, chunks=(100,))
print("beginning training")
dask_model = lgb.DaskLGBMClassifier(n_estimators=10)
dask_model.fit(dX, dy)
assert dask_model.fitted_
print("done training")
|
import dask.array as da
from distributed import Client, LocalCluster
from sklearn.datasets import make_blobs
import lightgbm as lgb
if __name__ == "__main__":
print("loading data")
X, y = make_blobs(n_samples=1000, n_features=50, centers=3)
print("initializing a Dask cluster")
cluster = LocalCluster(n_workers=2)
client = Client(cluster)
print("created a Dask LocalCluster")
print("distributing training data on the Dask cluster")
dX = da.from_array(X, chunks=(100, 50))
dy = da.from_array(y, chunks=(100,))
print("beginning training")
dask_model = lgb.DaskLGBMClassifier(n_estimators=10)
dask_model.fit(dX, dy)
assert dask_model.fitted_
print("done training")
|
none
| 1
| 2.696907
| 3
|
|
tests.py
|
edberoi/python-airmusicapi
| 0
|
6629234
|
"""
Test file to check functionality in Airmusic API towards Lenco DIR150BK.
"""
import json
import logging
import time
from airmusicapi import airmusic
IPADDR = '192.168.2.147' # Change this to the IP-address or hostname of your device.
TIMEOUT = 5 # in seconds. In most cases 1 second is sufficient.
def print_list(list_result):
"""!
Show the response from a list command in pretty print format.
@param list_result contains the result (dict) of the 'list' command.
"""
if 'result' in list_result:
print("Error: {}".format(list_result['result']))
return
print("List: {} out of {}:".format(list_result['item_total'], list_result['item_return']))
for entry in list_result['item']:
print(" {:5} {} -> {}".format(entry['id'], entry['name'], entry['status']))
def print_songinfo(api_ref):
"""!
Print the song information, as far as it is available.
@param api_ref is an Airmusic API instance.
"""
print("Press CTRL-C to interrupt.")
print("{:3} {:3} {}".format('Vol', 'sid', 'Status'))
try:
while True:
playinfo = api_ref.get_playinfo()
if 'result' in playinfo:
print(" ... {}".format(playinfo['result']))
else:
status = "{:3} {:3} {} ".format(playinfo['vol'], playinfo['sid'], playinfo['status'])
if 'artist' in playinfo:
status += "Artist:'{}' Song:'{}'".format(playinfo['artist'], playinfo['song'])
print(status)
time.sleep(0.5)
except KeyboardInterrupt:
pass
def main():
"""
Main part of the code. Checks some parts of the API against the Lenco DIR150BK radio.
"""
# Create an API instance and setup initial communication with the device.
am_obj = airmusic(IPADDR, TIMEOUT)
am_obj.log_level = logging.DEBUG
am_obj.init(language="en")
# Show device information.
print('Device Name: %s' % am_obj.friendly_name)
print(json.dumps(am_obj.get_systeminfo(), indent=2))
# Show volume and mute levels.
print("Current volume = {}".format(am_obj.volume))
print("Current mute = {}".format(am_obj.mute))
# Show the content of the hotkeylist.
hotkeylist = am_obj.get_hotkeylist()
print("Hotkeylist: {} out of {}:".format(hotkeylist['item_total'], hotkeylist['item_return']))
for itm in hotkeylist['item']:
print(" {}, {}, {}".format(itm['id'], itm['name'], itm['status']))
print("Verify navigation through menus to reach a station to play.")
print_list(am_obj.get_menu(menu_id=1))
am_obj.enter_menu(52)
print_list(am_obj.get_menu(menu_id=52))
am_obj.enter_menu(75)
print_list(am_obj.get_menu(menu_id=75))
am_obj.play_station('75_7')
print_songinfo(am_obj)
print("Going to play the radio station at hotkey 1.")
am_obj.play_hotkey(1)
print_songinfo(am_obj)
# ***************************************************************************
# MAIN
# ***************************************************************************
if __name__ == '__main__':
main()
|
"""
Test file to check functionality in Airmusic API towards Lenco DIR150BK.
"""
import json
import logging
import time
from airmusicapi import airmusic
IPADDR = '192.168.2.147' # Change this to the IP-address or hostname of your device.
TIMEOUT = 5 # in seconds. In most cases 1 second is sufficient.
def print_list(list_result):
"""!
Show the response from a list command in pretty print format.
@param list_result contains the result (dict) of the 'list' command.
"""
if 'result' in list_result:
print("Error: {}".format(list_result['result']))
return
print("List: {} out of {}:".format(list_result['item_total'], list_result['item_return']))
for entry in list_result['item']:
print(" {:5} {} -> {}".format(entry['id'], entry['name'], entry['status']))
def print_songinfo(api_ref):
"""!
Print the song information, as far as it is available.
@param api_ref is an Airmusic API instance.
"""
print("Press CTRL-C to interrupt.")
print("{:3} {:3} {}".format('Vol', 'sid', 'Status'))
try:
while True:
playinfo = api_ref.get_playinfo()
if 'result' in playinfo:
print(" ... {}".format(playinfo['result']))
else:
status = "{:3} {:3} {} ".format(playinfo['vol'], playinfo['sid'], playinfo['status'])
if 'artist' in playinfo:
status += "Artist:'{}' Song:'{}'".format(playinfo['artist'], playinfo['song'])
print(status)
time.sleep(0.5)
except KeyboardInterrupt:
pass
def main():
"""
Main part of the code. Checks some parts of the API against the Lenco DIR150BK radio.
"""
# Create an API instance and setup initial communication with the device.
am_obj = airmusic(IPADDR, TIMEOUT)
am_obj.log_level = logging.DEBUG
am_obj.init(language="en")
# Show device information.
print('Device Name: %s' % am_obj.friendly_name)
print(json.dumps(am_obj.get_systeminfo(), indent=2))
# Show volume and mute levels.
print("Current volume = {}".format(am_obj.volume))
print("Current mute = {}".format(am_obj.mute))
# Show the content of the hotkeylist.
hotkeylist = am_obj.get_hotkeylist()
print("Hotkeylist: {} out of {}:".format(hotkeylist['item_total'], hotkeylist['item_return']))
for itm in hotkeylist['item']:
print(" {}, {}, {}".format(itm['id'], itm['name'], itm['status']))
print("Verify navigation through menus to reach a station to play.")
print_list(am_obj.get_menu(menu_id=1))
am_obj.enter_menu(52)
print_list(am_obj.get_menu(menu_id=52))
am_obj.enter_menu(75)
print_list(am_obj.get_menu(menu_id=75))
am_obj.play_station('75_7')
print_songinfo(am_obj)
print("Going to play the radio station at hotkey 1.")
am_obj.play_hotkey(1)
print_songinfo(am_obj)
# ***************************************************************************
# MAIN
# ***************************************************************************
if __name__ == '__main__':
main()
|
en
| 0.774386
|
Test file to check functionality in Airmusic API towards Lenco DIR150BK. # Change this to the IP-address or hostname of your device. # in seconds. In most cases 1 second is sufficient. ! Show the response from a list command in pretty print format. @param list_result contains the result (dict) of the 'list' command. ! Print the song information, as far as it is available. @param api_ref is an Airmusic API instance. Main part of the code. Checks some parts of the API against the Lenco DIR150BK radio. # Create an API instance and setup initial communication with the device. # Show device information. # Show volume and mute levels. # Show the content of the hotkeylist. # *************************************************************************** # MAIN # ***************************************************************************
| 2.818831
| 3
|
testcenter_multithread/sequencer.py
|
kelakty/Testcenter
| 0
|
6629235
|
<reponame>kelakty/Testcenter
from PyQt5.QtCore import QObject
from PyQt5.QtCore import QThread,pyqtSignal,QStandardPaths
from PyQt5.QtWidgets import QWidget,QFileDialog,QMessageBox
from globalvariable import GlobalVariable
import threading
import time
import re
import pandas as pd
from datetime import datetime
# test_sequence = [{"是否测试":True, "Name":"COM2", "SerialObj":"obj", "发送指令":"1","等待回显时间":1, "需匹配文本":"", "NoMatchLog":"Fail", "ClearLogBuffer":True, "SaveFile":"xxx.log", "Report":"xxx.xls"},
# {"是否测试":True, "Name":"COM2", "SerialObj":"obj", "发送指令":"2","等待回显时间":1, "需匹配文本":"interface", "ClearLogBuffer":True, "SaveFile":"xxx.log", "Report":"xxx.xls"}
# ]
class SequencerThreadWorker(QObject):
seq_to_main_trigger = pyqtSignal(object, str) #object为需要发送到的终端
main_to_seq_trigger = pyqtSignal(object, str)
finished = pyqtSignal()
choose_file_trigger = pyqtSignal()
def __init__(self,current_consolethread): #parent=None #current_consolethread
super(SequencerThreadWorker,self).__init__() #parent
#初始化时还未跑在线程中
#TODO 暂时关闭。以下代码会导致无法进入线程sequencer_init。原因未知
self.current_consolethread = current_consolethread
# print("当前终端线程是:", self.current_consolethread)
# self.current_index = GlobalVariable.mainwindow.find_dictionarylist_keyvalue_index(GlobalVariable.Console, "consolethread", self.current_consolethread)
# self.current_console_name = GlobalVariable.Console[self.current_index]["name"]
# print("开始sequencerThreadworker线程init初始化中")
# print('%-25s: %s, %s,' % ("sequencerThread_init", QThread.currentThread(), int(QThread.currentThreadId())))
# print('%-25s: %s, %s,' % ("sequencerThread_init", threading.current_thread().name, threading.current_thread().ident))
def sequencer_worker(self): #test_sequence
# print('%-25s: %s, %s,' % ("sequencer_init", QThread.currentThread(), int(QThread.currentThreadId())))
# print('%-25s: %s, %s,' % ("sequencer_init", threading.current_thread().name, threading.current_thread().ident))
# while True:
print(threading.current_thread().name)
time.sleep(3)
self.sequence_table(GlobalVariable.table_dict_list) #GlobalVariable.table_dict_list
# print('%-25s: %s, %s,' % ("sequencer_init", QThread.currentThread(), int(QThread.currentThreadId())))
# print('%-25s: %s, %s,' % ("sequencer_init", threading.current_thread().name, threading.current_thread().ident))
# self.test_sequence = test_sequence
# #接收sequence table的数据,并存放在线程中
# self.run_sequence_table(test_sequence)
self.finished.emit()
GlobalVariable.sequencer_working = False
def sequence_table(self, test_sequence_list):
print(test_sequence_list)
for test_info in test_sequence_list:
print("是否测试:",test_info["是否测试"])
if int(test_info["是否测试"]) != False: #表格读取到的是字符型,需要转换为整形
#是否有保存报告的路径,有则先选择保存报告路径.不能在线程中选择。
if test_info["选择报告文件"] != False and test_info["选择报告文件"] != "nan":
self.choose_file_trigger.emit()
GlobalVariable.sequencer_waiting_for_main = True
while GlobalVariable.sequencer_waiting_for_main == True:
pass #等待main的槽函数执行完sequencer_waiting_for_main ==False
print("发送指令:",test_info["发送指令"])
#发送指令不为空才发送
if test_info["发送指令"] != "" and test_info["发送指令"] != "nan":
# for command in test_info["发送指令"]:
# 发送命令command
command = str(test_info["发送指令"]).replace("\\r\\n", "\r\n")
print(command)
print(str(command))
# self.seq_to_main_trigger.emit(GlobalVariable.Console[0]["consolethread"], command )#self.current_consolethread
self.seq_to_main_trigger.emit(self.current_consolethread, command )
#是否弹出对话框
#超时时间
if test_info["等待回显时间"] != "nan":
Timeout = int(float(test_info["等待回显时间"]))
time.sleep(Timeout)
print("log缓存区:", GlobalVariable.log_data_buffer)
# 匹配log
if self.matchlog(test_info,GlobalVariable.log_data_buffer) == True and self.nomatchlog(test_info,GlobalVariable.log_data_buffer) == False:
#代表测试通过
print("匹配到所需log同时不匹配到fail等表测试通过")
if self.nomatchlog(test_info,GlobalVariable.log_data_buffer) == True:
print("匹配到fail的log")
#单独保存本次log文件
#清空本次log缓存
GlobalVariable.log_data_buffer = ""
#输出本次结果到报告
self.report_df = pd.read_excel(GlobalVariable.sequencer_choosefilename, sheet_name = '机框式交换机生测checklist')
print(self.report_df)
self.report_name = GlobalVariable.sequencer_choosefilename+"_%d%02d%02d_%d_%02d_%02d"% (datetime.now().year, datetime.now().month, datetime.now().day,datetime.now().hour,datetime.now().minute,datetime.now().second)+".xlsx"
self.report_writer = pd.ExcelWriter(self.report_name, engine='xlsxwriter')
wrap_format = self.report_writer.add_format({'text_wrap': True})
worksheet.write('F1', '', wrap_format)
self.report_df.to_excel(self.report_writer, sheet_name='机框式交换机生测checklist')
workbook = self.report_writer.book
worksheet = self.report_writer.sheets['机框式交换机生测checklist']
wrap_format = workbook.add_format({'text_wrap': True, 'border': 4,"valign":"center","align":"center"})
bold_format = workbook.add_format({'text_wrap': True, 'border': 1,"valign":"vcenter","align":"center",'bold': True,'fg_color': '#D7E4BC'})
border_format = workbook.add_format({"border":1,"valign":"vcenter","align":"center",'text_wrap': True})
test_spe_format = workbook.add_format({'text_wrap': True, 'border': 1,"valign":"center","align":"center",'bg_color':'#C5D9F1'})
worksheet.set_column('A1:A', 15,bold_format) #设置第一列"测试项"格式
worksheet.set_column('B1:B', 60, test_spe_format)#设置第二列“测试判断”格式
worksheet.set_column('C1:C', 8, bold_format)
worksheet.set_column('D1:D', 8, border_format)
worksheet.set_column('E1:E', 8, border_format)
worksheet.set_row(0,20)
header_format = workbook.add_format({
'bold': True,
'text_wrap': True,
'valign': 'top',
"align":"center",
'fg_color': '#CAE6F4',
'border': 1})
fail_format = workbook.add_format({'bold': True,'bg_color':'red','font_color': "black"})
pass_format = workbook.add_format({'bold': True,'bg_color':'green','font_color': "black"})
na_format = workbook.add_format({'bold': True,'bg_color':'#D9D9D9','font_color': "black"})
empty_format = workbook.add_format({'bold': True,'bg_color':'yellow'})
worksheet.conditional_format('C1:C54', {'type':'cell',
'criteria': 'equal to',
'value':'"fail"',
'format': fail_format})
worksheet.conditional_format('C1:C54', {'type':'cell',
'criteria': 'equal to',
'value':'"pass"',
'format': pass_format})
worksheet.conditional_format('C1:C54', {'type':'cell',
'criteria': 'equal to',
'value':'"na"',
'format': na_format})
worksheet.conditional_format('C1:C54', {'type':'cell',
'criteria': 'equal to',
'value':'"不适用"',
'format': na_format})
worksheet.conditional_format('C1:C54', {'type':'cell',
'criteria': 'equal to',
'value':'""',
'format': empty_format})
# worksheet.autofilter(C1,C54,wrap_format)
#报告部分
#合并header单元格
merge_header_format = workbook.add_format({'text_wrap': True,'border': 1,"valign":"vcenter","align":"center",'bold': True,'bg_color': 'yellow',"font_size":20})
worksheet.merge_range('G1:J1',"测试报告", merge_header_format)
worksheet.write("F1","")
report_cell_format = workbook.add_format({'text_wrap': True,"valign":"vcenter","font_color":"blue",'border': 1})
# report_cell_format.set_border()
worksheet.conditional_format("G1:J7",{ 'type' : 'no_blanks','format':report_cell_format})
self.report_writer.save()
def received_console_log(self, serialobj, data):
#接收到的回显log处理
pass
# if data != False:
# print("sequencer线程接收到的数据是:", data)
# self.log_data_buffer += data
def matchlog(self, test_info, log_data_buffer):
#匹配log
# print("log缓存区:", log_data_buffer)
if re.findall(test_info["需匹配文本"], log_data_buffer) != []:
return True
else:
return False
def nomatchlog(self, test_info, log_data_buffer):
if re.findall(test_info["不能匹配到文本"], log_data_buffer) != []:
return True
else:
return False
def txt_import_to_sequencer_table(self):
pass
#导入前先检查每个数据格式是否正确
def excel_import_to_sequencer_table(self):
pass
#导入前先检查每个数据格式是否正确
def show_sequencer_table(self):
pass
def pause(self):
pass
def stop_run(self):
pass
def move_to_next_section(self):
pass
|
from PyQt5.QtCore import QObject
from PyQt5.QtCore import QThread,pyqtSignal,QStandardPaths
from PyQt5.QtWidgets import QWidget,QFileDialog,QMessageBox
from globalvariable import GlobalVariable
import threading
import time
import re
import pandas as pd
from datetime import datetime
# test_sequence = [{"是否测试":True, "Name":"COM2", "SerialObj":"obj", "发送指令":"1","等待回显时间":1, "需匹配文本":"", "NoMatchLog":"Fail", "ClearLogBuffer":True, "SaveFile":"xxx.log", "Report":"xxx.xls"},
# {"是否测试":True, "Name":"COM2", "SerialObj":"obj", "发送指令":"2","等待回显时间":1, "需匹配文本":"interface", "ClearLogBuffer":True, "SaveFile":"xxx.log", "Report":"xxx.xls"}
# ]
class SequencerThreadWorker(QObject):
seq_to_main_trigger = pyqtSignal(object, str) #object为需要发送到的终端
main_to_seq_trigger = pyqtSignal(object, str)
finished = pyqtSignal()
choose_file_trigger = pyqtSignal()
def __init__(self,current_consolethread): #parent=None #current_consolethread
super(SequencerThreadWorker,self).__init__() #parent
#初始化时还未跑在线程中
#TODO 暂时关闭。以下代码会导致无法进入线程sequencer_init。原因未知
self.current_consolethread = current_consolethread
# print("当前终端线程是:", self.current_consolethread)
# self.current_index = GlobalVariable.mainwindow.find_dictionarylist_keyvalue_index(GlobalVariable.Console, "consolethread", self.current_consolethread)
# self.current_console_name = GlobalVariable.Console[self.current_index]["name"]
# print("开始sequencerThreadworker线程init初始化中")
# print('%-25s: %s, %s,' % ("sequencerThread_init", QThread.currentThread(), int(QThread.currentThreadId())))
# print('%-25s: %s, %s,' % ("sequencerThread_init", threading.current_thread().name, threading.current_thread().ident))
def sequencer_worker(self): #test_sequence
# print('%-25s: %s, %s,' % ("sequencer_init", QThread.currentThread(), int(QThread.currentThreadId())))
# print('%-25s: %s, %s,' % ("sequencer_init", threading.current_thread().name, threading.current_thread().ident))
# while True:
print(threading.current_thread().name)
time.sleep(3)
self.sequence_table(GlobalVariable.table_dict_list) #GlobalVariable.table_dict_list
# print('%-25s: %s, %s,' % ("sequencer_init", QThread.currentThread(), int(QThread.currentThreadId())))
# print('%-25s: %s, %s,' % ("sequencer_init", threading.current_thread().name, threading.current_thread().ident))
# self.test_sequence = test_sequence
# #接收sequence table的数据,并存放在线程中
# self.run_sequence_table(test_sequence)
self.finished.emit()
GlobalVariable.sequencer_working = False
def sequence_table(self, test_sequence_list):
print(test_sequence_list)
for test_info in test_sequence_list:
print("是否测试:",test_info["是否测试"])
if int(test_info["是否测试"]) != False: #表格读取到的是字符型,需要转换为整形
#是否有保存报告的路径,有则先选择保存报告路径.不能在线程中选择。
if test_info["选择报告文件"] != False and test_info["选择报告文件"] != "nan":
self.choose_file_trigger.emit()
GlobalVariable.sequencer_waiting_for_main = True
while GlobalVariable.sequencer_waiting_for_main == True:
pass #等待main的槽函数执行完sequencer_waiting_for_main ==False
print("发送指令:",test_info["发送指令"])
#发送指令不为空才发送
if test_info["发送指令"] != "" and test_info["发送指令"] != "nan":
# for command in test_info["发送指令"]:
# 发送命令command
command = str(test_info["发送指令"]).replace("\\r\\n", "\r\n")
print(command)
print(str(command))
# self.seq_to_main_trigger.emit(GlobalVariable.Console[0]["consolethread"], command )#self.current_consolethread
self.seq_to_main_trigger.emit(self.current_consolethread, command )
#是否弹出对话框
#超时时间
if test_info["等待回显时间"] != "nan":
Timeout = int(float(test_info["等待回显时间"]))
time.sleep(Timeout)
print("log缓存区:", GlobalVariable.log_data_buffer)
# 匹配log
if self.matchlog(test_info,GlobalVariable.log_data_buffer) == True and self.nomatchlog(test_info,GlobalVariable.log_data_buffer) == False:
#代表测试通过
print("匹配到所需log同时不匹配到fail等表测试通过")
if self.nomatchlog(test_info,GlobalVariable.log_data_buffer) == True:
print("匹配到fail的log")
#单独保存本次log文件
#清空本次log缓存
GlobalVariable.log_data_buffer = ""
#输出本次结果到报告
self.report_df = pd.read_excel(GlobalVariable.sequencer_choosefilename, sheet_name = '机框式交换机生测checklist')
print(self.report_df)
self.report_name = GlobalVariable.sequencer_choosefilename+"_%d%02d%02d_%d_%02d_%02d"% (datetime.now().year, datetime.now().month, datetime.now().day,datetime.now().hour,datetime.now().minute,datetime.now().second)+".xlsx"
self.report_writer = pd.ExcelWriter(self.report_name, engine='xlsxwriter')
wrap_format = self.report_writer.add_format({'text_wrap': True})
worksheet.write('F1', '', wrap_format)
self.report_df.to_excel(self.report_writer, sheet_name='机框式交换机生测checklist')
workbook = self.report_writer.book
worksheet = self.report_writer.sheets['机框式交换机生测checklist']
wrap_format = workbook.add_format({'text_wrap': True, 'border': 4,"valign":"center","align":"center"})
bold_format = workbook.add_format({'text_wrap': True, 'border': 1,"valign":"vcenter","align":"center",'bold': True,'fg_color': '#D7E4BC'})
border_format = workbook.add_format({"border":1,"valign":"vcenter","align":"center",'text_wrap': True})
test_spe_format = workbook.add_format({'text_wrap': True, 'border': 1,"valign":"center","align":"center",'bg_color':'#C5D9F1'})
worksheet.set_column('A1:A', 15,bold_format) #设置第一列"测试项"格式
worksheet.set_column('B1:B', 60, test_spe_format)#设置第二列“测试判断”格式
worksheet.set_column('C1:C', 8, bold_format)
worksheet.set_column('D1:D', 8, border_format)
worksheet.set_column('E1:E', 8, border_format)
worksheet.set_row(0,20)
header_format = workbook.add_format({
'bold': True,
'text_wrap': True,
'valign': 'top',
"align":"center",
'fg_color': '#CAE6F4',
'border': 1})
fail_format = workbook.add_format({'bold': True,'bg_color':'red','font_color': "black"})
pass_format = workbook.add_format({'bold': True,'bg_color':'green','font_color': "black"})
na_format = workbook.add_format({'bold': True,'bg_color':'#D9D9D9','font_color': "black"})
empty_format = workbook.add_format({'bold': True,'bg_color':'yellow'})
worksheet.conditional_format('C1:C54', {'type':'cell',
'criteria': 'equal to',
'value':'"fail"',
'format': fail_format})
worksheet.conditional_format('C1:C54', {'type':'cell',
'criteria': 'equal to',
'value':'"pass"',
'format': pass_format})
worksheet.conditional_format('C1:C54', {'type':'cell',
'criteria': 'equal to',
'value':'"na"',
'format': na_format})
worksheet.conditional_format('C1:C54', {'type':'cell',
'criteria': 'equal to',
'value':'"不适用"',
'format': na_format})
worksheet.conditional_format('C1:C54', {'type':'cell',
'criteria': 'equal to',
'value':'""',
'format': empty_format})
# worksheet.autofilter(C1,C54,wrap_format)
#报告部分
#合并header单元格
merge_header_format = workbook.add_format({'text_wrap': True,'border': 1,"valign":"vcenter","align":"center",'bold': True,'bg_color': 'yellow',"font_size":20})
worksheet.merge_range('G1:J1',"测试报告", merge_header_format)
worksheet.write("F1","")
report_cell_format = workbook.add_format({'text_wrap': True,"valign":"vcenter","font_color":"blue",'border': 1})
# report_cell_format.set_border()
worksheet.conditional_format("G1:J7",{ 'type' : 'no_blanks','format':report_cell_format})
self.report_writer.save()
def received_console_log(self, serialobj, data):
#接收到的回显log处理
pass
# if data != False:
# print("sequencer线程接收到的数据是:", data)
# self.log_data_buffer += data
def matchlog(self, test_info, log_data_buffer):
#匹配log
# print("log缓存区:", log_data_buffer)
if re.findall(test_info["需匹配文本"], log_data_buffer) != []:
return True
else:
return False
def nomatchlog(self, test_info, log_data_buffer):
if re.findall(test_info["不能匹配到文本"], log_data_buffer) != []:
return True
else:
return False
def txt_import_to_sequencer_table(self):
pass
#导入前先检查每个数据格式是否正确
def excel_import_to_sequencer_table(self):
pass
#导入前先检查每个数据格式是否正确
def show_sequencer_table(self):
pass
def pause(self):
pass
def stop_run(self):
pass
def move_to_next_section(self):
pass
|
en
| 0.195217
|
# test_sequence = [{"是否测试":True, "Name":"COM2", "SerialObj":"obj", "发送指令":"1","等待回显时间":1, "需匹配文本":"", "NoMatchLog":"Fail", "ClearLogBuffer":True, "SaveFile":"xxx.log", "Report":"xxx.xls"}, # {"是否测试":True, "Name":"COM2", "SerialObj":"obj", "发送指令":"2","等待回显时间":1, "需匹配文本":"interface", "ClearLogBuffer":True, "SaveFile":"xxx.log", "Report":"xxx.xls"} # ] #object为需要发送到的终端 #parent=None #current_consolethread #parent #初始化时还未跑在线程中 #TODO 暂时关闭。以下代码会导致无法进入线程sequencer_init。原因未知 # print("当前终端线程是:", self.current_consolethread) # self.current_index = GlobalVariable.mainwindow.find_dictionarylist_keyvalue_index(GlobalVariable.Console, "consolethread", self.current_consolethread) # self.current_console_name = GlobalVariable.Console[self.current_index]["name"] # print("开始sequencerThreadworker线程init初始化中") # print('%-25s: %s, %s,' % ("sequencerThread_init", QThread.currentThread(), int(QThread.currentThreadId()))) # print('%-25s: %s, %s,' % ("sequencerThread_init", threading.current_thread().name, threading.current_thread().ident)) #test_sequence # print('%-25s: %s, %s,' % ("sequencer_init", QThread.currentThread(), int(QThread.currentThreadId()))) # print('%-25s: %s, %s,' % ("sequencer_init", threading.current_thread().name, threading.current_thread().ident)) # while True: #GlobalVariable.table_dict_list # print('%-25s: %s, %s,' % ("sequencer_init", QThread.currentThread(), int(QThread.currentThreadId()))) # print('%-25s: %s, %s,' % ("sequencer_init", threading.current_thread().name, threading.current_thread().ident)) # self.test_sequence = test_sequence # #接收sequence table的数据,并存放在线程中 # self.run_sequence_table(test_sequence) #表格读取到的是字符型,需要转换为整形 #是否有保存报告的路径,有则先选择保存报告路径.不能在线程中选择。 #等待main的槽函数执行完sequencer_waiting_for_main ==False #发送指令不为空才发送 # for command in test_info["发送指令"]: # 发送命令command # self.seq_to_main_trigger.emit(GlobalVariable.Console[0]["consolethread"], command )#self.current_consolethread #是否弹出对话框 #超时时间 # 匹配log #代表测试通过 #单独保存本次log文件 #清空本次log缓存 #输出本次结果到报告 #设置第一列"测试项"格式 
#设置第二列“测试判断”格式 # worksheet.autofilter(C1,C54,wrap_format) #报告部分 #合并header单元格 # report_cell_format.set_border() #接收到的回显log处理 # if data != False: # print("sequencer线程接收到的数据是:", data) # self.log_data_buffer += data #匹配log # print("log缓存区:", log_data_buffer) #导入前先检查每个数据格式是否正确 #导入前先检查每个数据格式是否正确
| 2.342592
| 2
|
bzt/modules/selenium.py
|
gulraiz14/Taurus
| 0
|
6629236
|
"""
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import os
import shutil
import subprocess
import sys
import time
from abc import abstractmethod
import urwid
from bzt.engine import ScenarioExecutor, Scenario, FileLister
from bzt.modules.aggregator import ConsolidatingAggregator
from bzt.modules.console import WidgetProvider, PrioritizedWidget
from bzt.modules.jmeter import JTLReader
from bzt.six import string_types, text_type, etree, parse
from bzt.utils import RequiredTool, shell_exec, shutdown_process, JavaVM, TclLibrary, get_files_recursive
from bzt.utils import dehumanize_time, MirrorsManager, is_windows, BetterDict, get_full_path
try:
from pyvirtualdisplay.smartdisplay import SmartDisplay as Display
except ImportError:
from pyvirtualdisplay import Display
class SeleniumExecutor(ScenarioExecutor, WidgetProvider, FileLister):
    """
    Executor that runs Selenium-based functional tests: Python scripts via a
    nose runner, Java sources / jars via a JUnit runner, optionally inside a
    virtual X display (pyvirtualdisplay).

    :type virtual_display: Display
    :type runner: AbstractTestRunner
    """
    SELENIUM_DOWNLOAD_LINK = "http://selenium-release.storage.googleapis.com/{version}/" \
                             "selenium-server-standalone-{version}.0.jar"
    SELENIUM_VERSION = "2.53"
    JUNIT_DOWNLOAD_LINK = "http://search.maven.org/remotecontent?filepath=junit/junit/" \
                          "{version}/junit-{version}.jar"
    JUNIT_VERSION = "4.12"
    JUNIT_MIRRORS_SOURCE = "http://search.maven.org/solrsearch/select?q=g%3A%22junit%22%20AND%20a%3A%22" \
                           "junit%22%20AND%20v%3A%22{version}%22&rows=20&wt=json".format(version=JUNIT_VERSION)
    HAMCREST_DOWNLOAD_LINK = "https://hamcrest.googlecode.com/files/hamcrest-core-1.3.jar"
    SUPPORTED_TYPES = [".py", ".jar", ".java"]
    # class-level map engine -> Display, so multiple executors under the same
    # engine share a single virtual display instance
    SHARED_VIRTUAL_DISPLAY = {}

    def __init__(self):
        super(SeleniumExecutor, self).__init__()
        self.additional_env = {}  # extra env vars forwarded to the runner process
        self.virtual_display = None
        self.start_time = None
        self.end_time = None
        self.runner = None  # AbstractTestRunner subclass, created in prepare()
        self.widget = None
        self.reader = None
        self.kpi_file = None
        # NOTE(review): err_jtl is never read; prepare() assigns self.err_file
        # instead — looks like a leftover attribute, confirm before removing
        self.err_jtl = None
        self.runner_working_dir = None
        self.scenario = None
        self.script = None
        self.self_generated_script = False  # True when script was built from "requests"

    def set_virtual_display(self):
        """Start (or reuse a shared) virtual display when configured; no-op on Windows."""
        display_conf = self.settings.get("virtual-display")
        if display_conf:
            if is_windows():
                self.log.warning("Cannot have virtual display on Windows, ignoring")
            else:
                if self.engine in SeleniumExecutor.SHARED_VIRTUAL_DISPLAY:
                    # another executor under this engine already started one
                    self.virtual_display = SeleniumExecutor.SHARED_VIRTUAL_DISPLAY[self.engine]
                else:
                    width = display_conf.get("width", 1024)
                    height = display_conf.get("height", 768)
                    self.virtual_display = Display(size=(width, height))
                    msg = "Starting virtual display[%s]: %s"
                    self.log.info(msg, self.virtual_display.size, self.virtual_display.new_display_var)
                    self.virtual_display.start()
                    SeleniumExecutor.SHARED_VIRTUAL_DISPLAY[self.engine] = self.virtual_display

    def free_virtual_display(self):
        """Stop the virtual display if still alive and drop the shared reference."""
        if self.virtual_display and self.virtual_display.is_alive():
            self.virtual_display.stop()
        if self.engine in SeleniumExecutor.SHARED_VIRTUAL_DISPLAY:
            del SeleniumExecutor.SHARED_VIRTUAL_DISPLAY[self.engine]

    def get_script_path(self, scenario=None):
        """Resolve the script path from the given scenario, or from self.script."""
        if scenario:
            return super(SeleniumExecutor, self).get_script_path(scenario)
        else:
            return self.engine.find_file(self.script)

    def _create_runner(self, working_dir, kpi_file, err_file):
        """Build a NoseTester (.py) or JUnitTester (.jar/.java) for this run."""
        script_path = self.get_script_path()
        script_type = self.detect_script_type(script_path)
        runner_config = BetterDict()
        if script_type == ".py":
            runner_class = NoseTester
            runner_config.merge(self.settings.get("selenium-tools").get("nose"))
        else:  # script_type == ".jar" or script_type == ".java":
            runner_class = JUnitTester
            runner_config.merge(self.settings.get("selenium-tools").get("junit"))
            runner_config['props-file'] = self.engine.create_artifact("customrunner", ".properties")
        runner_config["script-type"] = script_type
        runner_config["working-dir"] = working_dir
        # .get() with a default fills the key in when absent (BetterDict semantics)
        runner_config.get("artifacts-dir", self.engine.artifacts_dir)
        runner_config.get("report-file", kpi_file)
        runner_config.get("err-file", err_file)
        runner_config.get("stdout", self.engine.create_artifact("junit", ".out"))
        runner_config.get("stderr", self.engine.create_artifact("junit", ".err"))
        return runner_class(runner_config, self)

    def _create_reader(self, kpi_file, err_file):
        """Create a JTL results reader over the KPI/error files produced by the runner."""
        return JTLReader(kpi_file, self.log, err_file)

    def prepare(self):
        """Set up display, resolve/generate script, create runner + results reader."""
        self.set_virtual_display()
        self.scenario = self.get_scenario()
        self._verify_script()
        self.runner_working_dir = self.engine.create_artifact("classes", "")
        self.kpi_file = self.engine.create_artifact("selenium_tests_report", ".csv")
        self.err_file = self.engine.create_artifact("selenium_tests_err", ".xml")
        self.runner = self._create_runner(self.runner_working_dir, self.kpi_file, self.err_file)
        self._cp_resource_files(self.runner_working_dir)
        self.runner.prepare()
        self.reader = self._create_reader(self.kpi_file, self.err_file)
        if isinstance(self.engine.aggregator, ConsolidatingAggregator):
            self.engine.aggregator.add_underling(self.reader)

    def _verify_script(self):
        """Ensure there is something to run: an explicit script or one generated from 'requests'."""
        if Scenario.SCRIPT in self.scenario:
            self.script = self.scenario.get(Scenario.SCRIPT)
        elif "requests" in self.scenario:
            self.script = self.__tests_from_requests()
            self.self_generated_script = True
        else:
            raise ValueError("Nothing to test, no requests were provided in scenario")

    def _cp_resource_files(self, runner_working_dir):
        """Copy the script (or whole dir) into the runner working dir; move if generated."""
        script = self.get_script_path()
        if os.path.isdir(script):
            shutil.copytree(script, runner_working_dir)
        else:
            os.makedirs(runner_working_dir)
            if self.self_generated_script:
                shutil.move(script, runner_working_dir)
            else:
                script_type = self.detect_script_type(script)
                script_name = os.path.basename(script)
                if script_type == ".py" and not script_name.lower().startswith('test'):
                    # nose only discovers files whose names start with "test"
                    target_name = 'test_' + script_name
                    msg = "Script '%s' won't be discovered by nosetests, renaming script to %s"
                    self.log.warning(msg, script_name, target_name)
                else:
                    target_name = script_name
                target_path = os.path.join(runner_working_dir, target_name)
                shutil.copy2(script, target_path)

    @staticmethod
    def detect_script_type(script_path):
        """
        Detect the test type from file extension(s); for a directory, scan all
        contained files. Priority: .java over .py over .jar.

        :raise ValueError: missing/invalid path or no supported extension found
        """
        if not isinstance(script_path, string_types) and not isinstance(script_path, text_type):
            raise ValueError("Nothing to test, no files were provided in scenario")
        if not os.path.exists(script_path):
            raise ValueError("Script %s doesn't exist" % script_path)
        file_types = set()
        if os.path.isfile(script_path):  # regular file received
            file_types.add(os.path.splitext(script_path)[1].lower())
        else:  # dir received: check contained files
            for file_name in get_files_recursive(script_path):
                file_types.add(os.path.splitext(file_name)[1].lower())
        if '.java' in file_types:
            file_ext = '.java'
        elif '.py' in file_types:
            file_ext = '.py'
        elif '.jar' in file_types:
            file_ext = '.jar'
        else:
            raise ValueError("Unsupported script type: %s" % script_path)
        return file_ext

    def startup(self):
        """
        Start the runner subprocess and record the start time.
        """
        self.start_time = time.time()
        self.runner.env = self.additional_env
        self.runner.run_tests()

    def check_virtual_display(self):
        """Raise RuntimeError if the virtual display process died unexpectedly."""
        if self.virtual_display:
            if not self.virtual_display.is_alive():
                self.log.info("Virtual display out: %s", self.virtual_display.stdout)
                self.log.warning("Virtual display err: %s", self.virtual_display.stderr)
                raise RuntimeError("Virtual display failed: %s" % self.virtual_display.return_code)

    def check(self):
        """
        Refresh the console widget, verify the display and poll the runner.

        :return: True when the test run has completed
        """
        if self.widget:
            self.widget.update()
        self.check_virtual_display()
        return self.runner.is_finished()

    def report_test_duration(self):
        """Log run duration; only meaningful after startup() set start_time."""
        if self.start_time:
            self.end_time = time.time()
            self.log.debug("Selenium tests ran for %s seconds", self.end_time - self.start_time)

    def shutdown(self):
        """
        Shut down the test runner and report how long the tests ran.
        """
        self.runner.shutdown()
        self.report_test_duration()

    def post_process(self):
        """Release the virtual display; warn loudly when no results were read at all."""
        self.free_virtual_display()
        if self.reader and not self.reader.read_records:
            raise RuntimeWarning("Empty results, most likely Selenium failed")

    def get_widget(self):
        """Lazily create and return the console status widget."""
        if not self.widget:
            self.widget = SeleniumWidget(self.script, self.runner.settings.get("stdout"))
        return self.widget

    def resource_files(self):
        """List files to ship along with the test (FileLister interface)."""
        self.scenario = self.get_scenario()
        self._verify_script()
        script_path = self.get_script_path()
        resources = []
        if script_path is not None:
            resources.append(script_path)
        return resources

    def __tests_from_requests(self):
        """Generate a Python nose test module from the scenario's 'requests' section."""
        filename = self.engine.create_artifact("test_requests", ".py")
        nose_test = SeleniumScriptBuilder(self.scenario, self.log)
        if self.virtual_display:
            nose_test.window_size = self.virtual_display.size
        nose_test.gen_test_case()
        nose_test.save(filename)
        return filename
class AbstractTestRunner(object):
    """
    Base class for test runner backends (JUnit, nose). Owns the runner
    subprocess, its configuration and the file descriptors it opens.
    """

    def __init__(self, settings, executor):
        self.process = None  # runner subprocess handle, created by run_tests()
        self.settings = settings
        self.required_tools = []  # tools populated by run_checklist()
        self.executor = executor
        self.scenario = executor.scenario
        self.load = executor.get_load()
        self.artifacts_dir = self.settings.get("artifacts-dir")
        self.working_dir = self.settings.get("working-dir")
        self.log = executor.log.getChild(self.__class__.__name__)
        self.opened_descriptors = []  # files to close in shutdown()
        self.is_failed = False
        self.env = {}  # extra env vars for the runner process

    @abstractmethod
    def prepare(self):
        """Install tools / compile sources; called once before run_tests()."""
        pass

    @abstractmethod
    def run_checklist(self):
        """Populate required_tools and install whatever is missing."""
        pass

    @abstractmethod
    def run_tests(self):
        """Launch the runner subprocess (non-blocking)."""
        pass

    def is_finished(self):
        """
        Poll the runner subprocess.

        :return: True when it exited successfully, False while running
        :raise RuntimeError: when it exited with a non-zero code (stderr included)
        """
        ret_code = self.process.poll()
        if ret_code is not None:
            if ret_code != 0:
                self.log.debug("Test runner exit code: %s", ret_code)
                with open(self.settings.get("stderr")) as fds:
                    std_err = fds.read()
                self.is_failed = True
                raise RuntimeError("Test runner %s has failed: %s" % (self.__class__.__name__, std_err.strip()))
            return True
        return False

    def check_tools(self):
        """Install every required tool that is not already present."""
        for tool in self.required_tools:
            if not tool.check_if_installed():
                self.log.info("Installing %s", tool.tool_name)
                tool.install()

    def shutdown(self):
        """Terminate the runner subprocess and close all opened descriptors."""
        shutdown_process(self.process, self.log)
        for desc in self.opened_descriptors:
            desc.close()
        self.opened_descriptors = []
class JUnitTester(AbstractTestRunner):
    """
    Runner backend for Java sources and jars: compiles .java into a jar when
    needed, then runs them through the bundled taurusjunit.CustomRunner.
    """

    def __init__(self, junit_config, executor):
        """
        :type junit_config: BetterDict
        """
        super(JUnitTester, self).__init__(junit_config, executor)
        self.props_file = junit_config['props-file']
        # resolve tool paths from settings, falling back to the defaults below
        path_lambda = lambda key, val: get_full_path(self.settings.get(key, val))
        self.junit_path = path_lambda("path", "~/.bzt/selenium-taurus/tools/junit/junit.jar")
        self.hamcrest_path = path_lambda("hamcrest-core", "~/.bzt/selenium-taurus/tools/junit/hamcrest-core.jar")
        self.selenium_server_jar_path = path_lambda("selenium-server",
                                                    "~/.bzt/selenium-taurus/selenium-server.jar")
        # custom JUnit listener shipped with bzt's resources
        self.junit_listener_path = os.path.join(get_full_path(__file__, step_up=1),
                                                os.pardir,
                                                "resources",
                                                "taurus-junit-1.0.jar")
        self.target_java = str(junit_config.get("compile-target-java", "1.7"))
        self.base_class_path = [self.selenium_server_jar_path, self.junit_path, self.junit_listener_path,
                                self.hamcrest_path]
        self.base_class_path.extend(self.scenario.get("additional-classpath", []))

    def prepare(self):
        """
        Run the tool checklist; compile .java sources into a jar when needed.
        """
        self.run_checklist()
        if self.settings.get("script-type", None) == ".java":
            self.compile_scripts()

    def run_checklist(self):
        """
        Ensure presence of: java, javac (only for .java), Tcl library,
        selenium-server.jar, junit.jar, hamcrest jar and the JUnit listener jar.
        """
        # only check javac if we need to compile. if we have JAR as script - we don't need javac
        if self.settings.get("script-type", None) == ".java":
            self.required_tools.append(JavaC("", "", self.log))
        self.required_tools.append(TclLibrary(self.log))
        self.required_tools.append(JavaVM("", "", self.log))
        link = SeleniumExecutor.SELENIUM_DOWNLOAD_LINK.format(version=SeleniumExecutor.SELENIUM_VERSION)
        self.required_tools.append(SeleniumServerJar(self.selenium_server_jar_path, link, self.log))
        self.required_tools.append(JUnitJar(self.junit_path, self.log, SeleniumExecutor.JUNIT_VERSION))
        self.required_tools.append(HamcrestJar(self.hamcrest_path, SeleniumExecutor.HAMCREST_DOWNLOAD_LINK))
        self.required_tools.append(JUnitListenerJar(self.junit_listener_path, ""))
        self.check_tools()

    def compile_scripts(self):
        """
        Compile all .java files under the working dir with javac, then package
        the resulting classes into a jar. Skipped when the jar already exists.
        """
        self.log.debug("Compiling .java files started")
        jar_path = os.path.join(self.executor.engine.artifacts_dir,
                                self.working_dir,
                                self.settings.get("jar-name", "compiled.jar"))
        if os.path.exists(jar_path):
            self.log.debug(".java files are already compiled, skipping")
            return
        java_files = []
        # collect every .java file, recursively
        for dir_entry in os.walk(self.working_dir):
            if dir_entry[2]:
                for test_file in dir_entry[2]:
                    if os.path.splitext(test_file)[1].lower() == ".java":
                        java_files.append(os.path.join(dir_entry[0], test_file))
        compile_cl = ["javac", "-source", self.target_java, "-target", self.target_java, ]
        compile_cl.extend(["-cp", os.pathsep.join(self.base_class_path)])
        compile_cl.extend(java_files)
        with open(os.path.join(self.artifacts_dir, "javac.out"), 'ab') as javac_out:
            with open(os.path.join(self.artifacts_dir, "javac.err"), 'ab') as javac_err:
                self.log.debug("running javac: %s", compile_cl)
                self.process = shell_exec(compile_cl, cwd=self.working_dir, stdout=javac_out, stderr=javac_err)
                # busy-wait for javac to finish; compilation is a blocking prepare step
                ret_code = self.process.poll()
                while ret_code is None:
                    self.log.debug("Compiling .java files...")
                    time.sleep(1)
                    ret_code = self.process.poll()
                if ret_code != 0:
                    self.log.debug("javac exit code: %s", ret_code)
                    with open(javac_err.name) as err_file:
                        out = err_file.read()
                    raise RuntimeError("Javac exited with error:\n %s" % out.strip())
        self.log.info("Compiling .java files completed")
        self.make_jar()

    def make_jar(self):
        """
        Package compiled .class files into the configured jar ("compiled.jar"
        by default). Falls back to jarring the first package directory when no
        top-level .class files exist.
        """
        self.log.debug("Making .jar started")
        with open(os.path.join(self.artifacts_dir, "jar.out"), 'ab') as jar_out:
            with open(os.path.join(self.artifacts_dir, "jar.err"), 'ab') as jar_err:
                class_files = [java_file for java_file in os.listdir(self.working_dir) if java_file.endswith(".class")]
                jar_name = self.settings.get("jar-name", "compiled.jar")
                if class_files:
                    compile_jar_cl = ["jar", "-cf", jar_name]
                    compile_jar_cl.extend(class_files)
                else:
                    # no loose classes: assume a single package dir and jar it whole
                    package_dir = os.listdir(self.working_dir)[0]
                    compile_jar_cl = ["jar", "-cf", jar_name, "-C", package_dir, "."]
                self.log.debug("running jar: %s", compile_jar_cl)
                self.process = shell_exec(compile_jar_cl, cwd=self.working_dir, stdout=jar_out, stderr=jar_err)
                ret_code = self.process.poll()
                while ret_code is None:
                    self.log.debug("Making jar file...")
                    time.sleep(1)
                    ret_code = self.process.poll()
                if ret_code != 0:
                    with open(jar_err.name) as err_file:
                        out = err_file.read()
                    self.log.info("Making jar failed with code %s", ret_code)
                    self.log.info("jar output: %s", out)
                    raise RuntimeError("Jar exited with non-zero code")
        self.log.info("Making .jar file completed")

    def run_tests(self):
        """Write the runner .properties file and launch taurusjunit.CustomRunner."""
        # java -cp junit.jar:selenium-test-small.jar:
        # selenium-2.46.0/selenium-java-2.46.0.jar:./../selenium-server.jar
        # org.junit.runner.JUnitCore TestBlazemeterPass
        jar_list = [os.path.join(self.working_dir, jar) for jar in os.listdir(self.working_dir) if jar.endswith(".jar")]
        self.base_class_path.extend(jar_list)
        with open(self.props_file, 'wt') as props:
            # paths use forward slashes so Java properties parse them on Windows too
            props.write("kpi_log=%s\n" % self.settings.get("report-file").replace(os.path.sep, '/'))
            props.write("error_log=%s\n" % self.settings.get("err-file").replace(os.path.sep, '/'))
            if self.load.iterations:
                props.write("iterations=%s\n" % self.load.iterations)
            if self.load.hold:
                props.write("hold_for=%s\n" % self.load.hold)
            for index, item in enumerate(jar_list):
                props.write("target_%s=%s\n" % (index, item.replace(os.path.sep, '/')))
        std_out = open(self.settings.get("stdout"), "wt")
        self.opened_descriptors.append(std_out)
        std_err = open(self.settings.get("stderr"), "wt")
        self.opened_descriptors.append(std_err)
        env = BetterDict()
        env.merge(dict(os.environ))
        env.merge(self.env)
        junit_command_line = ["java", "-cp", os.pathsep.join(self.base_class_path), "taurusjunit.CustomRunner",
                              self.props_file]
        self.process = self.executor.execute(junit_command_line,
                                             cwd=self.artifacts_dir,
                                             stdout=std_out,
                                             stderr=std_err,
                                             env=env)
class NoseTester(AbstractTestRunner):
    """
    Runner backend for Python Selenium tests, executed through the bundled
    nose plugin (resources/nose_plugin.py).
    """

    def __init__(self, nose_config, executor):
        super(NoseTester, self).__init__(nose_config, executor)
        # plugin script shipped with bzt's resources
        self.plugin_path = os.path.join(get_full_path(__file__, step_up=1),
                                        os.pardir,
                                        "resources",
                                        "nose_plugin.py")

    def prepare(self):
        """Only the tool checklist is needed; nothing to compile for Python."""
        self.run_checklist()

    def run_checklist(self):
        """
        Verify Tcl library and the Taurus nose plugin are available.
        """
        if sys.version >= '3':
            self.log.warn("You are using python3, make sure that your scripts are able to run in python3!")
        self.required_tools.append(TclLibrary(self.log))
        self.required_tools.append(TaurusNosePlugin(self.plugin_path, ""))
        self.check_tools()

    def run_tests(self):
        """
        Launch the nose plugin subprocess over the working dir, passing report
        file (-k), error file (-e), iterations (-i) and hold time (-d).
        """
        executable = self.settings.get("interpreter", sys.executable)
        nose_command_line = [executable, self.plugin_path, '-k', self.settings.get("report-file"),
                             '-e', self.settings.get("err-file")]
        if self.load.iterations:
            nose_command_line += ['-i', str(self.load.iterations)]
        if self.load.hold:
            nose_command_line += ['-d', str(self.load.hold)]
        nose_command_line += [self.working_dir]
        std_out = open(self.settings.get("stdout"), "wt")
        self.opened_descriptors.append(std_out)
        std_err = open(self.settings.get("stderr"), "wt")
        self.opened_descriptors.append(std_err)
        env = BetterDict()
        env.merge(dict(os.environ))
        env.merge(self.env)
        self.process = self.executor.execute(nose_command_line,
                                             cwd=self.artifacts_dir,
                                             stdout=std_out,
                                             stderr=std_err,
                                             env=env)
class SeleniumWidget(urwid.Pile, PrioritizedWidget):
    """
    Console status widget: shows the script name, the latest summary stats and
    the currently running test, parsed from the runner's stdout file.
    """

    def __init__(self, script, runner_output):
        widgets = []
        self.script_name = urwid.Text("Tests: %s" % script)
        self.summary_stats = urwid.Text("")
        self.current_test = urwid.Text("")
        self.runner_output = runner_output  # path to the runner's stdout file
        widgets.append(self.script_name)
        widgets.append(self.summary_stats)
        widgets.append(self.current_test)
        super(SeleniumWidget, self).__init__(widgets)
        PrioritizedWidget.__init__(self, priority=10)

    def update(self):
        """Re-read the runner output and refresh the displayed test/summary line."""
        cur_test, reader_summary = ["No data received yet"] * 2
        if os.path.exists(self.runner_output):
            with open(self.runner_output, "rt") as fds:
                lines = fds.readlines()
                if lines:
                    # last line is expected to be "<current test>,<summary>"
                    line = lines[-1]
                    if line and "," in line:
                        cur_test, reader_summary = line.split(",")
        self.current_test.set_text(cur_test)
        self.summary_stats.set_text(reader_summary)
        self._invalidate()
class SeleniumServerJar(RequiredTool):
    """Downloadable selenium-server-standalone jar requirement."""

    def __init__(self, tool_path, download_link, parent_logger):
        super(SeleniumServerJar, self).__init__("Selenium server", tool_path, download_link)
        self.log = parent_logger.getChild(self.__class__.__name__)

    def check_if_installed(self):
        """Probe the jar by running it with -help; installed when exit code is 0."""
        self.log.debug("%s path: %s", self.tool_name, self.tool_path)
        selenium_launch_command = ["java", "-jar", self.tool_path, "-help"]
        selenium_subproc = shell_exec(selenium_launch_command, stderr=subprocess.STDOUT)
        output = selenium_subproc.communicate()
        self.log.debug("%s output: %s", self.tool_name, output)
        if selenium_subproc.returncode == 0:
            self.already_installed = True
            return True
        else:
            return False
class JUnitJar(RequiredTool):
    """Downloadable junit.jar requirement, fetched via Maven Central mirrors."""

    def __init__(self, tool_path, parent_logger, junit_version):
        super(JUnitJar, self).__init__("JUnit", tool_path)
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.version = junit_version
        self.mirror_manager = JUnitMirrorsManager(self.log, self.version)

    def install(self):
        """Download the jar from a mirror into the tool_path's directory."""
        dest = get_full_path(self.tool_path, step_up=1)
        dest = os.path.abspath(dest)
        junit_dist = super(JUnitJar, self).install_with_mirrors(dest, ".jar")
        self.log.info("Installing %s into %s", self.tool_name, dest)
        junit_dist.close()
        if not os.path.exists(dest):
            os.makedirs(dest)
        # move the downloaded temp file into its final location
        shutil.move(junit_dist.name, self.tool_path)
        self.log.info("Installed JUnit successfully")
        if not self.check_if_installed():
            raise RuntimeError("Unable to run %s after installation!" % self.tool_name)
class HamcrestJar(RequiredTool):
    """Downloadable hamcrest-core jar requirement (default RequiredTool behavior)."""

    def __init__(self, tool_path, download_link):
        super(HamcrestJar, self).__init__("HamcrestJar", tool_path, download_link)
class JavaC(RequiredTool):
    """javac compiler requirement; cannot be auto-installed."""

    def __init__(self, tool_path, download_link, parent_logger):
        super(JavaC, self).__init__("JavaC", tool_path, download_link)
        self.log = parent_logger.getChild(self.__class__.__name__)

    def check_if_installed(self):
        """
        Probe ``javac -version``.

        :raise RuntimeError: when javac cannot be invoked at all
        """
        try:
            output = subprocess.check_output(["javac", '-version'], stderr=subprocess.STDOUT)
            self.log.debug("%s output: %s", self.tool_name, output)
            return True
        except BaseException:
            raise RuntimeError("The %s is not operable or not available. Consider installing it" % self.tool_name)

    def install(self):
        # javac comes with the JDK; there is nothing we can install ourselves
        raise NotImplementedError()
class JUnitListenerJar(RequiredTool):
    """Bundled taurus-junit listener jar; shipped with bzt, never downloaded."""

    def __init__(self, tool_path, download_link):
        super(JUnitListenerJar, self).__init__("JUnitListener", tool_path, download_link)

    def install(self):
        # the listener jar ships inside bzt's resources; absence is a packaging error
        raise NotImplementedError()
class TaurusNosePlugin(RequiredTool):
    """Bundled nose plugin script; shipped with bzt, never downloaded."""

    def __init__(self, tool_path, download_link):
        super(TaurusNosePlugin, self).__init__("TaurusNosePlugin", tool_path, download_link)

    def install(self):
        # the plugin ships inside bzt's resources; absence is a packaging error
        raise NotImplementedError()
class NoseTest(object):
    """
    Builds a nose/unittest test module as an XML tree of source lines.

    Each element holds one generated line of Python in ``.text`` plus an
    ``indent`` attribute: the number of leading spaces to emit on serialization.
    """
    # boilerplate import block emitted at the top of every generated module
    IMPORTS = """import unittest
import re
from time import sleep
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
"""

    def __init__(self):
        self.root = etree.Element("NoseTest")
        self.tree = etree.ElementTree(self.root)

    def add_imports(self):
        """Return an element holding the standard import block."""
        imports = etree.Element("imports")
        imports.text = NoseTest.IMPORTS
        return imports

    def gen_class_definition(self, class_name, inherits_from, indent="0"):
        """
        Return a ``class <name>(<bases>):`` line element.

        :param inherits_from: list of base-class names
        """
        def_tmpl = "class {class_name}({inherits_from}):"
        class_def_element = etree.Element("class_definition", indent=indent)
        # join with commas so multiple base classes produce valid Python
        # (was "".join, which would emit e.g. "class Foo(AB):"; now consistent
        # with gen_method_definition's parameter joining)
        class_def_element.text = def_tmpl.format(class_name=class_name, inherits_from=",".join(inherits_from))
        return class_def_element

    def gen_method_definition(self, method_name, params, indent="4"):
        """Return a ``def <name>(<params>):`` line element."""
        def_tmpl = "def {method_name}({params}):"
        method_def_element = etree.Element("method_definition", indent=indent)
        method_def_element.text = def_tmpl.format(method_name=method_name, params=",".join(params))
        return method_def_element

    def gen_method_statement(self, statement, indent="8"):
        """Return a single statement line element at the given indent."""
        statement_elem = etree.Element("statement", indent=indent)
        statement_elem.text = statement
        return statement_elem
class SeleniumScriptBuilder(NoseTest):
    """
    Generates a Python Selenium test (one setUp/test/tearDown class) from a
    Taurus scenario's "requests" section.
    """

    def __init__(self, scenario, parent_logger):
        super(SeleniumScriptBuilder, self).__init__()
        self.window_size = None  # (width, height) tuple when a virtual display is used
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.scenario = scenario

    def gen_test_case(self):
        """Build the whole test class tree: setUp, one test method per scenario, tearDown."""
        self.log.debug("Generating Test Case test method")
        imports = self.add_imports()
        self.root.append(imports)
        test_class = self.gen_class_definition("TestRequests", ["unittest.TestCase"])
        self.root.append(test_class)
        test_class.append(self.gen_setup_method())
        requests = self.scenario.get_requests()
        test_method = self.gen_test_method()
        test_class.append(test_method)
        scenario_timeout = self.scenario.get("timeout", 30)
        default_address = self.scenario.get("default-address", None)
        for req in requests:
            parsed_url = parse.urlparse(req.url)
            # prepend default address for relative URLs
            if default_address is not None and not parsed_url.netloc:
                url = default_address + req.url
            else:
                url = req.url
            test_method.append(self.gen_comment("start request: %s" % url))
            if req.timeout is not None:
                # request-specific implicit wait overrides the scenario default
                test_method.append(self.gen_impl_wait(req.timeout))
            test_method.append(self.gen_method_statement("self.driver.get('%s')" % url))
            think_time = req.think_time if req.think_time else self.scenario.get("think-time", None)
            if think_time is not None:
                test_method.append(self.gen_method_statement("sleep(%s)" % dehumanize_time(think_time)))
            if "assert" in req.config:
                test_method.append(self.__gen_assert_page())
                for assert_config in req.config.get("assert"):
                    test_method.extend(self.gen_assertion(assert_config))
            if req.timeout is not None:
                # restore the scenario-level implicit wait after the override
                test_method.append(self.gen_impl_wait(scenario_timeout))
            test_method.append(self.gen_comment("end request: %s" % url))
            test_method.append(self.__gen_new_line())
        test_class.append(self.gen_teardown_method())

    def gen_setup_method(self):
        """Generate setUp(): create the webdriver, set waits and window size."""
        self.log.debug("Generating setUp test method")
        browsers = ["Firefox", "Chrome", "Ie", "Opera"]
        browser = self.scenario.get("browser", "Firefox")
        if browser not in browsers:
            raise ValueError("Unsupported browser name: %s" % browser)
        setup_method_def = self.gen_method_definition("setUp", ["self"])
        setup_method_def.append(self.gen_method_statement("self.driver=webdriver.%s()" % browser))
        scenario_timeout = self.scenario.get("timeout", 30)
        setup_method_def.append(self.gen_impl_wait(scenario_timeout))
        if self.window_size:
            setup_method_def.append(self.gen_method_statement("self.driver.set_window_size(%s, %s)" % self.window_size))
        else:
            setup_method_def.append(self.gen_method_statement("self.driver.maximize_window()"))
        setup_method_def.append(self.__gen_new_line())
        return setup_method_def

    def gen_impl_wait(self, timeout):
        """Generate an implicitly_wait() statement from a human-readable timeout."""
        return self.gen_method_statement("self.driver.implicitly_wait(%s)" % dehumanize_time(timeout))

    def gen_comment(self, comment):
        """Generate a source comment line."""
        return self.gen_method_statement("# %s" % comment)

    def gen_test_method(self):
        """Generate the (initially empty) test method definition."""
        self.log.debug("Generating test method")
        test_method = self.gen_method_definition("test_method", ["self"])
        return test_method

    def gen_teardown_method(self):
        """Generate tearDown(): quit the webdriver."""
        self.log.debug("Generating tearDown test method")
        tear_down_method_def = self.gen_method_definition("tearDown", ["self"])
        tear_down_method_def.append(self.gen_method_statement("self.driver.quit()"))
        return tear_down_method_def

    def gen_assertion(self, assertion_config):
        """
        Generate assertion statements for one "assert" config entry.
        Supports regexp/plain "contains" checks against the page body, with
        optional negation; only the "body" subject is supported.
        """
        self.log.debug("Generating assertion, config: %s", assertion_config)
        assertion_elements = []
        if isinstance(assertion_config, string_types):
            # shorthand: a bare string means {"contains": [string]}
            assertion_config = {"contains": [assertion_config]}
        for val in assertion_config["contains"]:
            regexp = assertion_config.get("regexp", True)
            reverse = assertion_config.get("not", False)
            subject = assertion_config.get("subject", "body")
            if subject != "body":
                raise ValueError("Only 'body' subject supported ")
            if regexp:
                # "not" flips: assert zero matches instead of at-least-one
                assert_method = "self.assertEqual" if reverse else "self.assertNotEqual"
                assertion_elements.append(self.gen_method_statement('re_pattern = re.compile("%s")' % val))
                method = '%s(0, len(re.findall(re_pattern, body)))' % assert_method
                assertion_elements.append(self.gen_method_statement(method))
            else:
                assert_method = "self.assertNotIn" if reverse else "self.assertIn"
                assertion_elements.append(self.gen_method_statement('%s("%s", body)' % (assert_method, val)))
        return assertion_elements

    def __gen_new_line(self, indent="8"):
        # empty statement => blank line in the generated source
        return self.gen_method_statement("", indent=indent)

    def __gen_assert_page(self):
        # capture page source once per request, for the assertions to inspect
        return self.gen_method_statement("body = self.driver.page_source")

    def save(self, filename):
        """Serialize the element tree into a Python source file at `filename`."""
        with open(filename, 'wt') as fds:
            for child in self.root.iter():
                if child.text is not None:
                    indent = int(child.get('indent', "0"))
                    fds.write(" " * indent + child.text + "\n")
class JUnitMirrorsManager(MirrorsManager):
    """Resolves JUnit jar download mirrors from the Maven Central search API."""

    def __init__(self, parent_logger, junit_version):
        self.junit_version = junit_version
        super(JUnitMirrorsManager, self).__init__(SeleniumExecutor.JUNIT_MIRRORS_SOURCE, parent_logger)

    def _parse_mirrors(self):
        """
        Parse the search-API JSON response into a list of download links.
        The static JUNIT_DOWNLOAD_LINK is always appended as a fallback.
        """
        links = []
        if self.page_source is not None:
            self.log.debug('Parsing mirrors...')
            try:
                resp = json.loads(self.page_source)
                objects = resp.get("response", {}).get("docs", [])
                if objects:
                    # first search hit: g=groupId, a=artifactId, v=version, p=packaging
                    obj = objects[0]
                    group = obj.get("g")
                    artifact = obj.get("a")
                    version = obj.get("v")
                    ext = obj.get("p")
                    link_template = "http://search.maven.org/remotecontent?filepath={group}/{artifact}/" \
                                    "{version}/{artifact}-{version}.{ext}"
                    link = link_template.format(group=group, artifact=artifact, version=version, ext=ext)
                    links.append(link)
            except BaseException as exc:
                # best-effort: a malformed response just means we fall back below
                self.log.error("Error while parsing mirrors %s", exc)
        default_link = SeleniumExecutor.JUNIT_DOWNLOAD_LINK.format(version=self.junit_version)
        if default_link not in links:
            links.append(default_link)
        self.log.debug('Total mirrors: %d', len(links))
        return links
|
"""
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import os
import shutil
import subprocess
import sys
import time
from abc import abstractmethod
import urwid
from bzt.engine import ScenarioExecutor, Scenario, FileLister
from bzt.modules.aggregator import ConsolidatingAggregator
from bzt.modules.console import WidgetProvider, PrioritizedWidget
from bzt.modules.jmeter import JTLReader
from bzt.six import string_types, text_type, etree, parse
from bzt.utils import RequiredTool, shell_exec, shutdown_process, JavaVM, TclLibrary, get_files_recursive
from bzt.utils import dehumanize_time, MirrorsManager, is_windows, BetterDict, get_full_path
try:
from pyvirtualdisplay.smartdisplay import SmartDisplay as Display
except ImportError:
from pyvirtualdisplay import Display
class SeleniumExecutor(ScenarioExecutor, WidgetProvider, FileLister):
"""
Selenium executor
:type virtual_display: Display
:type runner: AbstractTestRunner
"""
SELENIUM_DOWNLOAD_LINK = "http://selenium-release.storage.googleapis.com/{version}/" \
"selenium-server-standalone-{version}.0.jar"
SELENIUM_VERSION = "2.53"
JUNIT_DOWNLOAD_LINK = "http://search.maven.org/remotecontent?filepath=junit/junit/" \
"{version}/junit-{version}.jar"
JUNIT_VERSION = "4.12"
JUNIT_MIRRORS_SOURCE = "http://search.maven.org/solrsearch/select?q=g%3A%22junit%22%20AND%20a%3A%22" \
"junit%22%20AND%20v%3A%22{version}%22&rows=20&wt=json".format(version=JUNIT_VERSION)
HAMCREST_DOWNLOAD_LINK = "https://hamcrest.googlecode.com/files/hamcrest-core-1.3.jar"
SUPPORTED_TYPES = [".py", ".jar", ".java"]
SHARED_VIRTUAL_DISPLAY = {}
def __init__(self):
super(SeleniumExecutor, self).__init__()
self.additional_env = {}
self.virtual_display = None
self.start_time = None
self.end_time = None
self.runner = None
self.widget = None
self.reader = None
self.kpi_file = None
self.err_jtl = None
self.runner_working_dir = None
self.scenario = None
self.script = None
self.self_generated_script = False
def set_virtual_display(self):
display_conf = self.settings.get("virtual-display")
if display_conf:
if is_windows():
self.log.warning("Cannot have virtual display on Windows, ignoring")
else:
if self.engine in SeleniumExecutor.SHARED_VIRTUAL_DISPLAY:
self.virtual_display = SeleniumExecutor.SHARED_VIRTUAL_DISPLAY[self.engine]
else:
width = display_conf.get("width", 1024)
height = display_conf.get("height", 768)
self.virtual_display = Display(size=(width, height))
msg = "Starting virtual display[%s]: %s"
self.log.info(msg, self.virtual_display.size, self.virtual_display.new_display_var)
self.virtual_display.start()
SeleniumExecutor.SHARED_VIRTUAL_DISPLAY[self.engine] = self.virtual_display
def free_virtual_display(self):
if self.virtual_display and self.virtual_display.is_alive():
self.virtual_display.stop()
if self.engine in SeleniumExecutor.SHARED_VIRTUAL_DISPLAY:
del SeleniumExecutor.SHARED_VIRTUAL_DISPLAY[self.engine]
def get_script_path(self, scenario=None):
if scenario:
return super(SeleniumExecutor, self).get_script_path(scenario)
else:
return self.engine.find_file(self.script)
def _create_runner(self, working_dir, kpi_file, err_file):
script_path = self.get_script_path()
script_type = self.detect_script_type(script_path)
runner_config = BetterDict()
if script_type == ".py":
runner_class = NoseTester
runner_config.merge(self.settings.get("selenium-tools").get("nose"))
else: # script_type == ".jar" or script_type == ".java":
runner_class = JUnitTester
runner_config.merge(self.settings.get("selenium-tools").get("junit"))
runner_config['props-file'] = self.engine.create_artifact("customrunner", ".properties")
runner_config["script-type"] = script_type
runner_config["working-dir"] = working_dir
runner_config.get("artifacts-dir", self.engine.artifacts_dir)
runner_config.get("report-file", kpi_file)
runner_config.get("err-file", err_file)
runner_config.get("stdout", self.engine.create_artifact("junit", ".out"))
runner_config.get("stderr", self.engine.create_artifact("junit", ".err"))
return runner_class(runner_config, self)
def _create_reader(self, kpi_file, err_file):
return JTLReader(kpi_file, self.log, err_file)
def prepare(self):
self.set_virtual_display()
self.scenario = self.get_scenario()
self._verify_script()
self.runner_working_dir = self.engine.create_artifact("classes", "")
self.kpi_file = self.engine.create_artifact("selenium_tests_report", ".csv")
self.err_file = self.engine.create_artifact("selenium_tests_err", ".xml")
self.runner = self._create_runner(self.runner_working_dir, self.kpi_file, self.err_file)
self._cp_resource_files(self.runner_working_dir)
self.runner.prepare()
self.reader = self._create_reader(self.kpi_file, self.err_file)
if isinstance(self.engine.aggregator, ConsolidatingAggregator):
self.engine.aggregator.add_underling(self.reader)
def _verify_script(self):
if Scenario.SCRIPT in self.scenario:
self.script = self.scenario.get(Scenario.SCRIPT)
elif "requests" in self.scenario:
self.script = self.__tests_from_requests()
self.self_generated_script = True
else:
raise ValueError("Nothing to test, no requests were provided in scenario")
def _cp_resource_files(self, runner_working_dir):
script = self.get_script_path()
if os.path.isdir(script):
shutil.copytree(script, runner_working_dir)
else:
os.makedirs(runner_working_dir)
if self.self_generated_script:
shutil.move(script, runner_working_dir)
else:
script_type = self.detect_script_type(script)
script_name = os.path.basename(script)
if script_type == ".py" and not script_name.lower().startswith('test'):
target_name = 'test_' + script_name
msg = "Script '%s' won't be discovered by nosetests, renaming script to %s"
self.log.warning(msg, script_name, target_name)
else:
target_name = script_name
target_path = os.path.join(runner_working_dir, target_name)
shutil.copy2(script, target_path)
@staticmethod
def detect_script_type(script_path):
if not isinstance(script_path, string_types) and not isinstance(script_path, text_type):
raise ValueError("Nothing to test, no files were provided in scenario")
if not os.path.exists(script_path):
raise ValueError("Script %s doesn't exist" % script_path)
file_types = set()
if os.path.isfile(script_path): # regular file received
file_types.add(os.path.splitext(script_path)[1].lower())
else: # dir received: check contained files
for file_name in get_files_recursive(script_path):
file_types.add(os.path.splitext(file_name)[1].lower())
if '.java' in file_types:
file_ext = '.java'
elif '.py' in file_types:
file_ext = '.py'
elif '.jar' in file_types:
file_ext = '.jar'
else:
raise ValueError("Unsupported script type: %s" % script_path)
return file_ext
def startup(self):
"""
Start runner
:return:
"""
self.start_time = time.time()
self.runner.env = self.additional_env
self.runner.run_tests()
def check_virtual_display(self):
if self.virtual_display:
if not self.virtual_display.is_alive():
self.log.info("Virtual display out: %s", self.virtual_display.stdout)
self.log.warning("Virtual display err: %s", self.virtual_display.stderr)
raise RuntimeError("Virtual display failed: %s" % self.virtual_display.return_code)
def check(self):
"""
check if test completed
:return:
"""
if self.widget:
self.widget.update()
self.check_virtual_display()
return self.runner.is_finished()
def report_test_duration(self):
if self.start_time:
self.end_time = time.time()
self.log.debug("Selenium tests ran for %s seconds", self.end_time - self.start_time)
def shutdown(self):
"""
shutdown test_runner
:return:
"""
self.runner.shutdown()
self.report_test_duration()
def post_process(self):
self.free_virtual_display()
if self.reader and not self.reader.read_records:
raise RuntimeWarning("Empty results, most likely Selenium failed")
def get_widget(self):
if not self.widget:
self.widget = SeleniumWidget(self.script, self.runner.settings.get("stdout"))
return self.widget
def resource_files(self):
self.scenario = self.get_scenario()
self._verify_script()
script_path = self.get_script_path()
resources = []
if script_path is not None:
resources.append(script_path)
return resources
def __tests_from_requests(self):
filename = self.engine.create_artifact("test_requests", ".py")
nose_test = SeleniumScriptBuilder(self.scenario, self.log)
if self.virtual_display:
nose_test.window_size = self.virtual_display.size
nose_test.gen_test_case()
nose_test.save(filename)
return filename
class AbstractTestRunner(object):
"""
Abstract test runner
"""
def __init__(self, settings, executor):
self.process = None
self.settings = settings
self.required_tools = []
self.executor = executor
self.scenario = executor.scenario
self.load = executor.get_load()
self.artifacts_dir = self.settings.get("artifacts-dir")
self.working_dir = self.settings.get("working-dir")
self.log = executor.log.getChild(self.__class__.__name__)
self.opened_descriptors = []
self.is_failed = False
self.env = {}
@abstractmethod
def prepare(self):
pass
@abstractmethod
def run_checklist(self):
pass
@abstractmethod
def run_tests(self):
pass
def is_finished(self):
ret_code = self.process.poll()
if ret_code is not None:
if ret_code != 0:
self.log.debug("Test runner exit code: %s", ret_code)
with open(self.settings.get("stderr")) as fds:
std_err = fds.read()
self.is_failed = True
raise RuntimeError("Test runner %s has failed: %s" % (self.__class__.__name__, std_err.strip()))
return True
return False
def check_tools(self):
for tool in self.required_tools:
if not tool.check_if_installed():
self.log.info("Installing %s", tool.tool_name)
tool.install()
def shutdown(self):
shutdown_process(self.process, self.log)
for desc in self.opened_descriptors:
desc.close()
self.opened_descriptors = []
class JUnitTester(AbstractTestRunner):
"""
Allows to test java and jar files
"""
def __init__(self, junit_config, executor):
"""
:type junit_config: BetterDict
"""
super(JUnitTester, self).__init__(junit_config, executor)
self.props_file = junit_config['props-file']
path_lambda = lambda key, val: get_full_path(self.settings.get(key, val))
self.junit_path = path_lambda("path", "~/.bzt/selenium-taurus/tools/junit/junit.jar")
self.hamcrest_path = path_lambda("hamcrest-core", "~/.bzt/selenium-taurus/tools/junit/hamcrest-core.jar")
self.selenium_server_jar_path = path_lambda("selenium-server",
"~/.bzt/selenium-taurus/selenium-server.jar")
self.junit_listener_path = os.path.join(get_full_path(__file__, step_up=1),
os.pardir,
"resources",
"taurus-junit-1.0.jar")
self.target_java = str(junit_config.get("compile-target-java", "1.7"))
self.base_class_path = [self.selenium_server_jar_path, self.junit_path, self.junit_listener_path,
self.hamcrest_path]
self.base_class_path.extend(self.scenario.get("additional-classpath", []))
def prepare(self):
"""
run checklist, make jar.
"""
self.run_checklist()
if self.settings.get("script-type", None) == ".java":
self.compile_scripts()
def run_checklist(self):
"""
java
javac
selenium-server.jar
junit.jar
junit_listener.jar
"""
# only check javac if we need to compile. if we have JAR as script - we don't need javac
if self.settings.get("script-type", None) == ".java":
self.required_tools.append(JavaC("", "", self.log))
self.required_tools.append(TclLibrary(self.log))
self.required_tools.append(JavaVM("", "", self.log))
link = SeleniumExecutor.SELENIUM_DOWNLOAD_LINK.format(version=SeleniumExecutor.SELENIUM_VERSION)
self.required_tools.append(SeleniumServerJar(self.selenium_server_jar_path, link, self.log))
self.required_tools.append(JUnitJar(self.junit_path, self.log, SeleniumExecutor.JUNIT_VERSION))
self.required_tools.append(HamcrestJar(self.hamcrest_path, SeleniumExecutor.HAMCREST_DOWNLOAD_LINK))
self.required_tools.append(JUnitListenerJar(self.junit_listener_path, ""))
self.check_tools()
def compile_scripts(self):
"""
Compile .java files
"""
self.log.debug("Compiling .java files started")
jar_path = os.path.join(self.executor.engine.artifacts_dir,
self.working_dir,
self.settings.get("jar-name", "compiled.jar"))
if os.path.exists(jar_path):
self.log.debug(".java files are already compiled, skipping")
return
java_files = []
for dir_entry in os.walk(self.working_dir):
if dir_entry[2]:
for test_file in dir_entry[2]:
if os.path.splitext(test_file)[1].lower() == ".java":
java_files.append(os.path.join(dir_entry[0], test_file))
compile_cl = ["javac", "-source", self.target_java, "-target", self.target_java, ]
compile_cl.extend(["-cp", os.pathsep.join(self.base_class_path)])
compile_cl.extend(java_files)
with open(os.path.join(self.artifacts_dir, "javac.out"), 'ab') as javac_out:
with open(os.path.join(self.artifacts_dir, "javac.err"), 'ab') as javac_err:
self.log.debug("running javac: %s", compile_cl)
self.process = shell_exec(compile_cl, cwd=self.working_dir, stdout=javac_out, stderr=javac_err)
ret_code = self.process.poll()
while ret_code is None:
self.log.debug("Compiling .java files...")
time.sleep(1)
ret_code = self.process.poll()
if ret_code != 0:
self.log.debug("javac exit code: %s", ret_code)
with open(javac_err.name) as err_file:
out = err_file.read()
raise RuntimeError("Javac exited with error:\n %s" % out.strip())
self.log.info("Compiling .java files completed")
self.make_jar()
def make_jar(self):
"""
move all .class files to compiled.jar
"""
self.log.debug("Making .jar started")
with open(os.path.join(self.artifacts_dir, "jar.out"), 'ab') as jar_out:
with open(os.path.join(self.artifacts_dir, "jar.err"), 'ab') as jar_err:
class_files = [java_file for java_file in os.listdir(self.working_dir) if java_file.endswith(".class")]
jar_name = self.settings.get("jar-name", "compiled.jar")
if class_files:
compile_jar_cl = ["jar", "-cf", jar_name]
compile_jar_cl.extend(class_files)
else:
package_dir = os.listdir(self.working_dir)[0]
compile_jar_cl = ["jar", "-cf", jar_name, "-C", package_dir, "."]
self.log.debug("running jar: %s", compile_jar_cl)
self.process = shell_exec(compile_jar_cl, cwd=self.working_dir, stdout=jar_out, stderr=jar_err)
ret_code = self.process.poll()
while ret_code is None:
self.log.debug("Making jar file...")
time.sleep(1)
ret_code = self.process.poll()
if ret_code != 0:
with open(jar_err.name) as err_file:
out = err_file.read()
self.log.info("Making jar failed with code %s", ret_code)
self.log.info("jar output: %s", out)
raise RuntimeError("Jar exited with non-zero code")
self.log.info("Making .jar file completed")
def run_tests(self):
# java -cp junit.jar:selenium-test-small.jar:
# selenium-2.46.0/selenium-java-2.46.0.jar:./../selenium-server.jar
# org.junit.runner.JUnitCore TestBlazemeterPass
jar_list = [os.path.join(self.working_dir, jar) for jar in os.listdir(self.working_dir) if jar.endswith(".jar")]
self.base_class_path.extend(jar_list)
with open(self.props_file, 'wt') as props:
props.write("kpi_log=%s\n" % self.settings.get("report-file").replace(os.path.sep, '/'))
props.write("error_log=%s\n" % self.settings.get("err-file").replace(os.path.sep, '/'))
if self.load.iterations:
props.write("iterations=%s\n" % self.load.iterations)
if self.load.hold:
props.write("hold_for=%s\n" % self.load.hold)
for index, item in enumerate(jar_list):
props.write("target_%s=%s\n" % (index, item.replace(os.path.sep, '/')))
std_out = open(self.settings.get("stdout"), "wt")
self.opened_descriptors.append(std_out)
std_err = open(self.settings.get("stderr"), "wt")
self.opened_descriptors.append(std_err)
env = BetterDict()
env.merge(dict(os.environ))
env.merge(self.env)
junit_command_line = ["java", "-cp", os.pathsep.join(self.base_class_path), "taurusjunit.CustomRunner",
self.props_file]
self.process = self.executor.execute(junit_command_line,
cwd=self.artifacts_dir,
stdout=std_out,
stderr=std_err,
env=env)
class NoseTester(AbstractTestRunner):
"""
Python selenium tests runner
"""
def __init__(self, nose_config, executor):
super(NoseTester, self).__init__(nose_config, executor)
self.plugin_path = os.path.join(get_full_path(__file__, step_up=1),
os.pardir,
"resources",
"nose_plugin.py")
def prepare(self):
self.run_checklist()
def run_checklist(self):
"""
we need installed nose plugin
"""
if sys.version >= '3':
self.log.warn("You are using python3, make sure that your scripts are able to run in python3!")
self.required_tools.append(TclLibrary(self.log))
self.required_tools.append(TaurusNosePlugin(self.plugin_path, ""))
self.check_tools()
def run_tests(self):
"""
run python tests
"""
executable = self.settings.get("interpreter", sys.executable)
nose_command_line = [executable, self.plugin_path, '-k', self.settings.get("report-file"),
'-e', self.settings.get("err-file")]
if self.load.iterations:
nose_command_line += ['-i', str(self.load.iterations)]
if self.load.hold:
nose_command_line += ['-d', str(self.load.hold)]
nose_command_line += [self.working_dir]
std_out = open(self.settings.get("stdout"), "wt")
self.opened_descriptors.append(std_out)
std_err = open(self.settings.get("stderr"), "wt")
self.opened_descriptors.append(std_err)
env = BetterDict()
env.merge(dict(os.environ))
env.merge(self.env)
self.process = self.executor.execute(nose_command_line,
cwd=self.artifacts_dir,
stdout=std_out,
stderr=std_err,
env=env)
class SeleniumWidget(urwid.Pile, PrioritizedWidget):
def __init__(self, script, runner_output):
widgets = []
self.script_name = urwid.Text("Tests: %s" % script)
self.summary_stats = urwid.Text("")
self.current_test = urwid.Text("")
self.runner_output = runner_output
widgets.append(self.script_name)
widgets.append(self.summary_stats)
widgets.append(self.current_test)
super(SeleniumWidget, self).__init__(widgets)
PrioritizedWidget.__init__(self, priority=10)
def update(self):
cur_test, reader_summary = ["No data received yet"] * 2
if os.path.exists(self.runner_output):
with open(self.runner_output, "rt") as fds:
lines = fds.readlines()
if lines:
line = lines[-1]
if line and "," in line:
cur_test, reader_summary = line.split(",")
self.current_test.set_text(cur_test)
self.summary_stats.set_text(reader_summary)
self._invalidate()
class SeleniumServerJar(RequiredTool):
def __init__(self, tool_path, download_link, parent_logger):
super(SeleniumServerJar, self).__init__("Selenium server", tool_path, download_link)
self.log = parent_logger.getChild(self.__class__.__name__)
def check_if_installed(self):
self.log.debug("%s path: %s", self.tool_name, self.tool_path)
selenium_launch_command = ["java", "-jar", self.tool_path, "-help"]
selenium_subproc = shell_exec(selenium_launch_command, stderr=subprocess.STDOUT)
output = selenium_subproc.communicate()
self.log.debug("%s output: %s", self.tool_name, output)
if selenium_subproc.returncode == 0:
self.already_installed = True
return True
else:
return False
class JUnitJar(RequiredTool):
def __init__(self, tool_path, parent_logger, junit_version):
super(JUnitJar, self).__init__("JUnit", tool_path)
self.log = parent_logger.getChild(self.__class__.__name__)
self.version = junit_version
self.mirror_manager = JUnitMirrorsManager(self.log, self.version)
def install(self):
dest = get_full_path(self.tool_path, step_up=1)
dest = os.path.abspath(dest)
junit_dist = super(JUnitJar, self).install_with_mirrors(dest, ".jar")
self.log.info("Installing %s into %s", self.tool_name, dest)
junit_dist.close()
if not os.path.exists(dest):
os.makedirs(dest)
shutil.move(junit_dist.name, self.tool_path)
self.log.info("Installed JUnit successfully")
if not self.check_if_installed():
raise RuntimeError("Unable to run %s after installation!" % self.tool_name)
class HamcrestJar(RequiredTool):
def __init__(self, tool_path, download_link):
super(HamcrestJar, self).__init__("HamcrestJar", tool_path, download_link)
class JavaC(RequiredTool):
def __init__(self, tool_path, download_link, parent_logger):
super(JavaC, self).__init__("JavaC", tool_path, download_link)
self.log = parent_logger.getChild(self.__class__.__name__)
def check_if_installed(self):
try:
output = subprocess.check_output(["javac", '-version'], stderr=subprocess.STDOUT)
self.log.debug("%s output: %s", self.tool_name, output)
return True
except BaseException:
raise RuntimeError("The %s is not operable or not available. Consider installing it" % self.tool_name)
def install(self):
raise NotImplementedError()
class JUnitListenerJar(RequiredTool):
def __init__(self, tool_path, download_link):
super(JUnitListenerJar, self).__init__("JUnitListener", tool_path, download_link)
def install(self):
raise NotImplementedError()
class TaurusNosePlugin(RequiredTool):
def __init__(self, tool_path, download_link):
super(TaurusNosePlugin, self).__init__("TaurusNosePlugin", tool_path, download_link)
def install(self):
raise NotImplementedError()
class NoseTest(object):
IMPORTS = """import unittest
import re
from time import sleep
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
"""
def __init__(self):
self.root = etree.Element("NoseTest")
self.tree = etree.ElementTree(self.root)
def add_imports(self):
imports = etree.Element("imports")
imports.text = NoseTest.IMPORTS
return imports
def gen_class_definition(self, class_name, inherits_from, indent="0"):
def_tmpl = "class {class_name}({inherits_from}):"
class_def_element = etree.Element("class_definition", indent=indent)
class_def_element.text = def_tmpl.format(class_name=class_name, inherits_from="".join(inherits_from))
return class_def_element
def gen_method_definition(self, method_name, params, indent="4"):
def_tmpl = "def {method_name}({params}):"
method_def_element = etree.Element("method_definition", indent=indent)
method_def_element.text = def_tmpl.format(method_name=method_name, params=",".join(params))
return method_def_element
def gen_method_statement(self, statement, indent="8"):
statement_elem = etree.Element("statement", indent=indent)
statement_elem.text = statement
return statement_elem
class SeleniumScriptBuilder(NoseTest):
def __init__(self, scenario, parent_logger):
super(SeleniumScriptBuilder, self).__init__()
self.window_size = None
self.log = parent_logger.getChild(self.__class__.__name__)
self.scenario = scenario
def gen_test_case(self):
self.log.debug("Generating Test Case test method")
imports = self.add_imports()
self.root.append(imports)
test_class = self.gen_class_definition("TestRequests", ["unittest.TestCase"])
self.root.append(test_class)
test_class.append(self.gen_setup_method())
requests = self.scenario.get_requests()
test_method = self.gen_test_method()
test_class.append(test_method)
scenario_timeout = self.scenario.get("timeout", 30)
default_address = self.scenario.get("default-address", None)
for req in requests:
parsed_url = parse.urlparse(req.url)
if default_address is not None and not parsed_url.netloc:
url = default_address + req.url
else:
url = req.url
test_method.append(self.gen_comment("start request: %s" % url))
if req.timeout is not None:
test_method.append(self.gen_impl_wait(req.timeout))
test_method.append(self.gen_method_statement("self.driver.get('%s')" % url))
think_time = req.think_time if req.think_time else self.scenario.get("think-time", None)
if think_time is not None:
test_method.append(self.gen_method_statement("sleep(%s)" % dehumanize_time(think_time)))
if "assert" in req.config:
test_method.append(self.__gen_assert_page())
for assert_config in req.config.get("assert"):
test_method.extend(self.gen_assertion(assert_config))
if req.timeout is not None:
test_method.append(self.gen_impl_wait(scenario_timeout))
test_method.append(self.gen_comment("end request: %s" % url))
test_method.append(self.__gen_new_line())
test_class.append(self.gen_teardown_method())
def gen_setup_method(self):
self.log.debug("Generating setUp test method")
browsers = ["Firefox", "Chrome", "Ie", "Opera"]
browser = self.scenario.get("browser", "Firefox")
if browser not in browsers:
raise ValueError("Unsupported browser name: %s" % browser)
setup_method_def = self.gen_method_definition("setUp", ["self"])
setup_method_def.append(self.gen_method_statement("self.driver=webdriver.%s()" % browser))
scenario_timeout = self.scenario.get("timeout", 30)
setup_method_def.append(self.gen_impl_wait(scenario_timeout))
if self.window_size:
setup_method_def.append(self.gen_method_statement("self.driver.set_window_size(%s, %s)" % self.window_size))
else:
setup_method_def.append(self.gen_method_statement("self.driver.maximize_window()"))
setup_method_def.append(self.__gen_new_line())
return setup_method_def
def gen_impl_wait(self, timeout):
return self.gen_method_statement("self.driver.implicitly_wait(%s)" % dehumanize_time(timeout))
def gen_comment(self, comment):
return self.gen_method_statement("# %s" % comment)
def gen_test_method(self):
self.log.debug("Generating test method")
test_method = self.gen_method_definition("test_method", ["self"])
return test_method
def gen_teardown_method(self):
self.log.debug("Generating tearDown test method")
tear_down_method_def = self.gen_method_definition("tearDown", ["self"])
tear_down_method_def.append(self.gen_method_statement("self.driver.quit()"))
return tear_down_method_def
def gen_assertion(self, assertion_config):
self.log.debug("Generating assertion, config: %s", assertion_config)
assertion_elements = []
if isinstance(assertion_config, string_types):
assertion_config = {"contains": [assertion_config]}
for val in assertion_config["contains"]:
regexp = assertion_config.get("regexp", True)
reverse = assertion_config.get("not", False)
subject = assertion_config.get("subject", "body")
if subject != "body":
raise ValueError("Only 'body' subject supported ")
if regexp:
assert_method = "self.assertEqual" if reverse else "self.assertNotEqual"
assertion_elements.append(self.gen_method_statement('re_pattern = re.compile("%s")' % val))
method = '%s(0, len(re.findall(re_pattern, body)))' % assert_method
assertion_elements.append(self.gen_method_statement(method))
else:
assert_method = "self.assertNotIn" if reverse else "self.assertIn"
assertion_elements.append(self.gen_method_statement('%s("%s", body)' % (assert_method, val)))
return assertion_elements
def __gen_new_line(self, indent="8"):
return self.gen_method_statement("", indent=indent)
def __gen_assert_page(self):
return self.gen_method_statement("body = self.driver.page_source")
def save(self, filename):
with open(filename, 'wt') as fds:
for child in self.root.iter():
if child.text is not None:
indent = int(child.get('indent', "0"))
fds.write(" " * indent + child.text + "\n")
class JUnitMirrorsManager(MirrorsManager):
def __init__(self, parent_logger, junit_version):
self.junit_version = junit_version
super(JUnitMirrorsManager, self).__init__(SeleniumExecutor.JUNIT_MIRRORS_SOURCE, parent_logger)
def _parse_mirrors(self):
links = []
if self.page_source is not None:
self.log.debug('Parsing mirrors...')
try:
resp = json.loads(self.page_source)
objects = resp.get("response", {}).get("docs", [])
if objects:
obj = objects[0]
group = obj.get("g")
artifact = obj.get("a")
version = obj.get("v")
ext = obj.get("p")
link_template = "http://search.maven.org/remotecontent?filepath={group}/{artifact}/" \
"{version}/{artifact}-{version}.{ext}"
link = link_template.format(group=group, artifact=artifact, version=version, ext=ext)
links.append(link)
except BaseException as exc:
self.log.error("Error while parsing mirrors %s", exc)
default_link = SeleniumExecutor.JUNIT_DOWNLOAD_LINK.format(version=self.junit_version)
if default_link not in links:
links.append(default_link)
self.log.debug('Total mirrors: %d', len(links))
return links
|
en
| 0.659566
|
Copyright 2015 BlazeMeter Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Selenium executor :type virtual_display: Display :type runner: AbstractTestRunner # script_type == ".jar" or script_type == ".java": # regular file received # dir received: check contained files Start runner :return: check if test completed :return: shutdown test_runner :return: Abstract test runner Allows to test java and jar files :type junit_config: BetterDict run checklist, make jar. java javac selenium-server.jar junit.jar junit_listener.jar # only check javac if we need to compile. if we have JAR as script - we don't need javac Compile .java files move all .class files to compiled.jar # java -cp junit.jar:selenium-test-small.jar: # selenium-2.46.0/selenium-java-2.46.0.jar:./../selenium-server.jar # org.junit.runner.JUnitCore TestBlazemeterPass Python selenium tests runner we need installed nose plugin run python tests import unittest import re from time import sleep from selenium import webdriver from selenium.common.exceptions import NoSuchElementException from selenium.common.exceptions import NoAlertPresentException
| 1.507648
| 2
|
tests/test_run.py
|
SeanDS/python-ngspice
| 0
|
6629237
|
<filename>tests/test_run.py
"""Run function tests.
Note: tests of the behaviour in running simulations (e.g. error handling) should instead go in
`test_session.py`.
"""
from io import StringIO
from itertools import zip_longest
import pytest
from ngspice import run, run_file, SimulationError
from ngspice.testing import assert_solutions_equal
from .util import dedent_multiline
scripts = pytest.mark.parametrize(
"script",
(
dedent_multiline(
"""
Test script
R1 n1 n2 1k
R2 n2 0 10k
V1 n1 0 DC 1
.op
.end
"""
),
)
)
@scripts
def test_run(script):
"""Run netlist from string."""
assert run(script)
@scripts
def test_run_file__str(script_path, script):
"""Run netlist from path string."""
with script_path.open("w") as fobj:
fobj.write(script)
assert run_file(str(script_path))
@scripts
def test_run_file__path(script_path, script):
"""Run netlist from :class:`pathlib.Path`."""
with script_path.open("w") as fobj:
fobj.write(script)
assert run_file(script_path)
@scripts
def test_run_file__stringio(script):
"""Run netlist from :class:`io.StringIO`."""
assert run_file(StringIO(script))
@scripts
def test_run_results_same_as_run_file_results(script_path, script):
"""Check runs via string and file give same results when the script is the same."""
with script_path.open("w") as fobj:
fobj.write(script)
sols1 = run(script)
sols2 = run_file(str(script_path))
for (name1, sol1), (name2, sol2) in zip_longest(sols1.items(), sols2.items()):
assert name1 == name2
assert_solutions_equal(sol1, sol2)
|
<filename>tests/test_run.py
"""Run function tests.
Note: tests of the behaviour in running simulations (e.g. error handling) should instead go in
`test_session.py`.
"""
from io import StringIO
from itertools import zip_longest
import pytest
from ngspice import run, run_file, SimulationError
from ngspice.testing import assert_solutions_equal
from .util import dedent_multiline
scripts = pytest.mark.parametrize(
"script",
(
dedent_multiline(
"""
Test script
R1 n1 n2 1k
R2 n2 0 10k
V1 n1 0 DC 1
.op
.end
"""
),
)
)
@scripts
def test_run(script):
"""Run netlist from string."""
assert run(script)
@scripts
def test_run_file__str(script_path, script):
"""Run netlist from path string."""
with script_path.open("w") as fobj:
fobj.write(script)
assert run_file(str(script_path))
@scripts
def test_run_file__path(script_path, script):
"""Run netlist from :class:`pathlib.Path`."""
with script_path.open("w") as fobj:
fobj.write(script)
assert run_file(script_path)
@scripts
def test_run_file__stringio(script):
"""Run netlist from :class:`io.StringIO`."""
assert run_file(StringIO(script))
@scripts
def test_run_results_same_as_run_file_results(script_path, script):
"""Check runs via string and file give same results when the script is the same."""
with script_path.open("w") as fobj:
fobj.write(script)
sols1 = run(script)
sols2 = run_file(str(script_path))
for (name1, sol1), (name2, sol2) in zip_longest(sols1.items(), sols2.items()):
assert name1 == name2
assert_solutions_equal(sol1, sol2)
|
en
| 0.681992
|
Run function tests. Note: tests of the behaviour in running simulations (e.g. error handling) should instead go in `test_session.py`. Test script R1 n1 n2 1k R2 n2 0 10k V1 n1 0 DC 1 .op .end Run netlist from string. Run netlist from path string. Run netlist from :class:`pathlib.Path`. Run netlist from :class:`io.StringIO`. Check runs via string and file give same results when the script is the same.
| 2.777707
| 3
|
smilepack/utils/uploader.py
|
andreymal/smilepack
| 0
|
6629238
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import time
from io import BytesIO
from hashlib import sha256
from urllib.request import urlopen, Request
from flask import current_app
class BadImageError(Exception):
pass
class Uploader(object):
def __init__(self, method, directory, maxbytes, minres, maxres, processing_mode, dirmode=0o755, filemode=0o644):
'''Параметры загрузки:
* method — None, 'imgur' или 'directory' — куда сохранять картинку (None означает отсутствие сохранения и требует url)
* directory — каталог, в который будет сохранена картинка, для метода directory
* maxbytes — максимальный размер картинки в байтах
* minres — (длина, ширина) — минимальное разрешение картинки
* maxres — (длина, ширина) — максимальное разрешение картинки
* processing_mode — режим обработки картинки:
- 'none' — не делать ничего, разрешние не проверяется и сжатие не делается
- 'optional' — при отсутствии Pillow ничего не будет делаться
- 'required' — при отсутствии Pillow будет выброшено исключение
'''
if method and method not in ('imgur', 'directory'):
raise RuntimeError('Unknown upload method setted in settings')
self.method = method
self.directory = directory
self.maxbytes = maxbytes
self.minres = minres
self.maxres = maxres
if processing_mode not in ('none', 'required', 'optional'):
raise ValueError('Invalid processing_mode')
self.processing_mode = processing_mode
self.dirmode = dirmode
self.filemode = filemode
def upload(self, data=None, url=None, compress=False, hashsum=None, image_format=None):
'''Проверяет, обрабатывает и сохраняет картинку согласно параметрам.
Если передать url, то при отсутствии изменений у картинки она пересохранена не будет.
Если передать image_format (JPEG/PNG/GIF), то не будет проверяться валидность картинки.
'''
if data is None:
data = get_data(None, url, self.maxbytes)
if len(data) > self.maxbytes:
raise BadImageError('Too big image size')
if not hashsum:
hashsum = calc_hashsum(data)
image = None
# Если нас просят обрабатывать картинку, проверяем её валидность
if self.processing_mode != 'none':
if not image_format:
image, image_format = self.open_and_check(data, image_format)
# Обработка картинки
compression_method = None
try:
# Если нас просили её сжимать, сжимаем
if self.method and self.processing_mode != 'none':
if compress:
data, compression_method = compress_image(data, image=image, optional=self.processing_mode == 'optional')
if compression_method:
hashsum = calc_hashsum(data)
finally:
if image:
image.close()
image = None
# Если нам дали ссылку и картинку не сжали или мы не можем сохранять у себя, то больше ничего и не надо
if url and not compression_method:
if '?' in url or url.endswith('/'):
return {'filename': 'image', 'url': url, 'hashsum': hashsum, 'compression_method': None}
else:
return {'filename': url[url.rfind('/') + 1:], 'url': url, 'hashsum': hashsum, 'compression_method': None}
# Если картинку сохранять оказалось надо, а мы не можем, то облом
if not self.method:
raise RuntimeError('Uploading is not available')
# Сохраняем
if self.method == 'imgur':
result = upload_to_imgur(data, hashsum)
elif self.method == 'directory':
result = upload_to_directory(
self.directory,
data,
hashsum,
image_format=image_format,
dirmode=self.dirmode,
filemode=self.filemode
)
else:
raise NotImplementedError
result['compression_method'] = compression_method
return result
def open_and_check(self, data, image_format=None):
    """Open *data* with Pillow and validate its format and resolution.

    Returns ``(image, format)``. When Pillow is unavailable and the
    processing mode is not 'required', only magic-byte sniffing is done
    and ``image`` is ``None``.

    Raises BadImageError for undecodable images, unsupported formats
    or resolutions outside [minres, maxres].
    """
    try:
        from PIL import Image
    except ImportError:
        if self.processing_mode == 'required':
            raise
        # Pillow is missing but optional: guess the format by magic bytes.
        # Every JPEG starts with FF D8 FF (JFIF, EXIF, ...); the previous
        # check for FF D8 FF E0 only matched JFIF files.
        if data.startswith(b'\xff\xd8\xff'):
            image_format = 'JPEG'
        elif data.startswith(b'GIF8'):
            image_format = 'GIF'
        elif data.startswith(b'\x89PNG\r\n\x1a\n'):
            image_format = 'PNG'
        elif not image_format:
            raise BadImageError('image_format missing')
        return None, image_format
    try:
        image = Image.open(BytesIO(data))
    except Exception:
        # Narrowed from a bare except: do not swallow KeyboardInterrupt etc.
        raise BadImageError('Cannot decode image')
    try:
        if image.format not in ('JPEG', 'GIF', 'PNG'):
            raise BadImageError('Invalid image format')
        w, h = image.size
        if w < self.minres[0] or h < self.minres[1]:
            raise BadImageError('Too small size')
        if w > self.maxres[0] or h > self.maxres[1]:
            raise BadImageError('Too big size')
        return image, image.format
    except Exception:
        # Close the decoded image before re-raising the validation error.
        image.close()
        image = None
        raise
def download(url, maxlen=None, timeout=10, chunksize=16384):
    """Fetch *url* over HTTP(S) and return the response body as bytes.

    Raises IOError for non-HTTP(S) URLs, when the body grows beyond
    *maxlen* bytes, or when the transfer takes longer than *timeout*
    seconds in total.
    """
    if not url.startswith(('http://', 'https://')):
        raise IOError('Invalid URL protocol')
    request = Request(url)
    request.add_header('User-Agent', 'smilepack/0.2.1')
    response = urlopen(request, timeout=timeout)
    chunks = []
    total = 0
    deadline = time.time() + timeout
    while True:
        chunk = response.read(chunksize)
        if not chunk:
            return b''.join(chunks)
        chunks.append(chunk)
        total += len(chunk)
        if maxlen is not None and total > maxlen:
            raise IOError('Too long response')
        if time.time() >= deadline:
            raise IOError('Timeout')
def calc_hashsum(data):
    """Return the SHA-256 hex digest of *data* (bytes)."""
    digest = sha256()
    digest.update(data)
    return digest.hexdigest()
def get_data(stream=None, url=None, maxbytes=None):
    """Read raw bytes from exactly one of *stream* or *url*.

    Raises ValueError unless exactly one source is given, and IOError
    when the data is longer than *maxbytes*.
    """
    # Exactly one source must be truthy.
    if bool(stream) == bool(url):
        raise ValueError('Please set stream or url')
    if stream:
        # Read one extra byte so an oversized stream is detected below.
        data = stream.read(maxbytes + 1) if maxbytes is not None else stream.read()
    else:
        data = download(url, maxbytes)
    if maxbytes is not None and len(data) > maxbytes:
        raise IOError('Too long response')
    return data
def compress_image(data, image=None, optional=False, compress_size=None):
    """Try to losslessly recompress an image.

    Returns ``(data, method)`` where *method* is ``None`` when the original
    bytes are kept. If *image* is not passed, the data is decoded here and
    the decoded image is closed before returning.
    """
    min_size = len(data)
    # Not worth compressing at all
    if min_size <= 4096:
        return data, None
    image_local = False
    if not image:
        image_local = True
        try:
            from PIL import Image
        except ImportError:
            # Without Pillow we can either fail or silently skip compression.
            if not optional:
                raise
            return data, None
        try:
            image = Image.open(BytesIO(data))
        except:
            raise BadImageError('Cannot decode image')
    try:
        # We only know how to recompress PNG
        if image.format != 'PNG':
            return data, None
        # TODO: figure out how to protect against vandalism via uploading
        # smiles by URL with an inadequate resize, and only then enable this
        # FIXME: the alpha channel is lost on RGBA PNGs
        # if image.format == 'JPEG' or image.mode == 'RGB':
        #     if compress_size and compress_size[0] * compress_size[1] < image.size[0] * image.size[1]:
        #         image2 = image.resize(compress_size, Image.ANTIALIAS)
        #         image2.format = image.format
        #         if image_local:
        #             image.close()
        #         image = image2
        #         del image2
        # Try to squeeze the PNG with several different methods
        test_data, method = compress_png(image)
    finally:
        # Only close the image if we opened it ourselves
        if image_local:
            image.close()
            image = None
    # Keep the recompressed variant only when the gain is significant
    if test_data and min_size - len(test_data) > 1024:
        return test_data, method
    else:
        return data, None
def compress_png(image):
    """Try several PNG re-save strategies and return (smallest_bytes, method)."""
    # 0) Try a plain re-save with Pillow's optimizer
    min_stream = BytesIO()
    image.save(min_stream, 'PNG', optimize=True)
    min_size = len(min_stream.getvalue())
    method = 'resave'
    # 1) Try re-saving with zlib level 9 (sometimes smaller than optimize=True)
    test_stream = BytesIO()
    image.save(test_stream, 'PNG', compress_level=9)
    test_size = len(test_stream.getvalue())
    if test_size < min_size:
        min_stream = test_stream
        min_size = test_size
        method = 'zlib'
    # 2) Try painting fully transparent pixels black (compresses better,
    #    visually identical since those pixels are invisible anyway)
    if image.mode == 'RGBA':
        from PIL import ImageDraw
        with image.copy() as test_image:
            w = test_image.size[0]
            draw = None
            for i, pixel in enumerate(test_image.getdata()):
                if pixel[3] < 1:
                    if draw is None:
                        draw = ImageDraw.Draw(test_image)
                    draw.point([(i % w, i // w)], (0, 0, 0, 0))
            if draw is not None:
                test_stream = BytesIO()
                test_image.save(test_stream, 'PNG', optimize=True)
                test_size = len(test_stream.getvalue())
                if test_size < min_size:
                    min_stream = test_stream
                    min_size = test_size
                    method = 'zeroalpha'
            del draw
    return min_stream.getvalue(), method
def upload_to_imgur(data, hashsum):
    """Push the image to imgur via the Flask app's client and return its metadata."""
    image_data = current_app.imgur.send_image(BytesIO(data))
    if not image_data.get('success'):
        current_app.logger.error('Cannot upload image: %s', image_data)
        raise IOError('Cannot upload image')
    link = image_data['data']['link']
    # Re-hash the stored file: imgur tends to recompress big images
    new_hashsum = calc_hashsum(download(link))
    return {'filename': link[link.rfind('/') + 1:], 'url': link, 'hashsum': new_hashsum}
def upload_to_directory(upload_dir, data, hashsum, image_format=None, dirmode=0o755, filemode=0o644):
    """Store *data* below *upload_dir* at a path derived from *hashsum*.

    Layout: ``<hash[0:2]>/<hash[2:4]>/<hash[4:10]>.<ext>``. Returns a dict
    with the relative 'filename' (forward slashes), 'url' (always None)
    and the original 'hashsum'.
    """
    if not 0 <= dirmode <= 0o777:
        raise ValueError('Invalid dirmode')
    if not 0 <= filemode <= 0o777:
        raise ValueError('Invalid filemode')
    extensions = {'PNG': '.png', 'JPEG': '.jpg', 'GIF': '.gif'}
    subdir = os.path.join(hashsum[:2], hashsum[2:4])
    basename = hashsum[4:10]
    if image_format in extensions:
        basename += extensions[image_format]
    else:
        current_app.logger.error('Saved image %s.wtf with unknown format %s', os.path.join(subdir, basename), image_format)
        basename += '.wtf'
    relative_path = os.path.join(subdir, basename)   # ab/cd/ef0123.ext
    target_dir = os.path.join(upload_dir, subdir)
    os.makedirs(target_dir, mode=dirmode, exist_ok=True)
    target_path = os.path.join(target_dir, basename)  # /path/to/smiles/ab/cd/ef0123.ext
    with open(target_path, 'wb') as out:
        out.write(data)
    os.chmod(target_path, filemode)
    return {'filename': relative_path.replace(os.path.sep, '/'), 'url': None, 'hashsum': hashsum}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import time
from io import BytesIO
from hashlib import sha256
from urllib.request import urlopen, Request
from flask import current_app
class BadImageError(Exception):
    """Raised when uploaded data is not a valid or acceptable image."""
    pass
class Uploader(object):
    """Validates, optionally recompresses and stores uploaded images."""

    def __init__(self, method, directory, maxbytes, minres, maxres, processing_mode, dirmode=0o755, filemode=0o644):
        '''Upload parameters:

        * method — None, 'imgur' or 'directory' — where to store the image
          (None means no storing at all and requires a url)
        * directory — directory the image is saved into for the 'directory' method
        * maxbytes — maximum image size in bytes
        * minres — (width, height) — minimum image resolution
        * maxres — (width, height) — maximum image resolution
        * processing_mode — image processing mode:
          - 'none' — do nothing; resolution is not checked, no compression is done
          - 'optional' — if Pillow is missing, nothing will be done
          - 'required' — if Pillow is missing, an exception is raised
        '''
        if method and method not in ('imgur', 'directory'):
            raise RuntimeError('Unknown upload method setted in settings')
        self.method = method
        self.directory = directory
        self.maxbytes = maxbytes
        self.minres = minres
        self.maxres = maxres
        if processing_mode not in ('none', 'required', 'optional'):
            raise ValueError('Invalid processing_mode')
        self.processing_mode = processing_mode
        self.dirmode = dirmode
        self.filemode = filemode

    def upload(self, data=None, url=None, compress=False, hashsum=None, image_format=None):
        '''Validate, process and store an image according to the parameters.

        If ``url`` is given and the image bytes end up unchanged, the image
        is not re-stored.
        If ``image_format`` (JPEG/PNG/GIF) is given, validity is not checked.
        '''
        if data is None:
            data = get_data(None, url, self.maxbytes)
        if len(data) > self.maxbytes:
            raise BadImageError('Too big image size')
        if not hashsum:
            hashsum = calc_hashsum(data)
        image = None
        # If processing was requested, verify that the image is valid
        if self.processing_mode != 'none':
            if not image_format:
                image, image_format = self.open_and_check(data, image_format)
        # Image processing
        compression_method = None
        try:
            # Recompress if asked to
            if self.method and self.processing_mode != 'none':
                if compress:
                    data, compression_method = compress_image(data, image=image, optional=self.processing_mode == 'optional')
                    if compression_method:
                        # Bytes changed, so the old hashsum is stale
                        hashsum = calc_hashsum(data)
        finally:
            # Always release the decoded image, even on failure
            if image:
                image.close()
                image = None
        # If we got a URL and the image was not recompressed, nothing more to do
        if url and not compression_method:
            if '?' in url or url.endswith('/'):
                return {'filename': 'image', 'url': url, 'hashsum': hashsum, 'compression_method': None}
            else:
                return {'filename': url[url.rfind('/') + 1:], 'url': url, 'hashsum': hashsum, 'compression_method': None}
        # The image must be stored, but no storage backend is configured
        if not self.method:
            raise RuntimeError('Uploading is not available')
        # Store it
        if self.method == 'imgur':
            result = upload_to_imgur(data, hashsum)
        elif self.method == 'directory':
            result = upload_to_directory(
                self.directory,
                data,
                hashsum,
                image_format=image_format,
                dirmode=self.dirmode,
                filemode=self.filemode
            )
        else:
            raise NotImplementedError
        result['compression_method'] = compression_method
        return result

    def open_and_check(self, data, image_format=None):
        """Open *data* with Pillow and validate format and resolution.

        Returns ``(image, format)``; ``image`` is ``None`` when Pillow is
        unavailable (only magic-byte sniffing is done then).
        """
        try:
            from PIL import Image
        except ImportError:
            if self.processing_mode == 'required':
                raise
            # Pillow is missing but optional: guess the format by magic bytes.
            if data.startswith(b'\xff\xd8\xff\xe0'):
                image_format = 'JPEG'
            elif data.startswith(b'GIF8'):
                image_format = 'GIF'
            elif data.startswith(b'\x89PNG\r\n\x1a\n'):
                image_format = 'PNG'
            elif not image_format:
                raise BadImageError('image_format missing')
            return None, image_format
        try:
            image = Image.open(BytesIO(data))
        except:
            raise BadImageError('Cannot decode image')
        try:
            if image.format not in ('JPEG', 'GIF', 'PNG'):
                raise BadImageError('Invalid image format')
            w, h = image.size
            if w < self.minres[0] or h < self.minres[1]:
                raise BadImageError('Too small size')
            if w > self.maxres[0] or h > self.maxres[1]:
                raise BadImageError('Too big size')
            return image, image.format
        except:
            # Close the decoded image before re-raising the validation error
            image.close()
            image = None
            raise
def download(url, maxlen=None, timeout=10, chunksize=16384):
if not url.startswith('http://') and not url.startswith('https://'):
raise IOError('Invalid URL protocol')
req = Request(url)
req.add_header('User-Agent', 'smilepack/0.2.1')
resp = urlopen(req, timeout=timeout)
buf = []
size = 0
started_at = time.time()
while True:
d = resp.read(chunksize)
if not d:
break
buf.append(d)
size += len(d)
if maxlen is not None and size > maxlen:
raise IOError('Too long response')
if time.time() - started_at >= timeout:
raise IOError('Timeout')
return b''.join(buf)
def calc_hashsum(data):
return sha256(data).hexdigest()
def get_data(stream=None, url=None, maxbytes=None):
if not stream and not url or stream and url:
raise ValueError('Please set stream or url')
if stream and maxbytes is not None:
data = stream.read(maxbytes + 1)
elif stream:
data = stream.read()
else:
data = download(url, maxbytes)
if maxbytes is not None and len(data) > maxbytes:
raise IOError('Too long response')
return data
def compress_image(data, image=None, optional=False, compress_size=None):
min_size = len(data)
# Если сжимать совсем нет смысла
if min_size <= 4096:
return data, None
image_local = False
if not image:
image_local = True
try:
from PIL import Image
except ImportError:
if not optional:
raise
return data, None
try:
image = Image.open(BytesIO(data))
except:
raise BadImageError('Cannot decode image')
try:
# Если сжимать не умеем
if image.format != 'PNG':
return data, None
# TODO: придумать, как защититься от вандализма загрузкой смайлов
# по урлу с неадекватным изменением размера, и уже тогда включить
# FIXME: слетает альфа-канал на PNG RGBA
# if image.format == 'JPEG' or image.mode == 'RGB':
# if compress_size and compress_size[0] * compress_size[1] < image.size[0] * image.size[1]:
# image2 = image.resize(compress_size, Image.ANTIALIAS)
# image2.format = image.format
# if image_local:
# image.close()
# image = image2
# del image2
# А PNG пробуем сжать разными методами
test_data, method = compress_png(image)
finally:
if image_local:
image.close()
image = None
# Сохраняем сжатие, только если оно существенно
if test_data and min_size - len(test_data) > 1024:
return test_data, method
else:
return data, None
def compress_png(image):
# 0) Пробуем просто пересохранить
min_stream = BytesIO()
image.save(min_stream, 'PNG', optimize=True)
min_size = len(min_stream.getvalue())
method = 'resave'
# 1) Пробуем пересохранить с zlib (иногда почему-то меньше, чем optimize=True)
test_stream = BytesIO()
image.save(test_stream, 'PNG', compress_level=9)
test_size = len(test_stream.getvalue())
if test_size < min_size:
min_stream = test_stream
min_size = test_size
method = 'zlib'
# 2) Пробуем закрасить чёрным невидимое
if image.mode == 'RGBA':
from PIL import ImageDraw
with image.copy() as test_image:
w = test_image.size[0]
draw = None
for i, pixel in enumerate(test_image.getdata()):
if pixel[3] < 1:
if draw is None:
draw = ImageDraw.Draw(test_image)
draw.point([(i % w, i // w)], (0, 0, 0, 0))
if draw is not None:
test_stream = BytesIO()
test_image.save(test_stream, 'PNG', optimize=True)
test_size = len(test_stream.getvalue())
if test_size < min_size:
min_stream = test_stream
min_size = test_size
method = 'zeroalpha'
del draw
return min_stream.getvalue(), method
def upload_to_imgur(data, hashsum):
image_data = current_app.imgur.send_image(BytesIO(data))
if not image_data.get('success'):
current_app.logger.error('Cannot upload image: %s', image_data)
raise IOError('Cannot upload image')
link = image_data['data']['link']
new_hashsum = calc_hashsum(download(link)) # Imgur имеет свойство пережимать большие картинки
return {'filename': link[link.rfind('/') + 1:], 'url': link, 'hashsum': new_hashsum}
def upload_to_directory(upload_dir, data, hashsum, image_format=None, dirmode=0o755, filemode=0o644):
if dirmode < 0 or dirmode > 0o777:
raise ValueError('Invalid dirmode')
if filemode < 0 or filemode > 0o777:
raise ValueError('Invalid filemode')
subdir = os.path.join(hashsum[:2], hashsum[2:4])
filename = hashsum[4:10]
if image_format == 'PNG':
filename += '.png'
elif image_format == 'JPEG':
filename += '.jpg'
elif image_format == 'GIF':
filename += '.gif'
else:
current_app.logger.error('Saved image %s.wtf with unknown format %s', os.path.join(subdir, filename), image_format)
filename += '.wtf'
full_filename = os.path.join(subdir, filename) # ab/cd/ef0123.ext
upload_dir = os.path.join(upload_dir, subdir) # /path/to/smiles/
os.makedirs(upload_dir, mode=dirmode, exist_ok=True)
full_path = os.path.join(upload_dir, filename) # /path/to/smiles/ab/cd/ef0123.ext
with open(full_path, 'wb') as fp:
fp.write(data)
os.chmod(full_path, filemode)
return {'filename': full_filename.replace(os.path.sep, '/'), 'url': None, 'hashsum': hashsum}
|
ru
| 0.968529
|
#!/usr/bin/env python3 # -*- coding: utf-8 -*- Параметры загрузки: * method — None, 'imgur' или 'directory' — куда сохранять картинку (None означает отсутствие сохранения и требует url) * directory — каталог, в который будет сохранена картинка, для метода directory * maxbytes — максимальный размер картинки в байтах * minres — (длина, ширина) — минимальное разрешение картинки * maxres — (длина, ширина) — максимальное разрешение картинки * processing_mode — режим обработки картинки: - 'none' — не делать ничего, разрешние не проверяется и сжатие не делается - 'optional' — при отсутствии Pillow ничего не будет делаться - 'required' — при отсутствии Pillow будет выброшено исключение Проверяет, обрабатывает и сохраняет картинку согласно параметрам. Если передать url, то при отсутствии изменений у картинки она пересохранена не будет. Если передать image_format (JPEG/PNG/GIF), то не будет проверяться валидность картинки. # Если нас просят обрабатывать картинку, проверяем её валидность # Обработка картинки # Если нас просили её сжимать, сжимаем # Если нам дали ссылку и картинку не сжали или мы не можем сохранять у себя, то больше ничего и не надо # Если картинку сохранять оказалось надо, а мы не можем, то облом # Сохраняем # Если сжимать совсем нет смысла # Если сжимать не умеем # TODO: придумать, как защититься от вандализма загрузкой смайлов # по урлу с неадекватным изменением размера, и уже тогда включить # FIXME: слетает альфа-канал на PNG RGBA # if image.format == 'JPEG' or image.mode == 'RGB': # if compress_size and compress_size[0] * compress_size[1] < image.size[0] * image.size[1]: # image2 = image.resize(compress_size, Image.ANTIALIAS) # image2.format = image.format # if image_local: # image.close() # image = image2 # del image2 # А PNG пробуем сжать разными методами # Сохраняем сжатие, только если оно существенно # 0) Пробуем просто пересохранить # 1) Пробуем пересохранить с zlib (иногда почему-то меньше, чем optimize=True) # 2) Пробуем закрасить чёрным невидимое 
# Imgur имеет свойство пережимать большие картинки # ab/cd/ef0123.ext # /path/to/smiles/ # /path/to/smiles/ab/cd/ef0123.ext
| 2.520285
| 3
|
sdk/communication/azure-communication-networktraversal/azure/communication/networktraversal/_generated/models/_models.py
|
moovy2/azure-sdk-for-python
| 2,728
|
6629239
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class CommunicationError(msrest.serialization.Model):
    """The Communication Services error.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar code: Required. The error code.
    :vartype code: str
    :ivar message: Required. The error message.
    :vartype message: str
    :ivar target: The error target.
    :vartype target: str
    :ivar details: Further details about specific errors that led to this error.
    :vartype details: list[~azure.communication.networktraversal.models.CommunicationError]
    :ivar inner_error: The inner error if any.
    :vartype inner_error: ~azure.communication.networktraversal.models.CommunicationError
    """

    # msrest validation rules: which fields the caller must supply and
    # which are read-only (populated by the service on deserialization).
    _validation = {
        'code': {'required': True},
        'message': {'required': True},
        'target': {'readonly': True},
        'details': {'readonly': True},
        'inner_error': {'readonly': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest type strings.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'details': {'key': 'details', 'type': '[CommunicationError]'},
        'inner_error': {'key': 'innererror', 'type': 'CommunicationError'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword code: Required. The error code.
        :paramtype code: str
        :keyword message: Required. The error message.
        :paramtype message: str
        """
        super(CommunicationError, self).__init__(**kwargs)
        self.code = kwargs['code']
        self.message = kwargs['message']
        # Read-only fields: filled in by the service, never sent by the client.
        self.target = None
        self.details = None
        self.inner_error = None
class CommunicationErrorResponse(msrest.serialization.Model):
    """The Communication Services error.

    All required parameters must be populated in order to send to Azure.

    :ivar error: Required. The Communication Services error.
    :vartype error: ~azure.communication.networktraversal.models.CommunicationError
    """

    # msrest validation rules for the single wrapped error field.
    _validation = {
        'error': {'required': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest type strings.
    _attribute_map = {
        'error': {'key': 'error', 'type': 'CommunicationError'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword error: Required. The Communication Services error.
        :paramtype error: ~azure.communication.networktraversal.models.CommunicationError
        """
        super(CommunicationErrorResponse, self).__init__(**kwargs)
        self.error = kwargs['error']
class CommunicationIceServer(msrest.serialization.Model):
    """An instance of a STUN/TURN server with credentials to be used for ICE negotiation.

    All required parameters must be populated in order to send to Azure.

    :ivar urls: Required. List of STUN/TURN server URLs.
    :vartype urls: list[str]
    :ivar username: Required. User account name which uniquely identifies the credentials.
    :vartype username: str
    :ivar credential: Required. Credential for the server.
    :vartype credential: str
    :ivar route_type: Required. The routing methodology to where the ICE server will be located
     from the client. Possible values include: "any", "nearest".
    :vartype route_type: str or ~azure.communication.networktraversal.models.RouteType
    """

    # msrest validation rules: every field is caller-supplied and required.
    _validation = {
        'urls': {'required': True},
        'username': {'required': True},
        'credential': {'required': True},
        'route_type': {'required': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest type strings.
    _attribute_map = {
        'urls': {'key': 'urls', 'type': '[str]'},
        'username': {'key': 'username', 'type': 'str'},
        'credential': {'key': 'credential', 'type': 'str'},
        'route_type': {'key': 'routeType', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword urls: Required. List of STUN/TURN server URLs.
        :paramtype urls: list[str]
        :keyword username: Required. User account name which uniquely identifies the credentials.
        :paramtype username: str
        :keyword credential: Required. Credential for the server.
        :paramtype credential: str
        :keyword route_type: Required. The routing methodology to where the ICE server will be located
         from the client. Possible values include: "any", "nearest".
        :paramtype route_type: str or ~azure.communication.networktraversal.models.RouteType
        """
        super(CommunicationIceServer, self).__init__(**kwargs)
        self.urls = kwargs['urls']
        self.username = kwargs['username']
        self.credential = kwargs['credential']
        self.route_type = kwargs['route_type']
class CommunicationRelayConfiguration(msrest.serialization.Model):
    """A relay configuration containing the STUN/TURN URLs and credentials.

    All required parameters must be populated in order to send to Azure.

    :ivar expires_on: Required. The date for which the username and credentials are not longer
     valid.
    :vartype expires_on: ~datetime.datetime
    :ivar ice_servers: Required. An array representing the credentials and the STUN/TURN server
     URLs for use in ICE negotiations.
    :vartype ice_servers: list[~azure.communication.networktraversal.models.CommunicationIceServer]
    """

    # msrest validation rules: both fields are required.
    _validation = {
        'expires_on': {'required': True},
        'ice_servers': {'required': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest type strings;
    # expiresOn is serialized as an ISO-8601 timestamp.
    _attribute_map = {
        'expires_on': {'key': 'expiresOn', 'type': 'iso-8601'},
        'ice_servers': {'key': 'iceServers', 'type': '[CommunicationIceServer]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword expires_on: Required. The date for which the username and credentials are not longer
         valid.
        :paramtype expires_on: ~datetime.datetime
        :keyword ice_servers: Required. An array representing the credentials and the STUN/TURN server
         URLs for use in ICE negotiations.
        :paramtype ice_servers:
         list[~azure.communication.networktraversal.models.CommunicationIceServer]
        """
        super(CommunicationRelayConfiguration, self).__init__(**kwargs)
        self.expires_on = kwargs['expires_on']
        self.ice_servers = kwargs['ice_servers']
class CommunicationRelayConfigurationRequest(msrest.serialization.Model):
    """Request for a CommunicationRelayConfiguration.

    :ivar id: An existing ACS identity.
    :vartype id: str
    :ivar route_type: The routing methodology to where the ICE server will be located from the
     client. Possible values include: "any", "nearest".
    :vartype route_type: str or ~azure.communication.networktraversal.models.RouteType
    """

    # Maps Python attribute names to wire (JSON) keys and msrest type strings.
    # No _validation block: both fields are optional in this request.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'route_type': {'key': 'routeType', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword id: An existing ACS identity.
        :paramtype id: str
        :keyword route_type: The routing methodology to where the ICE server will be located from the
         client. Possible values include: "any", "nearest".
        :paramtype route_type: str or ~azure.communication.networktraversal.models.RouteType
        """
        super(CommunicationRelayConfigurationRequest, self).__init__(**kwargs)
        self.id = kwargs.get('id', None)
        self.route_type = kwargs.get('route_type', None)
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class CommunicationError(msrest.serialization.Model):
"""The Communication Services error.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar code: Required. The error code.
:vartype code: str
:ivar message: Required. The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: Further details about specific errors that led to this error.
:vartype details: list[~azure.communication.networktraversal.models.CommunicationError]
:ivar inner_error: The inner error if any.
:vartype inner_error: ~azure.communication.networktraversal.models.CommunicationError
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
'target': {'readonly': True},
'details': {'readonly': True},
'inner_error': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[CommunicationError]'},
'inner_error': {'key': 'innererror', 'type': 'CommunicationError'},
}
def __init__(
self,
**kwargs
):
"""
:keyword code: Required. The error code.
:paramtype code: str
:keyword message: Required. The error message.
:paramtype message: str
"""
super(CommunicationError, self).__init__(**kwargs)
self.code = kwargs['code']
self.message = kwargs['message']
self.target = None
self.details = None
self.inner_error = None
class CommunicationErrorResponse(msrest.serialization.Model):
"""The Communication Services error.
All required parameters must be populated in order to send to Azure.
:ivar error: Required. The Communication Services error.
:vartype error: ~azure.communication.networktraversal.models.CommunicationError
"""
_validation = {
'error': {'required': True},
}
_attribute_map = {
'error': {'key': 'error', 'type': 'CommunicationError'},
}
def __init__(
self,
**kwargs
):
"""
:keyword error: Required. The Communication Services error.
:paramtype error: ~azure.communication.networktraversal.models.CommunicationError
"""
super(CommunicationErrorResponse, self).__init__(**kwargs)
self.error = kwargs['error']
class CommunicationIceServer(msrest.serialization.Model):
"""An instance of a STUN/TURN server with credentials to be used for ICE negotiation.
All required parameters must be populated in order to send to Azure.
:ivar urls: Required. List of STUN/TURN server URLs.
:vartype urls: list[str]
:ivar username: Required. User account name which uniquely identifies the credentials.
:vartype username: str
:ivar credential: Required. Credential for the server.
:vartype credential: str
:ivar route_type: Required. The routing methodology to where the ICE server will be located
from the client. Possible values include: "any", "nearest".
:vartype route_type: str or ~azure.communication.networktraversal.models.RouteType
"""
_validation = {
'urls': {'required': True},
'username': {'required': True},
'credential': {'required': True},
'route_type': {'required': True},
}
_attribute_map = {
'urls': {'key': 'urls', 'type': '[str]'},
'username': {'key': 'username', 'type': 'str'},
'credential': {'key': 'credential', 'type': 'str'},
'route_type': {'key': 'routeType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword urls: Required. List of STUN/TURN server URLs.
:paramtype urls: list[str]
:keyword username: Required. User account name which uniquely identifies the credentials.
:paramtype username: str
:keyword credential: Required. Credential for the server.
:paramtype credential: str
:keyword route_type: Required. The routing methodology to where the ICE server will be located
from the client. Possible values include: "any", "nearest".
:paramtype route_type: str or ~azure.communication.networktraversal.models.RouteType
"""
super(CommunicationIceServer, self).__init__(**kwargs)
self.urls = kwargs['urls']
self.username = kwargs['username']
self.credential = kwargs['credential']
self.route_type = kwargs['route_type']
class CommunicationRelayConfiguration(msrest.serialization.Model):
"""A relay configuration containing the STUN/TURN URLs and credentials.
All required parameters must be populated in order to send to Azure.
:ivar expires_on: Required. The date for which the username and credentials are not longer
valid.
:vartype expires_on: ~datetime.datetime
:ivar ice_servers: Required. An array representing the credentials and the STUN/TURN server
URLs for use in ICE negotiations.
:vartype ice_servers: list[~azure.communication.networktraversal.models.CommunicationIceServer]
"""
_validation = {
'expires_on': {'required': True},
'ice_servers': {'required': True},
}
_attribute_map = {
'expires_on': {'key': 'expiresOn', 'type': 'iso-8601'},
'ice_servers': {'key': 'iceServers', 'type': '[CommunicationIceServer]'},
}
def __init__(
self,
**kwargs
):
"""
:keyword expires_on: Required. The date for which the username and credentials are not longer
valid.
:paramtype expires_on: ~datetime.datetime
:keyword ice_servers: Required. An array representing the credentials and the STUN/TURN server
URLs for use in ICE negotiations.
:paramtype ice_servers:
list[~azure.communication.networktraversal.models.CommunicationIceServer]
"""
super(CommunicationRelayConfiguration, self).__init__(**kwargs)
self.expires_on = kwargs['expires_on']
self.ice_servers = kwargs['ice_servers']
class CommunicationRelayConfigurationRequest(msrest.serialization.Model):
"""Request for a CommunicationRelayConfiguration.
:ivar id: An existing ACS identity.
:vartype id: str
:ivar route_type: The routing methodology to where the ICE server will be located from the
client. Possible values include: "any", "nearest".
:vartype route_type: str or ~azure.communication.networktraversal.models.RouteType
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'route_type': {'key': 'routeType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
:keyword id: An existing ACS identity.
:paramtype id: str
:keyword route_type: The routing methodology to where the ICE server will be located from the
client. Possible values include: "any", "nearest".
:paramtype route_type: str or ~azure.communication.networktraversal.models.RouteType
"""
super(CommunicationRelayConfigurationRequest, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.route_type = kwargs.get('route_type', None)
|
en
| 0.607889
|
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- The Communication Services error. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar code: Required. The error code. :vartype code: str :ivar message: Required. The error message. :vartype message: str :ivar target: The error target. :vartype target: str :ivar details: Further details about specific errors that led to this error. :vartype details: list[~azure.communication.networktraversal.models.CommunicationError] :ivar inner_error: The inner error if any. :vartype inner_error: ~azure.communication.networktraversal.models.CommunicationError :keyword code: Required. The error code. :paramtype code: str :keyword message: Required. The error message. :paramtype message: str The Communication Services error. All required parameters must be populated in order to send to Azure. :ivar error: Required. The Communication Services error. :vartype error: ~azure.communication.networktraversal.models.CommunicationError :keyword error: Required. The Communication Services error. :paramtype error: ~azure.communication.networktraversal.models.CommunicationError An instance of a STUN/TURN server with credentials to be used for ICE negotiation. All required parameters must be populated in order to send to Azure. :ivar urls: Required. List of STUN/TURN server URLs. :vartype urls: list[str] :ivar username: Required. User account name which uniquely identifies the credentials. 
:vartype username: str :ivar credential: Required. Credential for the server. :vartype credential: str :ivar route_type: Required. The routing methodology to where the ICE server will be located from the client. Possible values include: "any", "nearest". :vartype route_type: str or ~azure.communication.networktraversal.models.RouteType :keyword urls: Required. List of STUN/TURN server URLs. :paramtype urls: list[str] :keyword username: Required. User account name which uniquely identifies the credentials. :paramtype username: str :keyword credential: Required. Credential for the server. :paramtype credential: str :keyword route_type: Required. The routing methodology to where the ICE server will be located from the client. Possible values include: "any", "nearest". :paramtype route_type: str or ~azure.communication.networktraversal.models.RouteType A relay configuration containing the STUN/TURN URLs and credentials. All required parameters must be populated in order to send to Azure. :ivar expires_on: Required. The date for which the username and credentials are not longer valid. :vartype expires_on: ~datetime.datetime :ivar ice_servers: Required. An array representing the credentials and the STUN/TURN server URLs for use in ICE negotiations. :vartype ice_servers: list[~azure.communication.networktraversal.models.CommunicationIceServer] :keyword expires_on: Required. The date for which the username and credentials are not longer valid. :paramtype expires_on: ~datetime.datetime :keyword ice_servers: Required. An array representing the credentials and the STUN/TURN server URLs for use in ICE negotiations. :paramtype ice_servers: list[~azure.communication.networktraversal.models.CommunicationIceServer] Request for a CommunicationRelayConfiguration. :ivar id: An existing ACS identity. :vartype id: str :ivar route_type: The routing methodology to where the ICE server will be located from the client. Possible values include: "any", "nearest". 
:vartype route_type: str or ~azure.communication.networktraversal.models.RouteType :keyword id: An existing ACS identity. :paramtype id: str :keyword route_type: The routing methodology to where the ICE server will be located from the client. Possible values include: "any", "nearest". :paramtype route_type: str or ~azure.communication.networktraversal.models.RouteType
| 2.102725
| 2
|
pkgcore/test/restrictions/test_util.py
|
pombreda/pkgcore
| 1
|
6629240
|
<filename>pkgcore/test/restrictions/test_util.py
# Copyright: 2006 <NAME> <<EMAIL>>
# License: GPL2/BSD
from pkgcore.restrictions import util, packages, values
from pkgcore.test import TestCase
class Test_collect_package_restrictions(TestCase):
    """Tests for util.collect_package_restrictions."""

    def test_collect_all(self):
        # With no attr filter, every PackageRestriction nested anywhere in
        # the boolean tree should be yielded, in traversal order.
        prs = [packages.PackageRestriction("category", values.AlwaysTrue)] * 10
        self.assertEqual(
            list(util.collect_package_restrictions(packages.AndRestriction(
                packages.OrRestriction(), packages.AndRestriction(),
                *prs))),
            prs)

    def test_collect_specific(self):
        # One restriction per attribute; filtering on an attr should yield
        # only the restrictions bound to that attr.
        prs = {}
        for x in ("category", "package", "version", "iuse"):
            prs[x] = packages.PackageRestriction(x, values.AlwaysTrue)
        r = packages.AndRestriction(
            packages.OrRestriction(*prs.values()), packages.AlwaysTrue)
        # dict.iteritems() is Python 2 only; items() behaves identically for
        # this iteration and keeps the test importable under Python 3.
        for k, v in prs.items():
            self.assertEqual(
                list(util.collect_package_restrictions(r, attrs=[k])),
                [v])
        r = packages.AndRestriction(packages.OrRestriction(
            *prs.values()), *prs.values())
        for k, v in prs.items():
            self.assertEqual(
                list(util.collect_package_restrictions(r, attrs=[k])),
                [v] * 2)
|
<filename>pkgcore/test/restrictions/test_util.py
# Copyright: 2006 <NAME> <<EMAIL>>
# License: GPL2/BSD
from pkgcore.restrictions import util, packages, values
from pkgcore.test import TestCase
class Test_collect_package_restrictions(TestCase):
    """Tests for util.collect_package_restrictions."""

    def test_collect_all(self):
        # With no attr filter, every PackageRestriction nested anywhere in
        # the boolean tree should be yielded, in traversal order.
        prs = [packages.PackageRestriction("category", values.AlwaysTrue)] * 10
        self.assertEqual(
            list(util.collect_package_restrictions(packages.AndRestriction(
                packages.OrRestriction(), packages.AndRestriction(),
                *prs))),
            prs)

    def test_collect_specific(self):
        # One restriction per attribute; filtering on an attr should yield
        # only the restrictions bound to that attr.
        prs = {}
        for x in ("category", "package", "version", "iuse"):
            prs[x] = packages.PackageRestriction(x, values.AlwaysTrue)
        r = packages.AndRestriction(
            packages.OrRestriction(*prs.values()), packages.AlwaysTrue)
        # dict.iteritems() is Python 2 only; items() behaves identically for
        # this iteration and keeps the test importable under Python 3.
        for k, v in prs.items():
            self.assertEqual(
                list(util.collect_package_restrictions(r, attrs=[k])),
                [v])
        r = packages.AndRestriction(packages.OrRestriction(
            *prs.values()), *prs.values())
        for k, v in prs.items():
            self.assertEqual(
                list(util.collect_package_restrictions(r, attrs=[k])),
                [v] * 2)
|
en
| 0.275257
|
# Copyright: 2006 <NAME> <<EMAIL>> # License: GPL2/BSD
| 2.093761
| 2
|
uniter_model/tests/generate_test_data.py
|
intersun/LightningDOT
| 64
|
6629241
|
"""
minimal running script of distributed training
"""
import argparse
import random
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.utils.rnn import pad_sequence
from torch import optim
# communication operations
from utils.distributed import all_reduce_and_rescale_tensors, all_gather_list
class DataLoader(object):
    """Endless producer of random (padded token batch, class target) pairs.

    Drives the distributed-training smoke test with synthetic data.
    """

    def __init__(self, vocab_size, n_class, batch_size=8, lengths=(5, 10)):
        self.vsize = vocab_size    # size of the fake vocabulary
        self.ncls = n_class        # number of target classes
        self.bs = batch_size       # sequences per batch
        self.lengths = lengths     # inclusive (min, max) sequence length

    def __iter__(self):
        # Infinite stream: callers are expected to break out themselves.
        while True:
            yield self._random_batch()

    def _random_batch(self):
        seqs = []
        labels = []
        for _ in range(self.bs):
            seq, label = self._random_inputs()
            seqs.append(seq)
            labels.append(label)
        # pad_sequence stacks to (max_len, batch) with zero padding.
        return pad_sequence(seqs), torch.LongTensor(labels)

    def _random_inputs(self):
        n_tokens = random.randint(*self.lengths)
        tokens = torch.LongTensor(
            [random.randint(0, self.vsize - 1) for _ in range(n_tokens)])
        label = random.randint(0, self.ncls - 1)
        return tokens, label
class Model(nn.Module):
    """Toy sequence classifier: embedding -> single-layer LSTM -> linear head."""

    def __init__(self, vsize, ncls):
        super().__init__()
        # Submodule creation order is kept stable so parameter
        # initialisation under a fixed seed is reproducible.
        self.emb = nn.Embedding(vsize, 100)
        self.rnn = nn.LSTM(100, 100, 1)
        self.proj = nn.Linear(100, ncls)

    def forward(self, input_):
        embedded = self.emb(input_)
        _, (hidden, _) = self.rnn(embedded)
        # Classify from the last layer's final hidden state.
        return self.proj(hidden[-1])
class InputExample(object):
    """Plain record pairing one model input with its target label."""

    def __init__(self, input, target):
        # `input` shadows the builtin, but callers pass input=... by keyword,
        # so the parameter name is part of the interface and is kept.
        self.input, self.target = input, target
def main():
    """Generate synthetic batches, save them, then reload and print them.

    Smoke-tests that a list of InputExample objects survives a
    torch.save / torch.load round trip.
    """
    vsize = 200
    ncls = 10
    total_step = 100  # was assigned twice in the original; once suffices
    seed = 777

    random.seed(seed)
    torch.manual_seed(seed)

    # Save and load must target the same file: the original saved to
    # "input0.txt" but loaded "input.txt", so the reload always failed.
    path = 'data/test_data/input0.txt'

    loader = DataLoader(vsize, ncls)
    examples = []
    print("example generating")
    for step, (input_, target) in enumerate(loader):
        print("example appended" + str(step))
        examples.append(InputExample(input=input_, target=target))
        # The loader is infinite; stop after total_step examples.
        if step + 1 >= total_step:
            break

    print("saving torch.save")
    torch.save(examples, path)

    examples = torch.load(path)
    for step, ie in enumerate(examples):
        print(step)
        print(ie.input)
        print(ie.target)
if __name__ == '__main__':
main()
|
"""
minimal running script of distributed training
"""
import argparse
import random
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.utils.rnn import pad_sequence
from torch import optim
# communication operations
from utils.distributed import all_reduce_and_rescale_tensors, all_gather_list
class DataLoader(object):
    """Endless producer of random (padded token batch, class target) pairs.

    Drives the distributed-training smoke test with synthetic data.
    """

    def __init__(self, vocab_size, n_class, batch_size=8, lengths=(5, 10)):
        self.vsize = vocab_size    # size of the fake vocabulary
        self.ncls = n_class        # number of target classes
        self.bs = batch_size       # sequences per batch
        self.lengths = lengths     # inclusive (min, max) sequence length

    def __iter__(self):
        # Infinite stream: callers are expected to break out themselves.
        while True:
            yield self._random_batch()

    def _random_batch(self):
        seqs = []
        labels = []
        for _ in range(self.bs):
            seq, label = self._random_inputs()
            seqs.append(seq)
            labels.append(label)
        # pad_sequence stacks to (max_len, batch) with zero padding.
        return pad_sequence(seqs), torch.LongTensor(labels)

    def _random_inputs(self):
        n_tokens = random.randint(*self.lengths)
        tokens = torch.LongTensor(
            [random.randint(0, self.vsize - 1) for _ in range(n_tokens)])
        label = random.randint(0, self.ncls - 1)
        return tokens, label
class Model(nn.Module):
    """Toy sequence classifier: embedding -> single-layer LSTM -> linear head."""

    def __init__(self, vsize, ncls):
        super().__init__()
        # Submodule creation order is kept stable so parameter
        # initialisation under a fixed seed is reproducible.
        self.emb = nn.Embedding(vsize, 100)
        self.rnn = nn.LSTM(100, 100, 1)
        self.proj = nn.Linear(100, ncls)

    def forward(self, input_):
        embedded = self.emb(input_)
        _, (hidden, _) = self.rnn(embedded)
        # Classify from the last layer's final hidden state.
        return self.proj(hidden[-1])
class InputExample(object):
    """Plain record pairing one model input with its target label."""

    def __init__(self, input, target):
        # `input` shadows the builtin, but callers pass input=... by keyword,
        # so the parameter name is part of the interface and is kept.
        self.input, self.target = input, target
def main():
    """Generate synthetic batches, save them, then reload and print them.

    Smoke-tests that a list of InputExample objects survives a
    torch.save / torch.load round trip.
    """
    vsize = 200
    ncls = 10
    total_step = 100  # was assigned twice in the original; once suffices
    seed = 777

    random.seed(seed)
    torch.manual_seed(seed)

    # Save and load must target the same file: the original saved to
    # "input0.txt" but loaded "input.txt", so the reload always failed.
    path = 'data/test_data/input0.txt'

    loader = DataLoader(vsize, ncls)
    examples = []
    print("example generating")
    for step, (input_, target) in enumerate(loader):
        print("example appended" + str(step))
        examples.append(InputExample(input=input_, target=target))
        # The loader is infinite; stop after total_step examples.
        if step + 1 >= total_step:
            break

    print("saving torch.save")
    torch.save(examples, path)

    examples = torch.load(path)
    for step, ie in enumerate(examples):
        print(step)
        print(ie.input)
        print(ie.target)
if __name__ == '__main__':
main()
|
en
| 0.897708
|
minimal running script of distributed training # communication operations
| 2.459665
| 2
|
iter8_analytics/api/analytics/metrics.py
|
huang195/iter8-analytics
| 0
|
6629242
|
"""Module containing classes and methods for querying prometheus and returning metric data.
"""
# core python dependencies
from datetime import datetime, timedelta, timezone
from uuid import UUID
from typing import Dict, Iterable, Any, Union
import logging
import requests
from string import Template
import math
# external module dependencies
from pydantic import BaseModel, Field
# iter8 dependencies
from iter8_analytics.api.analytics.types import *
import iter8_analytics.constants as constants
from iter8_analytics.config import env_config
logger = logging.getLogger('iter8_analytics')
def new_ratio_max_min(metric_id_to_list_of_values: Dict[iter8id, Iterable[float]]):
    """Return the min and max observed value for each ratio metric.

    Args:
        metric_id_to_list_of_values (Dict[iter8id, Iterable[float]]): maps each
            metric id to the list of values seen for that metric.

    Returns:
        Dict[iter8id, RatioMaxMin]: maps each metric id to an object holding
        its minimum and maximum; both are None when no values were observed.
    """
    max_min_lists = {
        metric_id: [None, None] for metric_id in metric_id_to_list_of_values
    }
    for metric_id, vals in metric_id_to_list_of_values.items():
        try:
            # min()/max() raise ValueError on an empty iterable; keep the
            # (None, None) placeholder in that case. The original bare
            # `except:` also swallowed unrelated errors (TypeError,
            # KeyboardInterrupt, ...); catch only what empty input raises.
            max_min_lists[metric_id][0], max_min_lists[metric_id][1] = min(vals), max(vals)
        except ValueError:
            logger.debug("Empty list of values found for metric %s", metric_id)
        max_min_lists[metric_id] = RatioMaxMin(
            minimum=max_min_lists[metric_id][0],
            maximum=max_min_lists[metric_id][1]
        )
    return max_min_lists
def get_counter_metrics(
    counter_metric_specs: Dict[iter8id, CounterMetricSpec],
    versions: Iterable[Version],
    start_time: datetime) -> Dict[iter8id, Dict[iter8id, CounterDataPoint]]:
    """Query prometheus and get counter metric data for given set of counter metrics and versions.

    Args:
        counter_metric_specs (Dict[iter8id, CounterMetricSpec]): dictionary whose values are the counter metric specs and whose keys are counter metric ids.
        versions (Iterable[Version]): an iterable of version objects; must be non-empty and indexable (versions[0] supplies the version label keys).
        start_time (datetime): start time which dictates the duration parameter used in the query.

    Returns:
        Dict[iter8id, Dict[iter8id, CounterDataPoint]]: outer keys are version ids, inner keys are metric ids, and values are the current counter data points. For e.g.:
        {
            "version1": {
                "metric1": CounterDataPoint(...),
                "metric2": CounterDataPoint(...)
            },
            "version2": {
                "metric1": CounterDataPoint(...),
                "metric2": CounterDataPoint(...)
            }
        }
    """
    cmd = {version.id: {} for version in versions} # initialize cmd
    # populate cmd: one prometheus query per counter metric spec
    for counter_metric_spec in counter_metric_specs.values():
        query_spec = CounterQuerySpec(
            version_label_keys = versions[0].version_labels.keys(),
            query_template = counter_metric_spec.query_template,
            start_time = start_time
        )
        pcmq = PrometheusCounterMetricQuery(query_spec, versions)
        current_time = datetime.now(timezone.utc)
        cmd_from_prom = pcmq.query_from_spec(current_time)
        # Fallback status distinguishes "prom answered but omitted this
        # version" from "prom returned no versions at all".
        status = StatusEnum.zeroed_counter if cmd_from_prom else StatusEnum.no_versions_in_prom_response
        for version in versions:
            if version.id in cmd_from_prom:
                cmd[version.id][counter_metric_spec.id] = cmd_from_prom[version.id]
            else:
                cmd[version.id][counter_metric_spec.id] = CounterDataPoint(
                    value = 0,
                    timestamp = current_time,
                    status = status
                )
    """if a version cannot be found in the list of counter metrics returned by prometheus, then the value of the counter is assumed to be zero
    """
    return cmd
def get_ratio_metrics(
    ratio_metric_specs: Dict[iter8id, RatioMetricSpec],
    counter_metric_specs: Dict[iter8id, CounterMetricSpec],
    counter_metrics: Dict[iter8id, Dict[iter8id, CounterDataPoint]],
    versions: Iterable[Version],
    start_time: datetime) -> Dict[iter8id, Dict[iter8id, RatioDataPoint]]:
    """Query prometheus and get ratio metric data for given set of ratio metrics and versions.

    Args:
        ratio_metric_specs (Dict[iter8id, RatioMetricSpec]): dictionary whose values are the ratio metric specs and whose keys are ratio metric ids.
        counter_metric_specs (Dict[iter8id, CounterMetricSpec]): specs for the counters the ratios are built from; each ratio's numerator/denominator query templates are looked up here.
        counter_metrics (Dict[iter8id, Dict[iter8id, CounterDataPoint]]): current counter values per version (typically the result of get_counter_metrics); used only to choose the fallback status when prometheus omits a version.
        versions (Iterable[Version]): an iterable of version objects; must be non-empty and indexable (versions[0] supplies the version label keys).
        start_time (datetime): start time which dictates the duration parameter used in the query.

    Returns:
        Dict[iter8id, Dict[iter8id, RatioDataPoint]]: outer keys are version ids, inner keys are ratio metric ids, and values are the current ratio data points.
    """
    rmd = {version.id: {} for version in versions} # initialize rmd
    # populate rmd: one prometheus query per ratio metric spec
    for ratio_metric_spec in ratio_metric_specs.values():
        query_spec = RatioQuerySpec(
            version_label_keys = versions[0].version_labels.keys(),
            numerator_template = counter_metric_specs[ratio_metric_spec.numerator].query_template,
            denominator_template = counter_metric_specs[ratio_metric_spec.denominator].query_template,
            start_time = start_time
        )
        prmq = PrometheusRatioMetricQuery(query_spec, versions)
        current_time = datetime.now(timezone.utc)
        rmd_from_prom = prmq.query_from_spec(current_time)
        for version in versions:
            if version.id in rmd_from_prom:
                rmd[version.id][ratio_metric_spec.id] = rmd_from_prom[version.id]
            else:
                # Version missing from prom response: a truthy denominator
                # counter implies the ratio is genuinely 0; otherwise the
                # ratio is unknown (None).
                if version.id in counter_metrics and counter_metrics[version.id][ratio_metric_spec.denominator].value:
                    rmd[version.id][ratio_metric_spec.id] = RatioDataPoint(
                        value = 0,
                        timestamp = current_time,
                        status = StatusEnum.zeroed_ratio
                    )
                else:
                    rmd[version.id][ratio_metric_spec.id] = RatioDataPoint(
                        value = None,
                        timestamp = current_time,
                        status = StatusEnum.absent_version_in_prom_response
                    )
    """if a version cannot be found in the list of ratio metrics returned by prometheus, then the value of the ratio is set to zero if denominator is non-zero, and is set to None otherwise.
    """
    return rmd
class PrometheusMetricQuery():
    """Base class for querying prometheus.

    Attributes:
        prometheus_url (str): prometheus query endpoint URL
        query_spec (QuerySpec): query spec for the prom query
        version_labels_to_id (Dict[FrozenSet[Tuple[str, str]], str]): maps a
            version's label set back to its version id
    """

    def __init__(self, query_spec, versions):
        """Initialize prometheus metric query object.

        Args:
            query_spec (QuerySpec): Prom query spec
            versions (Iterable[Version]): Iterable of Version objects.
        """
        prometheus_url = env_config[constants.METRICS_BACKEND_CONFIG_URL]
        self.prometheus_url = prometheus_url + "/api/v1/query"
        self.query_spec = query_spec
        # Prom results carry label sets, not version ids; this frozenset map
        # lets post_process translate labels back to versions.
        self.version_labels_to_id = {
            frozenset(version.version_labels.items()): version.id for version in versions
        }

    def query_from_spec(self, current_time):
        """Query prometheus using the stored query spec.

        Args:
            current_time (datetime): current time, used to compute the
                duration window relative to query_spec.start_time.

        Returns:
            Dict[str, DataPoint]: post-processed query result keyed by version id.
        """
        interval = int((current_time - self.query_spec.start_time).total_seconds())
        kwargs = {
            "interval": f"{interval}s",
            "version_labels": ",".join(self.query_spec.version_label_keys)
        }
        query = self.get_query(kwargs)
        return self.query(query, current_time)

    def query(self, query, current_time):
        """Send a query string to prometheus and post-process the response.

        Args:
            query (str): the prometheus query string.
            current_time (datetime): timestamp attached to the resulting data points.

        Returns:
            Dict[str, DataPoint]: post-processed query result keyed by version id.

        Raises:
            Exception: HTTP connection errors related to prom requests.
        """
        params = {'query': query}
        try:
            query_result = requests.get(self.prometheus_url, params=params).json()
            logger.debug("query result -- raw")
            logger.debug(query_result)
        except Exception as e:
            logger.error("Error while attempting to connect to prometheus")
            raise(e)
        return self.post_process(query_result, current_time)

    def post_process(self, raw_query_result, ts):
        """Post process a raw prom query result into per-version data points.

        Args:
            raw_query_result (dict): raw prometheus response; on success,
                raw_query_result["data"]["result"] is a list of entries with
                "metric" (version labels) and "value" (index 1 is the value).
            ts (datetime): time stamp at which the prom query was made.

        Returns:
            Dict[str, DataPoint]: post-processed result keyed by version id.

        Raises:
            ValueError: if the query failed, returned no data, or returned
                a non-vector result.
        """
        prom_result = {}
        if raw_query_result["status"] != "success":
            raise ValueError("Query did not succeed. Check your query template.")
        elif "data" not in raw_query_result:
            # Bug fix: the next two branches previously *returned* a
            # ValueError instance instead of raising it, so callers silently
            # received an exception object in place of a result dict.
            raise ValueError("Query did not succeed. Prometheus returned without data.")
        elif raw_query_result["data"]['resultType'] != 'vector':
            raise ValueError("Query succeeded but returned with a non-vector result. Check your query template.")
        else: # query succeeded and we have some proper data to work with
            results = raw_query_result["data"]["result"]
            for result in results:
                version_id = self.get_version_id(result['metric'])
                if version_id:
                    prom_result[version_id] = self.result_value_to_data_point(result['value'][1], ts)
        return prom_result

    def get_version_id(self, version_labels):
        """Map a prom result's label dict back to a version id (None if unknown).

        Args:
            version_labels (Dict[str, str]): labels and values for a version.

        Returns:
            str: id of the corresponding version, or None.
        """
        return self.version_labels_to_id.get(frozenset(version_labels.items()), None)
class PrometheusCounterMetricQuery(PrometheusMetricQuery):
    """Prometheus query specialised for counter metrics."""

    def get_query(self, query_args):
        """Render the counter query template with the given template args.

        Args:
            query_args (Dict[str, str]): values for the template variables.

        Returns:
            str: the query string used for querying prom.
        """
        rendered = Template(self.query_spec.query_template).substitute(**query_args)
        logger.debug(f"Query: {rendered}")
        return rendered

    def result_value_to_data_point(self, result_value: str, ts: datetime) -> CounterDataPoint:
        """Convert a raw prometheus string value into a CounterDataPoint.

        A counter data point never has a None value; a NaN from prometheus
        is treated as unexpected here.

        Args:
            result_value (str): raw prometheus result value.
            ts (datetime): time stamp at which the prom query was made.

        Returns:
            CounterDataPoint: the parsed counter data point.
        """
        parsed = float(result_value)
        assert not math.isnan(parsed)
        return CounterDataPoint(value=parsed, timestamp=ts)
class PrometheusRatioMetricQuery(PrometheusMetricQuery):
    """Prometheus query specialised for ratio metrics."""

    def get_query(self, query_args):
        """Render the ratio query as (numerator) / (denominator).

        Args:
            query_args (Dict[str, str]): values for the template variables.

        Returns:
            str: the query string used for querying prom.
        """
        numerator = Template(self.query_spec.numerator_template).substitute(**query_args)
        denominator = Template(self.query_spec.denominator_template).substitute(**query_args)
        rendered = f"({numerator}) / ({denominator})"
        logger.debug(f"Query: {rendered}")
        return rendered

    def result_value_to_data_point(self, result_value: str, ts: datetime) -> RatioDataPoint:
        """Convert a raw prometheus string value into a RatioDataPoint.

        A NaN result becomes a data point with value None and a nan_value
        status instead of propagating NaN downstream.

        Args:
            result_value (str): raw prometheus result value.
            ts (datetime): time stamp at which the prom query was made.

        Returns:
            RatioDataPoint: the parsed ratio data point.
        """
        parsed = float(result_value)
        if math.isnan(parsed):
            return RatioDataPoint(value=None, timestamp=ts, status=StatusEnum.nan_value)
        return RatioDataPoint(value=parsed, timestamp=ts)
|
"""Module containing classes and methods for querying prometheus and returning metric data.
"""
# core python dependencies
from datetime import datetime, timedelta, timezone
from uuid import UUID
from typing import Dict, Iterable, Any, Union
import logging
import requests
from string import Template
import math
# external module dependencies
from pydantic import BaseModel, Field
# iter8 dependencies
from iter8_analytics.api.analytics.types import *
import iter8_analytics.constants as constants
from iter8_analytics.config import env_config
logger = logging.getLogger('iter8_analytics')
def new_ratio_max_min(metric_id_to_list_of_values: Dict[iter8id, Iterable[float]]):
    """Return the min and max observed value for each ratio metric.

    Args:
        metric_id_to_list_of_values (Dict[iter8id, Iterable[float]]): maps each
            metric id to the list of values seen for that metric.

    Returns:
        Dict[iter8id, RatioMaxMin]: maps each metric id to an object holding
        its minimum and maximum; both are None when no values were observed.
    """
    max_min_lists = {
        metric_id: [None, None] for metric_id in metric_id_to_list_of_values
    }
    for metric_id, vals in metric_id_to_list_of_values.items():
        try:
            # min()/max() raise ValueError on an empty iterable; keep the
            # (None, None) placeholder in that case. The original bare
            # `except:` also swallowed unrelated errors (TypeError,
            # KeyboardInterrupt, ...); catch only what empty input raises.
            max_min_lists[metric_id][0], max_min_lists[metric_id][1] = min(vals), max(vals)
        except ValueError:
            logger.debug("Empty list of values found for metric %s", metric_id)
        max_min_lists[metric_id] = RatioMaxMin(
            minimum=max_min_lists[metric_id][0],
            maximum=max_min_lists[metric_id][1]
        )
    return max_min_lists
def get_counter_metrics(
    counter_metric_specs: Dict[iter8id, CounterMetricSpec],
    versions: Iterable[Version],
    start_time: datetime) -> Dict[iter8id, Dict[iter8id, CounterDataPoint]]:
    """Query prometheus and get counter metric data for given set of counter metrics and versions.

    Args:
        counter_metric_specs (Dict[iter8id, CounterMetricSpec]): dictionary whose values are the counter metric specs and whose keys are counter metric ids.
        versions (Iterable[Version]): an iterable of version objects; must be non-empty and indexable (versions[0] supplies the version label keys).
        start_time (datetime): start time which dictates the duration parameter used in the query.

    Returns:
        Dict[iter8id, Dict[iter8id, CounterDataPoint]]: outer keys are version ids, inner keys are metric ids, and values are the current counter data points. For e.g.:
        {
            "version1": {
                "metric1": CounterDataPoint(...),
                "metric2": CounterDataPoint(...)
            },
            "version2": {
                "metric1": CounterDataPoint(...),
                "metric2": CounterDataPoint(...)
            }
        }
    """
    cmd = {version.id: {} for version in versions} # initialize cmd
    # populate cmd: one prometheus query per counter metric spec
    for counter_metric_spec in counter_metric_specs.values():
        query_spec = CounterQuerySpec(
            version_label_keys = versions[0].version_labels.keys(),
            query_template = counter_metric_spec.query_template,
            start_time = start_time
        )
        pcmq = PrometheusCounterMetricQuery(query_spec, versions)
        current_time = datetime.now(timezone.utc)
        cmd_from_prom = pcmq.query_from_spec(current_time)
        # Fallback status distinguishes "prom answered but omitted this
        # version" from "prom returned no versions at all".
        status = StatusEnum.zeroed_counter if cmd_from_prom else StatusEnum.no_versions_in_prom_response
        for version in versions:
            if version.id in cmd_from_prom:
                cmd[version.id][counter_metric_spec.id] = cmd_from_prom[version.id]
            else:
                cmd[version.id][counter_metric_spec.id] = CounterDataPoint(
                    value = 0,
                    timestamp = current_time,
                    status = status
                )
    """if a version cannot be found in the list of counter metrics returned by prometheus, then the value of the counter is assumed to be zero
    """
    return cmd
def get_ratio_metrics(
    ratio_metric_specs: Dict[iter8id, RatioMetricSpec],
    counter_metric_specs: Dict[iter8id, CounterMetricSpec],
    counter_metrics: Dict[iter8id, Dict[iter8id, CounterDataPoint]],
    versions: Iterable[Version],
    start_time: datetime) -> Dict[iter8id, Dict[iter8id, RatioDataPoint]]:
    """Query prometheus and get ratio metric data for given set of ratio metrics and versions.

    Args:
        ratio_metric_specs (Dict[iter8id, RatioMetricSpec]): dictionary whose values are the ratio metric specs and whose keys are ratio metric ids.
        counter_metric_specs (Dict[iter8id, CounterMetricSpec]): specs for the counters the ratios are built from; each ratio's numerator/denominator query templates are looked up here.
        counter_metrics (Dict[iter8id, Dict[iter8id, CounterDataPoint]]): current counter values per version (typically the result of get_counter_metrics); used only to choose the fallback status when prometheus omits a version.
        versions (Iterable[Version]): an iterable of version objects; must be non-empty and indexable (versions[0] supplies the version label keys).
        start_time (datetime): start time which dictates the duration parameter used in the query.

    Returns:
        Dict[iter8id, Dict[iter8id, RatioDataPoint]]: outer keys are version ids, inner keys are ratio metric ids, and values are the current ratio data points.
    """
    rmd = {version.id: {} for version in versions} # initialize rmd
    # populate rmd: one prometheus query per ratio metric spec
    for ratio_metric_spec in ratio_metric_specs.values():
        query_spec = RatioQuerySpec(
            version_label_keys = versions[0].version_labels.keys(),
            numerator_template = counter_metric_specs[ratio_metric_spec.numerator].query_template,
            denominator_template = counter_metric_specs[ratio_metric_spec.denominator].query_template,
            start_time = start_time
        )
        prmq = PrometheusRatioMetricQuery(query_spec, versions)
        current_time = datetime.now(timezone.utc)
        rmd_from_prom = prmq.query_from_spec(current_time)
        for version in versions:
            if version.id in rmd_from_prom:
                rmd[version.id][ratio_metric_spec.id] = rmd_from_prom[version.id]
            else:
                # Version missing from prom response: a truthy denominator
                # counter implies the ratio is genuinely 0; otherwise the
                # ratio is unknown (None).
                if version.id in counter_metrics and counter_metrics[version.id][ratio_metric_spec.denominator].value:
                    rmd[version.id][ratio_metric_spec.id] = RatioDataPoint(
                        value = 0,
                        timestamp = current_time,
                        status = StatusEnum.zeroed_ratio
                    )
                else:
                    rmd[version.id][ratio_metric_spec.id] = RatioDataPoint(
                        value = None,
                        timestamp = current_time,
                        status = StatusEnum.absent_version_in_prom_response
                    )
    """if a version cannot be found in the list of ratio metrics returned by prometheus, then the value of the ratio is set to zero if denominator is non-zero, and is set to None otherwise.
    """
    return rmd
class PrometheusMetricQuery():
    """Base class for querying prometheus.

    Attributes:
        prometheus_url (str): prometheus query endpoint URL
        query_spec (QuerySpec): query spec for the prom query
        version_labels_to_id (Dict[FrozenSet[Tuple[str, str]], str]): maps a
            version's label set back to its version id
    """

    def __init__(self, query_spec, versions):
        """Initialize prometheus metric query object.

        Args:
            query_spec (QuerySpec): Prom query spec
            versions (Iterable[Version]): Iterable of Version objects.
        """
        prometheus_url = env_config[constants.METRICS_BACKEND_CONFIG_URL]
        self.prometheus_url = prometheus_url + "/api/v1/query"
        self.query_spec = query_spec
        # Prom results carry label sets, not version ids; this frozenset map
        # lets post_process translate labels back to versions.
        self.version_labels_to_id = {
            frozenset(version.version_labels.items()): version.id for version in versions
        }

    def query_from_spec(self, current_time):
        """Query prometheus using the stored query spec.

        Args:
            current_time (datetime): current time, used to compute the
                duration window relative to query_spec.start_time.

        Returns:
            Dict[str, DataPoint]: post-processed query result keyed by version id.
        """
        interval = int((current_time - self.query_spec.start_time).total_seconds())
        kwargs = {
            "interval": f"{interval}s",
            "version_labels": ",".join(self.query_spec.version_label_keys)
        }
        query = self.get_query(kwargs)
        return self.query(query, current_time)

    def query(self, query, current_time):
        """Send a query string to prometheus and post-process the response.

        Args:
            query (str): the prometheus query string.
            current_time (datetime): timestamp attached to the resulting data points.

        Returns:
            Dict[str, DataPoint]: post-processed query result keyed by version id.

        Raises:
            Exception: HTTP connection errors related to prom requests.
        """
        params = {'query': query}
        try:
            query_result = requests.get(self.prometheus_url, params=params).json()
            logger.debug("query result -- raw")
            logger.debug(query_result)
        except Exception as e:
            logger.error("Error while attempting to connect to prometheus")
            raise(e)
        return self.post_process(query_result, current_time)

    def post_process(self, raw_query_result, ts):
        """Post process a raw prom query result into per-version data points.

        Args:
            raw_query_result (dict): raw prometheus response; on success,
                raw_query_result["data"]["result"] is a list of entries with
                "metric" (version labels) and "value" (index 1 is the value).
            ts (datetime): time stamp at which the prom query was made.

        Returns:
            Dict[str, DataPoint]: post-processed result keyed by version id.

        Raises:
            ValueError: if the query failed, returned no data, or returned
                a non-vector result.
        """
        prom_result = {}
        if raw_query_result["status"] != "success":
            raise ValueError("Query did not succeed. Check your query template.")
        elif "data" not in raw_query_result:
            # Bug fix: the next two branches previously *returned* a
            # ValueError instance instead of raising it, so callers silently
            # received an exception object in place of a result dict.
            raise ValueError("Query did not succeed. Prometheus returned without data.")
        elif raw_query_result["data"]['resultType'] != 'vector':
            raise ValueError("Query succeeded but returned with a non-vector result. Check your query template.")
        else: # query succeeded and we have some proper data to work with
            results = raw_query_result["data"]["result"]
            for result in results:
                version_id = self.get_version_id(result['metric'])
                if version_id:
                    prom_result[version_id] = self.result_value_to_data_point(result['value'][1], ts)
        return prom_result

    def get_version_id(self, version_labels):
        """Map a prom result's label dict back to a version id (None if unknown).

        Args:
            version_labels (Dict[str, str]): labels and values for a version.

        Returns:
            str: id of the corresponding version, or None.
        """
        return self.version_labels_to_id.get(frozenset(version_labels.items()), None)
class PrometheusCounterMetricQuery(PrometheusMetricQuery):
    """Prometheus query specialised for counter metrics."""

    def get_query(self, query_args):
        """Render the counter query template with the given template args.

        Args:
            query_args (Dict[str, str]): values for the template variables.

        Returns:
            str: the query string used for querying prom.
        """
        rendered = Template(self.query_spec.query_template).substitute(**query_args)
        logger.debug(f"Query: {rendered}")
        return rendered

    def result_value_to_data_point(self, result_value: str, ts: datetime) -> CounterDataPoint:
        """Convert a raw prometheus string value into a CounterDataPoint.

        A counter data point never has a None value; a NaN from prometheus
        is treated as unexpected here.

        Args:
            result_value (str): raw prometheus result value.
            ts (datetime): time stamp at which the prom query was made.

        Returns:
            CounterDataPoint: the parsed counter data point.
        """
        parsed = float(result_value)
        assert not math.isnan(parsed)
        return CounterDataPoint(value=parsed, timestamp=ts)
class PrometheusRatioMetricQuery(PrometheusMetricQuery):
    """Prometheus query specialised for ratio metrics."""

    def get_query(self, query_args):
        """Render the ratio query as (numerator) / (denominator).

        Args:
            query_args (Dict[str, str]): values for the template variables.

        Returns:
            str: the query string used for querying prom.
        """
        numerator = Template(self.query_spec.numerator_template).substitute(**query_args)
        denominator = Template(self.query_spec.denominator_template).substitute(**query_args)
        rendered = f"({numerator}) / ({denominator})"
        logger.debug(f"Query: {rendered}")
        return rendered

    def result_value_to_data_point(self, result_value: str, ts: datetime) -> RatioDataPoint:
        """Convert a raw prometheus string value into a RatioDataPoint.

        A NaN result becomes a data point with value None and a nan_value
        status instead of propagating NaN downstream.

        Args:
            result_value (str): raw prometheus result value.
            ts (datetime): time stamp at which the prom query was made.

        Returns:
            RatioDataPoint: the parsed ratio data point.
        """
        parsed = float(result_value)
        if math.isnan(parsed):
            return RatioDataPoint(value=None, timestamp=ts, status=StatusEnum.nan_value)
        return RatioDataPoint(value=parsed, timestamp=ts)
|
en
| 0.699964
|
Module containing classes and methods for querying prometheus and returning metric data. # core python dependencies # external module dependencies # iter8 dependencies Return min and max for each ratio metric Args: metric_id_to_list_of_values (Dict[iter8d, Iterable[float]]): dictionary whose keys are metric ids and whose values are a list of values seen for each emtric Returns: max_min_lists (Dict[iter8id, RatioMaxMin): dictionary whose keys are metric ids and whose values are an object for each metric containing its min and max if the list of values is empty for a metric id, return None values for max and min Query prometheus and get counter metric data for given set of counter metrics and versions. Args: counter_metric_specs (Dict[iter8id, CounterMetricSpec]): dictionary whose values are the counter metric specs and whose keys are counter metric ids. versions (Iterable[Version]): A iterable of version objects. start_time (datetime): start time which dictates the duration parameter used in the query. Returns: Dict[iter8id, Dict[iter8id, CounterDataPoint]]: dictionary whose keys are version ids and whose values are dictionaries. The inner dictionary has keys which are metric ids and values which are current counter data point values. For e.g.: { "version1": { "metric1": CounterDataPoint(...), "metric2": CounterDataPoint(...) }, "version2": { "metric1": CounterDataPoint(...), "metric2": CounterDataPoint(...) } } # initialize cmd # populate cmd if a version cannot be found in the list of counter metrics returned by prometheus, then the value of the counter is assumed to be zero Query prometheus and get ratio metric data for given set of ratio metrics and versions. Args: ratio_metric_specs (Dict[iter8id, RatioMetricSpec]): dictionary whose values are the ratio metric specs and whose keys are ratio metric ids counter_metric_specs (Dict[iter8id, CounterMetricSpec]): dictionary whose values are the counter metric specs and whose keys are counter metric ids. 
counter_metrics (Dict[iter8id, Dict[iter8id, CounterDataPoint]]): dictionary whose keys are version ids and whose values are dictionaries. The inner dictionary has keys which are metric ids and values which are current counter data point values. Typically, the object returned by get_counter_metrics(...) method will be used as the value of this argument. versions (Iterable[Version]): A iterable of version objects. start_time (datetime): start time which dictates the duration parameter used in the query. Returns: Dict[iter8id, Dict[iter8id, RatioDataPoint]]: dictionary whose keys are version ids and whose values are dictionaries. The inner dictionary has keys which are metric ids and values which are current ratio data point values. For e.g.: { "version1": { "metric1": RatioDataPoint(...), "metric2": RatioDataPoint(...) }, "version2": { "metric1": RatioDataPoint(...), "metric2": RatioDataPoint(...) } } # initialize rmd # populate rmd if a version cannot be found in the list of ratio metrics returned by prometheus, then the value of the ratio is set to zero if denominator is non-zero, and is set to None otherwise. Base class for querying prometheus. Attributes: prometheus_url (str): Prom url for quering query_spec (QuerySpec): Query spec for prom query version_labels_to_id (Dict[Set[Tuple[str, str]], str]): Dictionary mapping version labels to their ids Initialize prometheus metric query object. Args: query_spec (QuerySpec): Prom query spec versions (Iterable[Version]): Iterable of Version objects. the above frozenset maps from version labels to version ids Query prometheus using query spec. Args: current_time (datetime): Current time needed to compute duration value within the query. Returns: query_result (Dict[str, Dict[str, DataPoint]]]): Post processed query result # also hard coded Query prometheus using query parameters. Args: current_time (datetime): Current time needed to compute duration value within the query. 
Returns: query_result (Dict[str, Dict[str, DataPoint]]]): Post processed query result Raises: Exception: HTTP connection errors related to prom requests. Post process prom query result Args: raw_query_result ({ "data": { "result": { "value": [float], # sequence whose element index 1 is the metric value "metric": Dict[str, str] # version labels } } }): Raw prometheus result ts (datetime): time stamp at which prom query was made Returns: query_result (Dict[str, Dict[str, DataPoint]]]): Post processed query result Raises: ValueError: If query was unsuccessful for reasons such as bad template, prom result contained no data or returned data in a non-vector form which cannot be post processed. # query succeeded and we have some proper data to work with Get version id from version labels. Args: version_labels (Dict[str, str]): Dictionary of labels and their values for a version Returns: version_id (str): id of the corresponding version Derived class for querying prometheus for counter metric. Extrapolate query from counter query spec and query_args Args: query_args (Dict[str, str]): Dictionary of values of template variables in counter query spec Returns: query (str): The query string used for querying prom Convert prometheus result value in string format to CounterDataPoint Args: result_value (str): Raw prometheus result value ts (datetime): time stamp at which prom query was made Returns: counter_data_point (CounterDataPoint): Counter data point Counter data point can never have a None value Derived class for querying prometheus for counter metric. 
Extrapolate query from ratio query spec and query_args Args: query_args (Dict[str, str]): Dictionary of values of template variables in query_spec Returns: query (str): The query string used for querying prom Convert prometheus result value in string format to RatioDataPoint Args: result_value (str): Raw prometheus result value ts (datetime): time stamp at which prom query was made Returns: ratio_data_point (RatioDataPoint): Ratio data point
| 2.794782
| 3
|
ckanext/datastore/logic/auth.py
|
florianm/ckan
| 3
|
6629243
|
import ckan.plugins as p
def datastore_auth(context, data_dict, privilege='resource_update'):
    """Check whether the current user holds *privilege* on the resource.

    Falls back to ``resource_id`` when ``id`` is not supplied, then delegates
    to CKAN's authorization system via ``check_access``.

    :param context: CKAN action context; provides the acting user.
    :param data_dict: request parameters; must carry ``id`` or ``resource_id``.
    :param privilege: name of the auth function to check
        (default ``resource_update``).
    :returns: dict with ``success`` and, on failure, an explanatory ``msg``.
    """
    # Normalise the key name the downstream auth functions expect.
    if 'id' not in data_dict:
        data_dict['id'] = data_dict.get('resource_id')
    user = context.get('user')
    authorized = p.toolkit.check_access(privilege, context, data_dict)
    if not authorized:
        return {
            'success': False,
            'msg': p.toolkit._('User {0} not authorized to update resource {1}'
                .format(str(user), data_dict['id']))
        }
    else:
        return {'success': True}
def datastore_create(context, data_dict):
    """Authorize datastore_create.

    When the payload creates a new resource (identified only by its package),
    require ``package_update`` on the package; otherwise require
    ``resource_update`` on the existing resource.
    """
    package_id = None
    if 'resource' in data_dict:
        package_id = data_dict['resource'].get('package_id')
    if package_id:
        data_dict['id'] = package_id
        privilege = 'package_update'
    else:
        privilege = 'resource_update'
    return datastore_auth(context, data_dict, privilege=privilege)
def datastore_upsert(context, data_dict):
    """Authorize upserting rows: requires resource_update on the resource."""
    return datastore_auth(context, data_dict)
def datastore_delete(context, data_dict):
    """Authorize deleting datastore data: requires resource_update."""
    return datastore_auth(context, data_dict)
@p.toolkit.auth_allow_anonymous_access
def datastore_info(context, data_dict):
    """Authorize reading datastore metadata: requires resource_show."""
    return datastore_auth(context, data_dict, 'resource_show')
@p.toolkit.auth_allow_anonymous_access
def datastore_search(context, data_dict):
    """Authorize searching a resource: requires resource_show."""
    return datastore_auth(context, data_dict, 'resource_show')
@p.toolkit.auth_allow_anonymous_access
def datastore_search_sql(context, data_dict):
    """Authorize free-form SQL search.

    NOTE(review): always succeeds, even for anonymous users; access control is
    presumably enforced at the database level -- confirm before exposing.
    """
    return {'success': True}
def datastore_change_permissions(context, data_dict):
    """Authorize changing datastore permissions: requires resource_update."""
    return datastore_auth(context, data_dict)
|
import ckan.plugins as p
def datastore_auth(context, data_dict, privilege='resource_update'):
if not 'id' in data_dict:
data_dict['id'] = data_dict.get('resource_id')
user = context.get('user')
authorized = p.toolkit.check_access(privilege, context, data_dict)
if not authorized:
return {
'success': False,
'msg': p.toolkit._('User {0} not authorized to update resource {1}'
.format(str(user), data_dict['id']))
}
else:
return {'success': True}
def datastore_create(context, data_dict):
if 'resource' in data_dict and data_dict['resource'].get('package_id'):
data_dict['id'] = data_dict['resource'].get('package_id')
privilege = 'package_update'
else:
privilege = 'resource_update'
return datastore_auth(context, data_dict, privilege=privilege)
def datastore_upsert(context, data_dict):
return datastore_auth(context, data_dict)
def datastore_delete(context, data_dict):
return datastore_auth(context, data_dict)
@p.toolkit.auth_allow_anonymous_access
def datastore_info(context, data_dict):
return datastore_auth(context, data_dict, 'resource_show')
@p.toolkit.auth_allow_anonymous_access
def datastore_search(context, data_dict):
return datastore_auth(context, data_dict, 'resource_show')
@p.toolkit.auth_allow_anonymous_access
def datastore_search_sql(context, data_dict):
return {'success': True}
def datastore_change_permissions(context, data_dict):
return datastore_auth(context, data_dict)
|
none
| 1
| 2.043536
| 2
|
|
GetData/DataFromTS.py
|
MarcusErz/TimeSeriesData
| 0
|
6629244
|
import math
import numpy as np
from sklearn.manifold import Isomap
from sklearn.preprocessing import StandardScaler
def data_from_ts(ts_values, window_size):
    """Split *ts_values* into consecutive windows of *window_size* samples
    (the trailing window may be shorter) and build one pairwise-distance
    network per window.
    """
    total = len(ts_values)
    # Report how many networks will be produced (matches the loop below).
    print(math.ceil(total / window_size))
    starts = range(0, total, window_size)
    return [compute_network(ts_values[s:s + window_size]) for s in starts]
def compute_network(window):
    """Return the symmetric |x_i - x_j| distance matrix of one window."""
    size = len(window)
    network = np.zeros((size, size))
    for col in range(size):
        squared = np.array(abs(window[col] - window) ** 2, dtype=np.float64)
        network[:, col] = np.sqrt(squared)
    return network
def reduce_networks(networks):
    """Reduce each network with Isomap, rebuild a distance matrix in the
    embedded space, and sparsify it by zeroing entries whose standardised
    deviation from the original network is within one standard deviation.

    :param networks: sequence of square distance matrices.
    :returns: list of sparsified, dimensionality-reduced networks.
    """
    sparsifyed_networks = []
    for i in range(len(networks)):
        net = networks[i]
        # Embed into 2 components using the 4 nearest neighbours.
        iso_net = get_iso_net(net, 4, 2)
        reduced_net = compute_multi_net(iso_net)
        # normalize both matrices so their entries are comparable
        # TODO: the elements on the main diagonal should in principle be
        # equal, but they are not
        scaler = StandardScaler()
        scaler.fit(net)
        a1 = scaler.transform(net)
        scaler2 = StandardScaler()
        scaler2.fit(reduced_net)
        a2 = scaler2.transform(reduced_net)
        difference = a1 - a2
        sparsify_net = spar_net(reduced_net, difference)
        # print('sparsify net: {}'.format(sparsify_net))
        sparsifyed_networks.append(sparsify_net)
    return sparsifyed_networks
def compute_multi_net(iso_net):
    """Compute the pairwise Euclidean distance matrix of embedded points.

    :param iso_net: (n_samples, n_components) array of embedded coordinates.
    :returns: (n_samples, n_samples) symmetric distance matrix.
    """
    n_points = iso_net.shape[0]
    distance_matrix = np.zeros((n_points, n_points))
    # Bug fix: the original used ``iso_net.ndim`` (always 2 for a 2-D array)
    # as the number of embedding components; the two only coincide when
    # exactly 2 components are requested. ``shape[1]`` is the real count.
    n_components = iso_net.shape[1]
    for x in range(n_points):
        distance = np.zeros(n_points)
        for y in range(n_components):
            distance = np.power(abs(iso_net[x, y] - iso_net[:, y]), 2) + distance
        distance_matrix[x, :] = np.sqrt(distance)
    return distance_matrix
def get_iso_net(net, neighbours, comps):
    """Embed *net* into *comps* dimensions via Isomap with *neighbours*
    nearest neighbours."""
    embedder = Isomap(n_neighbors=neighbours, n_components=comps)
    return embedder.fit_transform(net)
def spar_net(reduced_net, difference):
    """Zero out entries of *reduced_net* whose corresponding *difference*
    value lies strictly within one standard deviation of all differences.

    :param reduced_net: matrix to sparsify (not modified in place).
    :param difference: deviation matrix, same shape as *reduced_net*.
    :returns: sparsified copy of *reduced_net*.
    """
    # TODO: is the standard deviation a good threshold choice?
    std_diff = np.std(difference)
    sparsified_net = reduced_net.copy()
    # Vectorised form of the original element-wise double loop:
    # -std_diff < difference < std_diff  <=>  |difference| < std_diff.
    sparsified_net[np.abs(difference) < std_diff] = 0
    return sparsified_net
|
import math
import numpy as np
from sklearn.manifold import Isomap
from sklearn.preprocessing import StandardScaler
def data_from_ts(ts_values, window_size):
len_ts = len(ts_values)
len_networks = math.ceil(len_ts / window_size)
networks = []
print(len_networks)
for i in range(0, len_ts, window_size):
window = ts_values[i:i + window_size]
new_network = compute_network(window)
networks.append(new_network)
return networks
def compute_network(window):
len_window = len(window)
network = np.zeros((len_window, len_window))
for i in range(len_window):
network[:, i] = np.sqrt(np.array(abs(window[i] - window) ** 2, dtype=np.float64))
return network
def reduce_networks(networks):
sparsifyed_networks = []
for i in range(len(networks)):
net = networks[i]
iso_net = get_iso_net(net, 4, 2)
reduced_net = compute_multi_net(iso_net)
# normalize
# TODO Die Elemente auf der Hauptdiagonalen sollten eigentlich gleich sein sind sie aber nicht
scaler = StandardScaler()
scaler.fit(net)
a1 = scaler.transform(net)
scaler2 = StandardScaler()
scaler2.fit(reduced_net)
a2 = scaler2.transform(reduced_net)
difference = a1 - a2
sparsify_net = spar_net(reduced_net, difference)
# print('sparsify net: {}'.format(sparsify_net))
sparsifyed_networks.append(sparsify_net)
return sparsifyed_networks
def compute_multi_net(iso_net):
len_ts = iso_net.shape[0]
distance_matrix = np.zeros((len_ts, len_ts))
dim_ts = iso_net.ndim
for x in range(len_ts):
distance = np.zeros(len_ts)
for y in range(dim_ts):
distance = np.power(abs(iso_net[x, y] - iso_net[:, y]), 2) + distance
distance_matrix[x, :] = np.sqrt(distance)
return distance_matrix
def get_iso_net(net, neighbours, comps):
embedding = Isomap(n_neighbors=neighbours, n_components=comps)
net_transformed = embedding.fit_transform(net)
return net_transformed
def spar_net(reduced_net, difference):
len_ts = reduced_net.shape[0]
width_ts = reduced_net.shape[1]
sparsified_net = reduced_net.copy()
# TODO is std a good solution
std_diff = np.std(difference)
for i in range(len_ts):
for z in range(width_ts):
if -std_diff < difference[i, z] < std_diff:
sparsified_net[i, z] = 0
return sparsified_net
|
de
| 0.962331
|
# normalize # TODO Die Elemente auf der Hauptdiagonalen sollten eigentlich gleich sein sind sie aber nicht # print('sparsify net: {}'.format(sparsify_net)) # TODO is std a good solution
| 2.698492
| 3
|
entomb/processing.py
|
countermeasure/entomb
| 0
|
6629245
|
import datetime
import os
import subprocess
from entomb import (
exceptions,
utilities,
)
@utilities.hide_cursor()
def process_objects(path: str, immutable: bool, include_git: bool, dry_run: bool) -> None:
    """Set or unset the immutable attribute for all files on a path.
    Parameters
    ----------
    path : str
        An absolute path.
    immutable: bool
        Set immutable attributes if True, unset immutable attributes if False.
    include_git: bool
        Whether to include git files and directories.
    dry_run: bool
        Whether to do a dry run which makes no changes.
    Returns
    -------
    None
    Raises
    ------
    AssertionError
        If the path does not exist.
    """
    # Parameter check.  (NOTE: asserts are stripped under "python -O".)
    assert os.path.exists(path)
    # Set up.
    attribute_changed_count = 0
    attribute_settable_count = 0
    errors = []
    file_count = 0
    link_count = 0
    operation = "entombed" if immutable else "unset"
    # Print the operation.
    if immutable:
        print("Entomb objects")
    else:
        print("Unset objects")
    print()
    # Print the progress header and set up the progress bar.
    utilities.print_header("Progress")
    total_file_paths = utilities.count_file_paths(path, include_git)
    start_time = datetime.datetime.now()
    utilities.print_progress_bar(start_time, 0, total_file_paths)
    # Walk the tree.
    for file_path in utilities.file_paths(path, include_git):
        # Count links, but don't try to operate on them as they don't have
        # an immutable attribute.
        if os.path.islink(file_path):
            link_count += 1
        else:
            # Change the file's attribute if necessary.
            try:
                attribute_was_changed = _process_object(
                    file_path,
                    immutable,
                    dry_run,
                )
                attribute_settable_count += 1
                if attribute_was_changed:
                    attribute_changed_count += 1
            except exceptions.SetAttributeError as error:
                # Collect and report at the end; one bad file must not stop
                # the walk.
                errors.append(error)
            # Count the file.  (Links are tracked separately above, so
            # file_count + link_count equals the number of paths visited.)
            file_count += 1
        # Update the progress bar.
        utilities.print_progress_bar(
            start_time,
            (file_count + link_count),
            total_file_paths,
        )
    print()
    print()
    # Print the changes.
    if file_count > 0:
        utilities.print_header("Changes")
        print("{} {} files".format(operation.title(), attribute_changed_count))
        print()
    # Print a summary.
    utilities.print_header("Summary")
    if file_count > 0:
        print(
            "All {} files for which immutability can be set are now {}"
            .format(attribute_settable_count, operation),
        )
        print("All {} links were ignored".format(link_count))
    else:
        print("No files were found")
    print()
    # Print any errors.
    _print_errors(errors)
def _print_errors(errors):
"""Print the list of errors resulting from file processing.
Parameters
----------
errors : list of str
A list of error messages.
Returns
-------
None
"""
# Return if there are no errors.
if not errors:
return
# Print the header.
utilities.print_header("Errors")
# Print up to 10 errors.
for error in errors[:10]:
print(">> {}".format(error))
# If there are more than 10 errors, print a message about how many more
# there are.
error_count = len(errors)
if error_count > 10:
unshown_errors = len(errors) - 10
print(">> Plus {} more errors".format(unshown_errors))
print()
def _process_object(path, immutable, dry_run):
    """Set or unset the immutable attribute for a single file.
    Parameters
    ----------
    path : str
        The absolute path of a file.
    immutable: bool
        Set immutable attribute if True, unset immutable attribute if False.
    dry_run : bool
        Whether to do a dry run which makes no changes.
    Returns
    -------
    bool
        Whether the immutable attribute was changed, or if this was a dry run,
        should have been changed.
    Raises
    ------
    AssertionError
        If the path is a directory, is a link or does not exist.
    SetAttributeError
        If the path's immutable attribute cannot be set.
    """
    # Parameter check: only existing regular files are processable.
    assert not os.path.isdir(path)
    assert not os.path.islink(path)
    assert os.path.exists(path)
    try:
        currently_immutable = utilities.file_is_immutable(path)
    except exceptions.GetAttributeError:
        raise exceptions.SetAttributeError(
            "Immutable attribute not settable for {}".format(path)
        )
    needs_change = immutable != currently_immutable
    if needs_change and not dry_run:
        _set_attribute("+i" if immutable else "-i", path)
    # True means the attribute was changed (or, on a dry run, would be).
    return needs_change
def _set_attribute(attribute, path):
    """Set or unset an attribute for a file.
    Parameters
    ----------
    attribute: str
        The attribute to be set. In the form of "+i" or "-i".
    path : str
        The absolute path of a file.
    Returns
    -------
    None
    Raises
    ------
    SetAttributeError
        If the exit status of the chattr command is non-zero.
    """
    try:
        # List argv (shell=False) keeps *path* safe from shell interpretation.
        subprocess.run(
            ["sudo", "chattr", attribute, path],
            check=True,
            stderr=subprocess.STDOUT,
            stdout=subprocess.DEVNULL,
        )
    except subprocess.CalledProcessError as error:
        msg = "Immutable attribute not settable for {}".format(path)
        # Chain the original error so the chattr exit status stays visible
        # in the traceback.
        raise exceptions.SetAttributeError(msg) from error
|
import datetime
import os
import subprocess
from entomb import (
exceptions,
utilities,
)
@utilities.hide_cursor()
def process_objects(path, immutable, include_git, dry_run):
"""Set or unset the immutable attribute for all files on a path.
Parameters
----------
path : str
An absolute path.
immutable: bool
Set immutable attributes if True, unset immutable attributes if False.
include_git: bool
Whether to include git files and directories.
dry_run: bool
Whether to do a dry run which makes no changes.
Returns
-------
None
Raises
------
AssertionError
If the path does not exist.
"""
# Parameter check.
assert os.path.exists(path)
# Set up.
attribute_changed_count = 0
attribute_settable_count = 0
errors = []
file_count = 0
link_count = 0
operation = "entombed" if immutable else "unset"
# Print the operation.
if immutable:
print("Entomb objects")
else:
print("Unset objects")
print()
# Print the progress header and set up the progress bar.
utilities.print_header("Progress")
total_file_paths = utilities.count_file_paths(path, include_git)
start_time = datetime.datetime.now()
utilities.print_progress_bar(start_time, 0, total_file_paths)
# Walk the tree.
for file_path in utilities.file_paths(path, include_git):
# Count links, but don't try to operate on them as they don't have
# an immutable attribute.
if os.path.islink(file_path):
link_count += 1
else:
# Change the file's attribute if necessary.
try:
attribute_was_changed = _process_object(
file_path,
immutable,
dry_run,
)
attribute_settable_count += 1
if attribute_was_changed:
attribute_changed_count += 1
except exceptions.SetAttributeError as error:
errors.append(error)
# Count the file.
file_count += 1
# Update the progress bar.
utilities.print_progress_bar(
start_time,
(file_count + link_count),
total_file_paths,
)
print()
print()
# Print the changes.
if file_count > 0:
utilities.print_header("Changes")
print("{} {} files".format(operation.title(), attribute_changed_count))
print()
# Print a summary.
utilities.print_header("Summary")
if file_count > 0:
print(
"All {} files for which immutability can be set are now {}"
.format(attribute_settable_count, operation),
)
print("All {} links were ignored".format(link_count))
else:
print("No files were found")
print()
# Print any errors.
_print_errors(errors)
def _print_errors(errors):
"""Print the list of errors resulting from file processing.
Parameters
----------
errors : list of str
A list of error messages.
Returns
-------
None
"""
# Return if there are no errors.
if not errors:
return
# Print the header.
utilities.print_header("Errors")
# Print up to 10 errors.
for error in errors[:10]:
print(">> {}".format(error))
# If there are more than 10 errors, print a message about how many more
# there are.
error_count = len(errors)
if error_count > 10:
unshown_errors = len(errors) - 10
print(">> Plus {} more errors".format(unshown_errors))
print()
def _process_object(path, immutable, dry_run):
"""Set or unset the immutable attribute for a file.
Parameters
----------
path : str
The absolute path of a file.
immutable: bool
Set immutable attribute if True, unset immutable attribute if False.
dry_run : bool
Whether to do a dry run which makes no changes.
Returns
-------
bool
Whether the immutable attribute was changed, or if this was a dry run,
should have been changed.
Raises
------
AssertionError
If the path is a directory, is a link or does not exist.
SetAttributeError
If the path's immutable attribute cannot be set.
"""
# Parameter check.
assert not os.path.isdir(path)
assert not os.path.islink(path)
assert os.path.exists(path)
try:
is_immutable = utilities.file_is_immutable(path)
except exceptions.GetAttributeError:
msg = "Immutable attribute not settable for {}".format(path)
raise exceptions.SetAttributeError(msg)
change_attribute = immutable != is_immutable
if change_attribute and not dry_run:
attribute = "+i" if immutable else "-i"
_set_attribute(attribute, path)
# The value of change_attribute is a proxy for whether the immutable
# attribute was changed, or if this was a dry run, should have been
# changed.
return change_attribute
def _set_attribute(attribute, path):
"""Set or unset an attribute for a file.
Parameters
----------
attribute: str
The attribute to be set. In the form of "+i" or "-i".
path : str
The absolute path of a file.
Returns
-------
None
Raises
------
SetAttributeError
If the exit status of the chattr command is non-zero.
"""
try:
subprocess.run(
["sudo", "chattr", attribute, path],
check=True,
stderr=subprocess.STDOUT,
stdout=subprocess.DEVNULL,
)
except subprocess.CalledProcessError:
msg = "Immutable attribute not settable for {}".format(path)
raise exceptions.SetAttributeError(msg)
|
en
| 0.766857
|
Set or unset the immutable attribute for all files on a path. Parameters ---------- path : str An absolute path. immutable: bool Set immutable attributes if True, unset immutable attributes if False. include_git: bool Whether to include git files and directories. dry_run: bool Whether to do a dry run which makes no changes. Returns ------- None Raises ------ AssertionError If the path does not exist. # Parameter check. # Set up. # Print the operation. # Print the progress header and set up the progress bar. # Walk the tree. # Count links, but don't try to operate on them as they don't have # an immutable attribute. # Change the file's attribute if necessary. # Count the file. # Update the progress bar. # Print the changes. # Print a summary. # Print any errors. Print the list of errors resulting from file processing. Parameters ---------- errors : list of str A list of error messages. Returns ------- None # Return if there are no errors. # Print the header. # Print up to 10 errors. # If there are more than 10 errors, print a message about how many more # there are. Set or unset the immutable attribute for a file. Parameters ---------- path : str The absolute path of a file. immutable: bool Set immutable attribute if True, unset immutable attribute if False. dry_run : bool Whether to do a dry run which makes no changes. Returns ------- bool Whether the immutable attribute was changed, or if this was a dry run, should have been changed. Raises ------ AssertionError If the path is a directory, is a link or does not exist. SetAttributeError If the path's immutable attribute cannot be set. # Parameter check. # The value of change_attribute is a proxy for whether the immutable # attribute was changed, or if this was a dry run, should have been # changed. Set or unset an attribute for a file. Parameters ---------- attribute: str The attribute to be set. In the form of "+i" or "-i". path : str The absolute path of a file. 
Returns ------- None Raises ------ SetAttributeError If the exit status of the chattr command is non-zero.
| 2.494737
| 2
|
tebless/examples/menu.py
|
Akhail/Tebless
| 5
|
6629246
|
<reponame>Akhail/Tebless
# Copyright (c) 2017 <NAME>
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
from tebless.devs import init_debug
from tebless.utils import Store
from tebless.themes.menu import double
from tebless.widgets import Menu, Window, Label
# Application-wide state shared by every window below.
store = Store()
@Window.decorator(store=store)
def view_menu(window):
    """Render a 100-item menu using the double-border theme."""
    theme = double(window, {
        'items': ['Valor ' + str(x) for x in range(100)],
    })
    window += Menu(**theme)
@Window.decorator(store=store)
def view_single_menu(window):
    """Render a paginated menu whose selection updates a label below it."""
    def update_label(sender):
        # Mirror the currently highlighted menu value into the label widget.
        sender.store.label.value = sender.value
    window += Menu(
        items=['Valor ' + str(x) for x in range(110)],
        limit=8,
        header='Menu',
        footer='Pagina {page} de {last}',
        on_change=update_label
    )
    window += Label(ref='label', cordy=10, text='mundo')
def main():
    """Show both example views in sequence."""
    view_menu()
    view_single_menu()
if __name__ == '__main__':
    # init_debug presumably enables tebless debug tracing for this script
    # (see tebless.devs) -- confirm against the library docs.
    init_debug(__file__)
    main()
|
# Copyright (c) 2017 <NAME>
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
from tebless.devs import init_debug
from tebless.utils import Store
from tebless.themes.menu import double
from tebless.widgets import Menu, Window, Label
store = Store()
@Window.decorator(store=store)
def view_menu(window):
theme = double(window, {
'items': ['Valor ' + str(x) for x in range(100)],
})
window += Menu(**theme)
@Window.decorator(store=store)
def view_single_menu(window):
def update_label(sender):
sender.store.label.value = sender.value
window += Menu(
items=['Valor ' + str(x) for x in range(110)],
limit=8,
header='Menu',
footer='Pagina {page} de {last}',
on_change=update_label
)
window += Label(ref='label', cordy=10, text='mundo')
def main():
view_menu()
view_single_menu()
if __name__ == '__main__':
init_debug(__file__)
main()
|
en
| 0.859002
|
# Copyright (c) 2017 <NAME> # # This software is released under the MIT License. # https://opensource.org/licenses/MIT
| 2.455064
| 2
|
test_package/conanfile.py
|
Tymolc/conan-depot_tools_installer
| 1
|
6629247
|
from conans import ConanFile
import os
class TestPackage(ConanFile):
    """Smoke test for the depot_tools package: verify gclient is runnable."""
    def test(self):
        # run_environment=True puts the package's binaries on PATH so the
        # gclient executable from depot_tools resolves.
        self.run("gclient --version", run_environment=True)
|
from conans import ConanFile
import os
class TestPackage(ConanFile):
def test(self):
self.run("gclient --version", run_environment=True)
|
none
| 1
| 1.626222
| 2
|
|
LeetCode/python-R1/0561-数组拆分1/V2.py
|
huuuuusy/Programming-Practice-Everyday
| 4
|
6629248
|
<reponame>huuuuusy/Programming-Practice-Everyday<filename>LeetCode/python-R1/0561-数组拆分1/V2.py
"""
@Author: huuuuusy
@GitHub: https://github.com/huuuuusy
系统: Ubuntu 18.04
IDE: VS Code 1.36
工具: python == 3.7.3
"""
"""
思路:
先排序
对排序结果,第0,2,4,...个元素是每对的最小值
利用切片提取结果,然后求和即可
结果:
执行用时 : 128 ms, 在所有 Python3 提交中击败了88.15%的用户
内存消耗 : 15 MB, 在所有 Python3 提交中击败了56.00%的用户
"""
class Solution:
    def arrayPairSum(self, nums):
        """Pair up the sorted numbers and sum the smaller of each pair.

        After sorting, each pair's minimum sits at an even index, so the
        answer is simply the sum of every other element.
        """
        ordered = sorted(nums)
        return sum(ordered[0::2])
if __name__ == "__main__":
    # Quick manual check against the sample input.
    print(Solution().arrayPairSum([7, 3, 1, 0, 0, 6]))
|
"""
@Author: huuuuusy
@GitHub: https://github.com/huuuuusy
系统: Ubuntu 18.04
IDE: VS Code 1.36
工具: python == 3.7.3
"""
"""
思路:
先排序
对排序结果,第0,2,4,...个元素是每对的最小值
利用切片提取结果,然后求和即可
结果:
执行用时 : 128 ms, 在所有 Python3 提交中击败了88.15%的用户
内存消耗 : 15 MB, 在所有 Python3 提交中击败了56.00%的用户
"""
class Solution:
def arrayPairSum(self, nums):
result = sum(sorted(nums)[::2])
return result
if __name__ == "__main__":
nums = [7,3,1,0,0,6]
answer = Solution().arrayPairSum(nums)
print(answer)
|
zh
| 0.908651
|
@Author: huuuuusy @GitHub: https://github.com/huuuuusy 系统: Ubuntu 18.04 IDE: VS Code 1.36 工具: python == 3.7.3 思路: 先排序 对排序结果,第0,2,4,...个元素是每对的最小值 利用切片提取结果,然后求和即可 结果: 执行用时 : 128 ms, 在所有 Python3 提交中击败了88.15%的用户 内存消耗 : 15 MB, 在所有 Python3 提交中击败了56.00%的用户
| 3.576692
| 4
|
catalog/migrations/0013_auto_20160926_1901.py
|
starpolar/django-registration
| 1
|
6629249
|
<filename>catalog/migrations/0013_auto_20160926_1901.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-26 09:01
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: change date_acquired's default to the
    # callable datetime.date.today (evaluated per save) instead of a fixed
    # value, so new BookInstance rows get the current date.
    dependencies = [
        ('catalog', '0012_bookinstance_date_acquired'),
    ]
    operations = [
        migrations.AlterField(
            model_name='bookinstance',
            name='date_acquired',
            field=models.DateField(default=datetime.date.today),
        ),
    ]
|
<filename>catalog/migrations/0013_auto_20160926_1901.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-26 09:01
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalog', '0012_bookinstance_date_acquired'),
]
operations = [
migrations.AlterField(
model_name='bookinstance',
name='date_acquired',
field=models.DateField(default=datetime.date.today),
),
]
|
en
| 0.808435
|
# -*- coding: utf-8 -*- # Generated by Django 1.10 on 2016-09-26 09:01
| 1.456267
| 1
|
samtranslator/swagger/swagger.py
|
giuliocalzolari/serverless-application-model
| 2
|
6629250
|
import copy
from six import string_types
from samtranslator.model.intrinsics import ref
class SwaggerEditor(object):
"""
Wrapper class capable of parsing and generating Swagger JSON. This implements Swagger spec just enough that SAM
cares about. It is built to handle "partial Swagger" ie. Swagger that is incomplete and won't
pass the Swagger spec. But this is necessary for SAM because it iteratively builds the Swagger starting from an
empty skeleton.
"""
_OPTIONS_METHOD = "options"
_X_APIGW_INTEGRATION = 'x-amazon-apigateway-integration'
_X_ANY_METHOD = 'x-amazon-apigateway-any-method'
    def __init__(self, doc):
        """
        Initialize the class with a swagger dictionary. This class creates a copy of the Swagger and performs all
        modifications on this copy.

        :param dict doc: Swagger document as a dictionary
        :raises ValueError: If the input Swagger document does not meet the basic Swagger requirements.
        """
        if not SwaggerEditor.is_valid(doc):
            raise ValueError("Invalid Swagger document")
        # Deep-copy so the caller's document is never mutated.
        self._doc = copy.deepcopy(doc)
        # Shortcut into the copy; every path edit goes through this reference.
        self.paths = self._doc["paths"]
def has_path(self, path, method=None):
"""
Returns True if this Swagger has the given path and optional method
:param string path: Path name
:param string method: HTTP method
:return: True, if this path/method is present in the document
"""
method = self._normalize_method_name(method)
result = path in self.paths
if method:
result = result and \
isinstance(self.paths[path], dict) and \
method in self.paths[path]
return result
def has_integration(self, path, method):
"""
Checks if an API Gateway integration is already present at the given path/method
:param string path: Path name
:param string method: HTTP method
:return: True, if an API Gateway integration is already present
"""
method = self._normalize_method_name(method)
return self.has_path(path, method) and \
isinstance(self.paths[path][method], dict) and \
bool(self.paths[path][method].get(self._X_APIGW_INTEGRATION)) # Key should be present & Value is non-empty
def add_path(self, path, method=None):
"""
Adds the path/method combination to the Swagger, if not already present
:param string path: Path name
:param string method: HTTP method
:raises ValueError: If the value of `path` in Swagger is not a dictionary
"""
method = self._normalize_method_name(method)
self.paths.setdefault(path, {})
if not isinstance(self.paths[path], dict):
# Either customers has provided us an invalid Swagger, or this class has messed it somehow
raise ValueError("Value of '{}' path must be a dictionary according to Swagger spec".format(path))
self.paths[path].setdefault(method, {})
def add_lambda_integration(self, path, method, integration_uri):
"""
Adds aws_proxy APIGW integration to the given path+method.
:param string path: Path name
:param string method: HTTP Method
:param string integration_uri: URI for the integration.
"""
method = self._normalize_method_name(method)
if self.has_integration(path, method):
raise ValueError("Lambda integration already exists on Path={}, Method={}".format(path, method))
self.add_path(path, method)
self.paths[path][method][self._X_APIGW_INTEGRATION] = {
'type': 'aws_proxy',
'httpMethod': 'POST',
'uri': integration_uri
}
# If 'responses' key is *not* present, add it with an empty dict as value
self.paths[path][method].setdefault('responses', {})
def iter_on_path(self):
"""
Yields all the paths available in the Swagger. As a caller, if you add new paths to Swagger while iterating,
they will not show up in this iterator
:yields string: Path name
"""
for path, value in self.paths.items():
yield path
def add_cors(self, path, allowed_origins, allowed_headers=None, allowed_methods=None, max_age=None):
    """
    Add CORS configuration to this path by installing an OPTIONS method whose mock
    integration returns the headers required for CORS. Because SAM uses aws_proxy
    integration, the headers cannot be injected into the real Lambda response; customers
    must handle that themselves.

    If an OPTIONS method already exists for the path, this is a no-op.

    Following this guide:
    https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-cors.html#enable-cors-for-resource-using-swagger-importer-tool

    :param string path: Path to add the CORS configuration to.
    :param string/dict allowed_origins: Comma-separated list of allowed origins.
        Value can also be an intrinsic function dict.
    :param string/dict allowed_headers: Comma-separated list of allowed headers.
        Value can also be an intrinsic function dict.
    :param string/dict allowed_methods: Comma-separated list of allowed methods.
        Value can also be an intrinsic function dict.
    :param integer/dict max_age: Maximum duration to cache the CORS Preflight request.
        Set on the Access-Control-Max-Age header. Value can also be an intrinsic function dict.
    :raises ValueError: When values for one of the allowed_* variables is empty
    """
    # An existing OPTIONS method means CORS is already configured; leave it alone
    if self.has_path(path, self._OPTIONS_METHOD):
        return

    if not allowed_origins:
        raise ValueError("Invalid input. Value for AllowedOrigins is required")

    if not allowed_methods:
        # No explicit AllowMethods: derive the list from the methods already in the
        # Swagger, then wrap in quotes because APIGW expects a "string expression"
        # (ex: "'GET,POST,DELETE'")
        allowed_methods = "'{}'".format(self._make_cors_allowed_methods_for_path(path))

    # Install the OPTIONS method carrying the CORS response configuration
    self.add_path(path, self._OPTIONS_METHOD)
    cors_configuration = self._options_method_response_for_cors(
        allowed_origins, allowed_headers, allowed_methods, max_age)
    self.paths[path][self._OPTIONS_METHOD] = cors_configuration
def _options_method_response_for_cors(self, allowed_origins, allowed_headers=None, allowed_methods=None,
                                      max_age=None):
    """
    Returns a Swagger snippet containing configuration for OPTIONS HTTP Method to configure CORS.

    This snippet is taken from public documentation:
    https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-cors.html#enable-cors-for-resource-using-swagger-importer-tool

    :param string/dict allowed_origins: Comma-separated list of allowed origins.
        Value can also be an intrinsic function dict.
    :param string/dict allowed_headers: Comma-separated list of allowed headers.
        Value can also be an intrinsic function dict.
    :param string/dict allowed_methods: Comma-separated list of allowed methods.
        Value can also be an intrinsic function dict.
    :param integer/dict max_age: Maximum duration to cache the CORS Preflight request. Value is set on
        Access-Control-Max-Age header. Value can also be an intrinsic function dict.
    :return dict: Dictionary containing Options method configuration for CORS
    """

    ALLOW_ORIGIN = "Access-Control-Allow-Origin"
    ALLOW_HEADERS = "Access-Control-Allow-Headers"
    ALLOW_METHODS = "Access-Control-Allow-Methods"
    MAX_AGE = "Access-Control-Max-Age"

    # PEP 8 (E731): use a nested def, not a lambda assigned to a name
    def header_response(header_name):
        # Expression API Gateway uses to address a response header inside the
        # integration's responseParameters map
        return "method.response.header." + header_name

    response_parameters = {
        # AllowedOrigin is always required
        header_response(ALLOW_ORIGIN): allowed_origins
    }

    response_headers = {
        # Allow Origin is always required
        ALLOW_ORIGIN: {
            "type": "string"
        }
    }

    # Optional values. Skip the header if value is empty
    #
    # The values must not be empty string or null. Also, value of '*' is a very recent addition (2017) and
    # not supported in all the browsers. So it is important to skip the header if value is not given
    # https://fetch.spec.whatwg.org/#http-new-header-syntax
    #
    if allowed_headers:
        response_parameters[header_response(ALLOW_HEADERS)] = allowed_headers
        response_headers[ALLOW_HEADERS] = {"type": "string"}

    if allowed_methods:
        response_parameters[header_response(ALLOW_METHODS)] = allowed_methods
        response_headers[ALLOW_METHODS] = {"type": "string"}

    if max_age is not None:
        # MaxAge can be set to 0, which is a valid value. So explicitly check against None
        response_parameters[header_response(MAX_AGE)] = max_age
        response_headers[MAX_AGE] = {"type": "integer"}

    return {
        "summary": "CORS support",
        "consumes": ["application/json"],
        "produces": ["application/json"],
        self._X_APIGW_INTEGRATION: {
            "type": "mock",
            "requestTemplates": {
                "application/json": "{\n \"statusCode\" : 200\n}\n"
            },
            "responses": {
                "default": {
                    "statusCode": "200",
                    "responseParameters": response_parameters,
                    "responseTemplates": {
                        "application/json": "{}\n"
                    }
                }
            }
        },
        "responses": {
            "200": {
                "description": "Default response for CORS method",
                "headers": response_headers
            }
        }
    }
def _make_cors_allowed_methods_for_path(self, path):
    """
    Build the value for the Access-Control-Allow-Methods header for the given path.
    All HTTP methods defined on the path are included; if the path carries the "ANY"
    pseudo-method, *all* standard HTTP methods are returned instead.

    :param string path: Path to generate AllowMethods value for
    :return string: Value of AllowMethods if the path has any methods; empty string otherwise
    """
    if not self.has_path(path):
        return ""

    # https://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html
    all_http_methods = ["OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "PATCH"]

    # At this point the Swagger path value is a dictionary keyed by method name
    methods = list(self.paths[path].keys())
    if self._X_ANY_METHOD in methods:
        # API Gateway's ANY is a wildcard, not a real HTTP method: expand it
        methods = all_http_methods

    # HTTP method names are case sensitive and must be upper case
    # (https://tools.ietf.org/html/rfc7231#section-4.1). Building a set both
    # de-duplicates and lets us unconditionally include OPTIONS, which is always
    # part of the CORS response. Sorting keeps the output stable even though
    # dictionary key order is not guaranteed.
    normalized = {m.upper() for m in methods}
    normalized.add("OPTIONS")

    # Allow-Methods is a comma separated string
    return ','.join(sorted(normalized))
@property
def swagger(self):
    """
    A deep **copy** of the Swagger document as a dictionary.

    :return dict: Dictionary containing the Swagger document
    """
    # Fold any path edits back into the document before handing out the copy
    document = self._doc
    document["paths"] = self.paths
    return copy.deepcopy(document)
@staticmethod
def is_valid(data):
"""
Checks if the input data is a Swagger document
:param dict data: Data to be validated
:return: True, if data is a Swagger
"""
return bool(data) and \
isinstance(data, dict) and \
bool(data.get("swagger")) and \
isinstance(data.get('paths'), dict)
@staticmethod
def gen_skeleton():
    """
    Produce an empty Swagger document with just enough basic structure to pass the validator.

    :return dict: Dictionary of a skeleton swagger document
    """
    skeleton = {}
    skeleton['swagger'] = '2.0'
    skeleton['info'] = {
        'version': '1.0',
        'title': ref('AWS::StackName')
    }
    skeleton['paths'] = {}
    return skeleton
@staticmethod
def _normalize_method_name(method):
    """
    Return a lower-cased, normalized version of an HTTP method name. It also knows how
    to handle API Gateway specific pseudo-methods like "ANY".

    NOTE: Always normalize before using the `method` value passed in as input

    :param string method: Name of the HTTP Method
    :return string: Normalized method name
    """
    # Non-string or empty input is passed through untouched
    if not method or not isinstance(method, string_types):
        return method

    lowered = method.lower()
    return SwaggerEditor._X_ANY_METHOD if lowered == 'any' else lowered
|
import copy
from six import string_types
from samtranslator.model.intrinsics import ref
class SwaggerEditor(object):
    """
    Wrapper class capable of parsing and generating Swagger JSON. This implements Swagger spec just enough that SAM
    cares about. It is built to handle "partial Swagger" ie. Swagger that is incomplete and won't
    pass the Swagger spec. But this is necessary for SAM because it iteratively builds the Swagger starting from an
    empty skeleton.
    """

    # HTTP method under which the CORS preflight configuration is installed
    _OPTIONS_METHOD = "options"
    # API Gateway extension key holding the integration configuration of a method
    _X_APIGW_INTEGRATION = 'x-amazon-apigateway-integration'
    # API Gateway extension representing the "ANY" pseudo HTTP method
    _X_ANY_METHOD = 'x-amazon-apigateway-any-method'

    def __init__(self, doc):
        """
        Initialize the class with a swagger dictionary. This class creates a copy of the Swagger and performs all
        modifications on this copy.

        :param dict doc: Swagger document as a dictionary
        :raises ValueError: If the input Swagger document does not meet the basic Swagger requirements.
        """
        if not SwaggerEditor.is_valid(doc):
            raise ValueError("Invalid Swagger document")

        # Deep copy so the caller's document is never mutated by edits made here
        self._doc = copy.deepcopy(doc)
        # Direct reference into the copy; edits to self.paths edit the copy in place
        self.paths = self._doc["paths"]

    def has_path(self, path, method=None):
        """
        Returns True if this Swagger has the given path and optional method

        :param string path: Path name
        :param string method: HTTP method
        :return: True, if this path/method is present in the document
        """
        method = self._normalize_method_name(method)

        result = path in self.paths

        if method:
            result = result and \
                isinstance(self.paths[path], dict) and \
                method in self.paths[path]

        return result

    def has_integration(self, path, method):
        """
        Checks if an API Gateway integration is already present at the given path/method

        :param string path: Path name
        :param string method: HTTP method
        :return: True, if an API Gateway integration is already present
        """
        method = self._normalize_method_name(method)

        return self.has_path(path, method) and \
            isinstance(self.paths[path][method], dict) and \
            bool(self.paths[path][method].get(self._X_APIGW_INTEGRATION))  # Key should be present & Value is non-empty

    def add_path(self, path, method=None):
        """
        Adds the path/method combination to the Swagger, if not already present

        :param string path: Path name
        :param string method: HTTP method
        :raises ValueError: If the value of `path` in Swagger is not a dictionary
        """
        method = self._normalize_method_name(method)

        self.paths.setdefault(path, {})

        if not isinstance(self.paths[path], dict):
            # Either the customer has provided an invalid Swagger, or this class has messed it up somehow
            raise ValueError("Value of '{}' path must be a dictionary according to Swagger spec".format(path))

        self.paths[path].setdefault(method, {})

    def add_lambda_integration(self, path, method, integration_uri):
        """
        Adds aws_proxy APIGW integration to the given path+method.

        :param string path: Path name
        :param string method: HTTP Method
        :param string integration_uri: URI for the integration.
        :raises ValueError: If an integration is already configured for this path/method
        """

        method = self._normalize_method_name(method)
        if self.has_integration(path, method):
            raise ValueError("Lambda integration already exists on Path={}, Method={}".format(path, method))

        self.add_path(path, method)

        self.paths[path][method][self._X_APIGW_INTEGRATION] = {
            'type': 'aws_proxy',
            'httpMethod': 'POST',
            'uri': integration_uri
        }

        # If 'responses' key is *not* present, add it with an empty dict as value
        self.paths[path][method].setdefault('responses', {})

    def iter_on_path(self):
        """
        Yields all the paths available in the Swagger. As a caller, if you add new paths to Swagger while iterating,
        they will not show up in this iterator

        :yields string: Path name
        """

        for path, value in self.paths.items():
            yield path

    def add_cors(self, path, allowed_origins, allowed_headers=None, allowed_methods=None, max_age=None):
        """
        Add CORS configuration to this path. Specifically, we will add a OPTIONS response config to the Swagger that
        will return headers required for CORS. Since SAM uses aws_proxy integration, we cannot inject the headers
        into the actual response returned from Lambda function. This is something customers have to implement
        themselves.

        If OPTIONS method is already present for the Path, we will skip adding CORS configuration

        Following this guide:
        https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-cors.html#enable-cors-for-resource-using-swagger-importer-tool

        :param string path: Path to add the CORS configuration to.
        :param string/dict allowed_origins: Comma-separated list of allowed origins.
            Value can also be an intrinsic function dict.
        :param string/dict allowed_headers: Comma-separated list of allowed headers.
            Value can also be an intrinsic function dict.
        :param string/dict allowed_methods: Comma-separated list of allowed methods.
            Value can also be an intrinsic function dict.
        :param integer/dict max_age: Maximum duration to cache the CORS Preflight request. Value is set on
            Access-Control-Max-Age header. Value can also be an intrinsic function dict.
        :raises ValueError: When values for one of the allowed_* variables is empty
        """

        # Skip if Options is already present
        if self.has_path(path, self._OPTIONS_METHOD):
            return

        if not allowed_origins:
            raise ValueError("Invalid input. Value for AllowedOrigins is required")

        if not allowed_methods:
            # AllowMethods is not given. Let's try to generate the list from the given Swagger.
            allowed_methods = self._make_cors_allowed_methods_for_path(path)

            # APIGW expects the value to be a "string expression". Hence wrap in another quote. Ex: "'GET,POST,DELETE'"
            allowed_methods = "'{}'".format(allowed_methods)

        # Add the Options method and the CORS response
        self.add_path(path, self._OPTIONS_METHOD)
        self.paths[path][self._OPTIONS_METHOD] = self._options_method_response_for_cors(allowed_origins,
                                                                                        allowed_headers,
                                                                                        allowed_methods,
                                                                                        max_age)

    def _options_method_response_for_cors(self, allowed_origins, allowed_headers=None, allowed_methods=None,
                                          max_age=None):
        """
        Returns a Swagger snippet containing configuration for OPTIONS HTTP Method to configure CORS.

        This snippet is taken from public documentation:
        https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-cors.html#enable-cors-for-resource-using-swagger-importer-tool

        :param string/dict allowed_origins: Comma-separated list of allowed origins.
            Value can also be an intrinsic function dict.
        :param string/dict allowed_headers: Comma-separated list of allowed headers.
            Value can also be an intrinsic function dict.
        :param string/dict allowed_methods: Comma-separated list of allowed methods.
            Value can also be an intrinsic function dict.
        :param integer/dict max_age: Maximum duration to cache the CORS Preflight request. Value is set on
            Access-Control-Max-Age header. Value can also be an intrinsic function dict.
        :return dict: Dictionary containing Options method configuration for CORS
        """

        ALLOW_ORIGIN = "Access-Control-Allow-Origin"
        ALLOW_HEADERS = "Access-Control-Allow-Headers"
        ALLOW_METHODS = "Access-Control-Allow-Methods"
        MAX_AGE = "Access-Control-Max-Age"
        # Expression APIGW uses to address a response header in responseParameters
        HEADER_RESPONSE = lambda x: "method.response.header."+x

        response_parameters = {
            # AllowedOrigin is always required
            HEADER_RESPONSE(ALLOW_ORIGIN): allowed_origins
        }

        response_headers = {
            # Allow Origin is always required
            ALLOW_ORIGIN: {
                "type": "string"
            }
        }

        # Optional values. Skip the header if value is empty
        #
        # The values must not be empty string or null. Also, value of '*' is a very recent addition (2017) and
        # not supported in all the browsers. So it is important to skip the header if value is not given
        # https://fetch.spec.whatwg.org/#http-new-header-syntax
        #
        if allowed_headers:
            response_parameters[HEADER_RESPONSE(ALLOW_HEADERS)] = allowed_headers
            response_headers[ALLOW_HEADERS] = {"type": "string"}

        if allowed_methods:
            response_parameters[HEADER_RESPONSE(ALLOW_METHODS)] = allowed_methods
            response_headers[ALLOW_METHODS] = {"type": "string"}

        if max_age is not None:
            # MaxAge can be set to 0, which is a valid value. So explicitly check against None
            response_parameters[HEADER_RESPONSE(MAX_AGE)] = max_age
            response_headers[MAX_AGE] = {"type": "integer"}

        return {
            "summary": "CORS support",
            "consumes": ["application/json"],
            "produces": ["application/json"],
            self._X_APIGW_INTEGRATION: {
                "type": "mock",
                "requestTemplates": {
                    "application/json": "{\n \"statusCode\" : 200\n}\n"
                },
                "responses": {
                    "default": {
                        "statusCode": "200",
                        "responseParameters": response_parameters,
                        "responseTemplates": {
                            "application/json": "{}\n"
                        }
                    }
                }
            },
            "responses": {
                "200": {
                    "description": "Default response for CORS method",
                    "headers": response_headers
                }
            }
        }

    def _make_cors_allowed_methods_for_path(self, path):
        """
        Creates the value for Access-Control-Allow-Methods header for given path. All HTTP methods defined for this
        path will be included in the result. If the path contains "ANY" method, then *all available* HTTP methods will
        be returned as result.

        :param string path: Path to generate AllowMethods value for
        :return string: String containing the value of AllowMethods, if the path contains any methods.
            Empty string, otherwise
        """

        # https://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html
        all_http_methods = ["OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "PATCH"]

        if not self.has_path(path):
            return ""

        # At this point, value of Swagger path should be a dictionary with method names being the keys
        methods = list(self.paths[path].keys())

        if self._X_ANY_METHOD in methods:
            # API Gateway's ANY method is not a real HTTP method but a wildcard representing all HTTP methods
            allow_methods = all_http_methods
        else:
            allow_methods = methods
            allow_methods.append("options")  # Always add Options to the CORS methods response

        # Clean up the result:
        #
        # - HTTP Methods **must** be upper case and they are case sensitive.
        #   (https://tools.ietf.org/html/rfc7231#section-4.1)
        # - Convert to set to remove any duplicates
        # - Sort to keep this list stable because it could be constructed from dictionary keys which are *not* ordered.
        #   Therefore we might get back a different list each time the code runs. To prevent any unnecessary
        #   regression, we sort the list so the returned value is stable.
        allow_methods = list({m.upper() for m in allow_methods})
        allow_methods.sort()

        # Allow-Methods is comma separated string
        return ','.join(allow_methods)

    @property
    def swagger(self):
        """
        Returns a **copy** of the Swagger document as a dictionary.

        :return dict: Dictionary containing the Swagger document
        """

        # Make sure any changes to the paths are reflected back in output
        self._doc["paths"] = self.paths

        return copy.deepcopy(self._doc)

    @staticmethod
    def is_valid(data):
        """
        Checks if the input data is a Swagger document

        :param dict data: Data to be validated
        :return: True, if data is a Swagger
        """
        return bool(data) and \
            isinstance(data, dict) and \
            bool(data.get("swagger")) and \
            isinstance(data.get('paths'), dict)

    @staticmethod
    def gen_skeleton():
        """
        Method to make an empty swagger file, with just some basic structure. Just enough to pass validator.

        :return dict: Dictionary of a skeleton swagger document
        """
        return {
            'swagger': '2.0',
            'info': {
                'version': '1.0',
                'title': ref('AWS::StackName')
            },
            'paths': {
            }
        }

    @staticmethod
    def _normalize_method_name(method):
        """
        Returns a lower case, normalized version of HTTP Method. It also knows how to handle API Gateway specific
        methods like "ANY"

        NOTE: Always normalize before using the `method` value passed in as input

        :param string method: Name of the HTTP Method
        :return string: Normalized method name
        """
        if not method or not isinstance(method, string_types):
            return method

        method = method.lower()
        if method == 'any':
            return SwaggerEditor._X_ANY_METHOD
        else:
            return method
|
en
| 0.767096
|
Wrapper class capable of parsing and generating Swagger JSON. This implements Swagger spec just enough that SAM cares about. It is built to handle "partial Swagger" ie. Swagger that is incomplete and won't pass the Swagger spec. But this is necessary for SAM because it iteratively builds the Swagger starting from an empty skeleton. Initialize the class with a swagger dictionary. This class creates a copy of the Swagger and performs all modifications on this copy. :param dict doc: Swagger document as a dictionary :raises ValueError: If the input Swagger document does not meet the basic Swagger requirements. Returns True if this Swagger has the given path and optional method :param string path: Path name :param string method: HTTP method :return: True, if this path/method is present in the document Checks if an API Gateway integration is already present at the given path/method :param string path: Path name :param string method: HTTP method :return: True, if an API Gateway integration is already present # Key should be present & Value is non-empty Adds the path/method combination to the Swagger, if not already present :param string path: Path name :param string method: HTTP method :raises ValueError: If the value of `path` in Swagger is not a dictionary # Either customers has provided us an invalid Swagger, or this class has messed it somehow Adds aws_proxy APIGW integration to the given path+method. :param string path: Path name :param string method: HTTP Method :param string integration_uri: URI for the integration. # If 'responses' key is *not* present, add it with an empty dict as value Yields all the paths available in the Swagger. As a caller, if you add new paths to Swagger while iterating, they will not show up in this iterator :yields string: Path name Add CORS configuration to this path. Specifically, we will add a OPTIONS response config to the Swagger that will return headers required for CORS. 
Since SAM uses aws_proxy integration, we cannot inject the headers into the actual response returned from Lambda function. This is something customers have to implement themselves. If OPTIONS method is already present for the Path, we will skip adding CORS configuration Following this guide: https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-cors.html#enable-cors-for-resource-using-swagger-importer-tool :param string path: Path to add the CORS configuration to. :param string/dict allowed_origins: Comma separate list of allowed origins. Value can also be an intrinsic function dict. :param string/dict allowed_headers: Comma separated list of allowed headers. Value can also be an intrinsic function dict. :param string/dict allowed_methods: Comma separated list of allowed methods. Value can also be an intrinsic function dict. :param integer/dict max_age: Maximum duration to cache the CORS Preflight request. Value is set on Access-Control-Max-Age header. Value can also be an intrinsic function dict. :raises ValueError: When values for one of the allowed_* variables is empty # Skip if Options is already present # AllowMethods is not given. Let's try to generate the list from the given Swagger. # APIGW expects the value to be a "string expression". Hence wrap in another quote. Ex: "'GET,POST,DELETE'" # Add the Options method and the CORS response Returns a Swagger snippet containing configuration for OPTIONS HTTP Method to configure CORS. This snippet is taken from public documentation: https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-cors.html#enable-cors-for-resource-using-swagger-importer-tool :param string/dict allowed_origins: Comma separate list of allowed origins. Value can also be an intrinsic function dict. :param string/dict allowed_headers: Comma separated list of allowed headers. Value can also be an intrinsic function dict. :param string/dict allowed_methods: Comma separated list of allowed methods. 
Value can also be an intrinsic function dict. :param integer/dict max_age: Maximum duration to cache the CORS Preflight request. Value is set on Access-Control-Max-Age header. Value can also be an intrinsic function dict. :return dict: Dictionary containing Options method configuration for CORS # AllowedOrigin is always required # Allow Origin is always required # Optional values. Skip the header if value is empty # # The values must not be empty string or null. Also, value of '*' is a very recent addition (2017) and # not supported in all the browsers. So it is important to skip the header if value is not given # https://fetch.spec.whatwg.org/#http-new-header-syntax # # MaxAge can be set to 0, which is a valid value. So explicitly check against None Creates the value for Access-Control-Allow-Methods header for given path. All HTTP methods defined for this path will be included in the result. If the path contains "ANY" method, then *all available* HTTP methods will be returned as result. :param string path: Path to generate AllowMethods value for :return string: String containing the value of AllowMethods, if the path contains any methods. Empty string, otherwise # https://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html # At this point, value of Swagger path should be a dictionary with method names being the keys # API Gateway's ANY method is not a real HTTP method but a wildcard representing all HTTP methods # Always add Options to the CORS methods response # Clean up the result: # # - HTTP Methods **must** be upper case and they are case sensitive. # (https://tools.ietf.org/html/rfc7231#section-4.1) # - Convert to set to remove any duplicates # - Sort to keep this list stable because it could be constructed from dictionary keys which are *not* ordered. # Therefore we might get back a different list each time the code runs. To prevent any unnecessary # regression, we sort the list so the returned value is stable. 
# Allow-Methods is comma separated string Returns a **copy** of the Swagger document as a dictionary. :return dict: Dictionary containing the Swagger document # Make sure any changes to the paths are reflected back in output Checks if the input data is a Swagger document :param dict data: Data to be validated :return: True, if data is a Swagger Method to make an empty swagger file, with just some basic structure. Just enough to pass validator. :return dict: Dictionary of a skeleton swagger document Returns a lower case, normalized version of HTTP Method. It also know how to handle API Gateway specific methods like "ANY" NOTE: Always normalize before using the `method` value passed in as input :param string method: Name of the HTTP Method :return string: Normalized method name
| 2.339125
| 2
|