blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
385aee105ef70b0597d7b7110dd3fece93ab0f46 | 9f9f5f35447bdd6b97f53a4501c31f9b8a18fee1 | /code/main.py | a37d1229b74eda718102f7283f3bc6cfcddc54bf | [] | no_license | omidMemari/Graphical-Models-for-Inference | d36214cbd3b131e875ff755f22da921ed38bf97a | 44616752ab735997b93d10eac0913c107274f527 | refs/heads/master | 2020-06-29T03:44:23.373935 | 2019-08-04T01:21:38 | 2019-08-04T01:21:38 | 200,431,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,886 | py | from string import ascii_lowercase
import numpy as np
from ref_optimize import ref_optimize
def read_train(filename):
    """Load training words from *filename* (OCR letter data).

    Each line is a space-separated record: field 1 is the letter label
    (a-z), field 2 is the id of the next letter (negative marks the end
    of a word), and fields 5+ are the pixel features.  The file is
    expected to end with a trailing newline (the last, empty split entry
    is dropped).

    :returns: list of ``(X, y)`` pairs, one per word, where ``X`` is a
        2-D float array of per-letter features and ``y`` the int labels.
    """
    # Map 'a' -> 0, 'b' -> 1, ... 'z' -> 25.
    letter_index = {ch: i for i, ch in enumerate(ascii_lowercase)}
    with open(filename, "r") as handle:
        lines = handle.read().split("\n")
    words_X, words_Y = [], []
    cur_X, cur_Y = [], []
    for line in lines[:-1]:
        fields = line.split(" ")
        cur_Y.append(letter_index[fields[1]])
        cur_X.append(np.array(fields[5:], dtype=float))
        # A negative "next letter" id terminates the current word.
        if int(fields[2]) < 0:
            words_X.append(np.array(cur_X))
            words_Y.append(np.array(cur_Y, dtype=int))
            cur_X, cur_Y = [], []
    return list(zip(words_X, words_Y))
def read_test(filename):
    """Load testing words from *filename*.

    The test file uses exactly the same OCR record layout as the training
    file (label in field 1, next-letter id in field 2, features in fields
    5+), so this delegates to :func:`read_train` instead of keeping a
    byte-for-byte duplicate of its parsing loop.

    :returns: list of ``(X, y)`` pairs, one per word.
    """
    return read_train(filename)
def read_model():
    """Read pre-trained model parameters for part 2a from ../data/model.txt.

    :returns: ``(W, T)`` where ``W`` is the (26, 128) per-letter weight
        matrix and ``T`` the (26, 26) transition matrix (transposed from
        its on-disk layout).
    """
    with open("../data/model.txt", "r") as handle:
        values = handle.read().split("\n")
    # First 26*128 numbers: per-letter feature weights, one row per letter.
    W = np.array(values[:26 * 128], dtype=float).reshape(26, 128)
    # Remaining numbers form the transition matrix; the file ends with a
    # newline, hence the trailing empty entry dropped by [:-1].
    T = np.array(values[26 * 128:-1], dtype=float).reshape(26, 26)
    # Swap axes so indexing matches the convention used by the solver.
    T = np.swapaxes(T, 0, 1)
    return W, T
def main():
    """Entry point: load the train/test sets and (optionally) optimize."""
    train_filename = "../data/train_small.txt"
    test_filename = "../data/test.txt"
    train_data = read_train(train_filename)
    # Bug fix: the test set was previously loaded with read_train; use the
    # dedicated test reader so the two code paths stay independent.
    test_data = read_test(test_filename)
    # print ref_optimize(train_data, test_data, c=1000)
#main()
| [
"noreply@github.com"
] | noreply@github.com |
7fd84e341731f0099dc64fe1a810fec10afbe7f9 | 810f29a3a5f681d28218fed75580ddd73ae94b44 | /gplib.py | d2d38e218ce3640fdf2729f278849fe7c4001573 | [
"MIT"
] | permissive | juancq/shape-deform | 1ca633b302203a13645418e860e73eac37a97f4c | aff55147027fe3607b790d61d6cd018e76e04256 | refs/heads/master | 2021-01-16T17:48:58.949374 | 2017-08-16T09:26:11 | 2017-08-16T09:26:11 | 78,915,826 | 1 | 14 | null | 2017-05-30T03:33:36 | 2017-01-14T05:18:01 | JavaScript | UTF-8 | Python | false | false | 47,610 | py | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
"""The :mod:`gp` module provides the methods and classes to perform
Genetic Programming with DEAP. It essentially contains the classes to
build a Genetic Program Tree, and the functions to evaluate it.
This module support both strongly and loosely typed GP.
"""
import copy
import math
import random
import re
import sys
import warnings
from collections import defaultdict, deque
from functools import partial, wraps
from inspect import isclass
from operator import eq, lt
from deap import tools # Needed by HARM-GP
######################################
# GP Data structure                  #
######################################
# Define the name of type for any types.
# In the loosely typed API every primitive/terminal is registered under this
# single sentinel type, which makes all nodes mutually compatible.
__type__ = object
class PrimitiveTree(list):
    """Tree specifically formatted for optimization of genetic
    programming operations. The tree is represented with a
    list where the nodes are appended in a depth-first order.
    The nodes appended to the tree are required to
    have an attribute *arity* which defines the arity of the
    primitive. An arity of 0 is expected from terminals nodes.
    """
    def __init__(self, content):
        # A tree is just a flat list of nodes in depth-first (prefix) order.
        list.__init__(self, content)
    def __deepcopy__(self, memo):
        # Nodes themselves are shared (copied shallowly via the list
        # constructor); only the extra instance attributes (e.g. fitness
        # added by DEAP's creator) are deep-copied.
        new = self.__class__(self)
        new.__dict__.update(copy.deepcopy(self.__dict__, memo))
        return new
    def __setitem__(self, key, val):
        # Check for most common errors
        # Does NOT check for STGP constraints
        if isinstance(key, slice):
            if key.start >= len(self):
                raise IndexError("Invalid slice object (try to assign a %s"
                                 " in a tree of size %d). Even if this is allowed by the"
                                 " list object slice setter, this should not be done in"
                                 " the PrimitiveTree context, as this may lead to an"
                                 " unpredictable behavior for searchSubtree or evaluate."
                                 % (key, len(self)))
            # A valid subtree must "consume" exactly as many slots as its
            # primitives demand: arity of the root, minus one per extra node.
            total = val[0].arity
            for node in val[1:]:
                total += node.arity - 1
            if total != 0:
                raise ValueError("Invalid slice assignation : insertion of"
                                 " an incomplete subtree is not allowed in PrimitiveTree."
                                 " A tree is defined as incomplete when some nodes cannot"
                                 " be mapped to any position in the tree, considering the"
                                 " primitives' arity. For instance, the tree [sub, 4, 5,"
                                 " 6] is incomplete if the arity of sub is 2, because it"
                                 " would produce an orphan node (the 6).")
        elif val.arity != self[key].arity:
            # Single-node replacement must not change the tree's shape.
            raise ValueError("Invalid node replacement with a node of a"
                             " different arity.")
        list.__setitem__(self, key, val)
    def js_str(self):
        """Return the expression in a human readable string.
        Unlike :meth:`__str__`, binary primitives are rendered infix via
        :meth:`Primitive.d_format` (e.g. ``(a add b)``); all other nodes
        use the regular call syntax.
        """
        string = ""
        stack = []
        # Iterative depth-first rendering: each stack entry pairs a node
        # with the strings already produced for its children.
        for node in self:
            stack.append((node, []))
            while len(stack[-1][1]) == stack[-1][0].arity:
                prim, args = stack.pop()
                if type(prim) is Primitive:
                    string = prim.d_format(*args)
                else:
                    string = prim.format(*args)
                if len(stack) == 0:
                    break  # If stack is empty, all nodes should have been seen
                stack[-1][1].append(string)
        return string
    def __str__(self):
        """Return the expression in a human readable string.
        """
        string = ""
        stack = []
        # Same traversal as js_str, but always prefix/call notation.
        for node in self:
            stack.append((node, []))
            while len(stack[-1][1]) == stack[-1][0].arity:
                prim, args = stack.pop()
                string = prim.format(*args)
                if len(stack) == 0:
                    break  # If stack is empty, all nodes should have been seen
                stack[-1][1].append(string)
        return string
    @classmethod
    def from_string(cls, string, pset):
        """Try to convert a string expression into a PrimitiveTree given a
        PrimitiveSet *pset*. The primitive set needs to contain every primitive
        present in the expression.
        :param string: String representation of a Python expression.
        :param pset: Primitive set from which primitives are selected.
        :returns: PrimitiveTree populated with the deserialized primitives.
        """
        tokens = re.split("[ \t\n\r\f\v(),]", string)
        expr = []
        # Queue of return types the upcoming tokens must satisfy (STGP).
        ret_types = deque()
        for token in tokens:
            if token == '':
                continue
            if len(ret_types) != 0:
                type_ = ret_types.popleft()
            else:
                type_ = None
            if token in pset.mapping:
                primitive = pset.mapping[token]
                if type_ is not None and not issubclass(primitive.ret, type_):
                    raise TypeError("Primitive {} return type {} does not "
                                    "match the expected one: {}."
                                    .format(primitive, primitive.ret, type_))
                expr.append(primitive)
                if isinstance(primitive, Primitive):
                    # Queue this primitive's argument types; extendleft
                    # reverses, so reversed() keeps args in original order.
                    ret_types.extendleft(reversed(primitive.args))
            else:
                # Unknown name: treat the token as a literal terminal.
                # NOTE(review): uses eval() on the token — only feed this
                # method trusted expression strings.
                try:
                    token = eval(token)
                except NameError:
                    raise TypeError("Unable to evaluate terminal: {}.".format(token))
                if type_ is None:
                    type_ = type(token)
                if not issubclass(type(token), type_):
                    raise TypeError("Terminal {} type {} does not "
                                    "match the expected one: {}."
                                    .format(token, type(token), type_))
                expr.append(Terminal(token, False, type_))
        return cls(expr)
    @property
    def height(self):
        """Return the height of the tree, or the depth of the
        deepest node.
        """
        # The stack holds the depth each upcoming node will sit at.
        stack = [0]
        max_depth = 0
        for elem in self:
            depth = stack.pop()
            max_depth = max(max_depth, depth)
            stack.extend([depth + 1] * elem.arity)
        return max_depth
    @property
    def root(self):
        """Root of the tree, the element 0 of the list.
        """
        return self[0]
    def searchSubtree(self, begin):
        """Return a slice object that corresponds to the
        range of values that defines the subtree which has the
        element with index *begin* as its root.
        """
        # Scan forward until all pending arities are satisfied.
        end = begin + 1
        total = self[begin].arity
        while total > 0:
            total += self[end].arity - 1
            end += 1
        return slice(begin, end)
class Primitive(object):
    """Class that encapsulates a primitive and when called with arguments it
    returns the Python code to call the primitive with the arguments.
    >>> pr = Primitive("mul", (int, int), int)
    >>> pr.format(1, 2)
    'mul(1, 2)'
    """
    __slots__ = ('name', 'arity', 'args', 'ret', 'seq')
    def __init__(self, name, args, ret):
        self.name = name
        self.arity = len(args)
        self.args = args
        self.ret = ret
        # Pre-build the call template once, e.g. "mul({0}, {1})".
        placeholders = ", ".join("{%d}" % i for i in range(self.arity))
        self.seq = "%s(%s)" % (self.name, placeholders)
    def format(self, *args):
        # Prefix/call rendering: name(arg0, arg1, ...).
        return self.seq.format(*args)
    def d_format(self, *args):
        # Infix rendering for binary operators (used by the JS-style
        # output); anything else falls back to the call template.
        if self.arity != 2:
            return self.seq.format(*args)
        return "({arg1} {name} {arg2})".format(name=self.name,
                                               arg1=args[0], arg2=args[1])
    def __eq__(self, other):
        # Equality requires the exact same class and identical slot values.
        if type(self) is not type(other):
            return NotImplemented
        return all(getattr(self, slot) == getattr(other, slot)
                   for slot in self.__slots__)
class Terminal(object):
    """Class that encapsulates terminal primitive in expression. Terminals can
    be values or 0-arity functions.
    """
    __slots__ = ('name', 'value', 'ret', 'conv_fct')
    def __init__(self, terminal, symbolic, ret):
        self.value = terminal
        self.ret = ret
        self.name = str(terminal)
        # Symbolic terminals (argument names, named constants) are rendered
        # verbatim with str(); literal values use repr() so the generated
        # code evaluates back to the same object.
        self.conv_fct = str if symbolic else repr
    @property
    def arity(self):
        # Terminals never take arguments.
        return 0
    def format(self):
        return self.conv_fct(self.value)
    def __eq__(self, other):
        # Equality requires the exact same class and identical slot values.
        if type(self) is not type(other):
            return NotImplemented
        return all(getattr(self, slot) == getattr(other, slot)
                   for slot in self.__slots__)
class Ephemeral(Terminal):
    """Class that encapsulates a terminal which value is set when the
    object is created. To mutate the value, a new object has to be
    generated. This is an abstract base class. When subclassing, a
    staticmethod 'func' must be defined.
    """
    def __init__(self):
        # The value is drawn exactly once here and then frozen; mutating an
        # ephemeral (see mutEphemeral) re-instantiates the subclass.
        Terminal.__init__(self, self.func(), symbolic=False, ret=self.ret)
    @staticmethod
    def func():
        """Return a random value used to define the ephemeral state.
        """
        raise NotImplementedError
class PrimitiveSetTyped(object):
    """Class that contains the primitives that can be used to solve a
    Strongly Typed GP problem. The set also defined the researched
    function return type, and input arguments type and number.
    """
    def __init__(self, name, in_types, ret_type, prefix="ARG"):
        # Primitives and terminals are bucketed by the type they can return.
        self.terminals = defaultdict(list)
        self.primitives = defaultdict(list)
        self.arguments = []
        # setting "__builtins__" to None avoid the context
        # being polluted by builtins function when evaluating
        # GP expression.
        self.context = {"__builtins__": None}
        self.mapping = dict()
        self.terms_count = 0
        self.prims_count = 0
        self.name = name
        self.ret = ret_type
        self.ins = in_types
        # Each input argument becomes a symbolic terminal named
        # "<prefix><index>", e.g. ARG0, ARG1, ...
        for i, type_ in enumerate(in_types):
            arg_str = "{prefix}{index}".format(prefix=prefix, index=i)
            self.arguments.append(arg_str)
            term = Terminal(arg_str, True, type_)
            self._add(term)
            self.terms_count += 1
    def renameArguments(self, **kargs):
        """Rename function arguments with new names from *kargs*.
        """
        for i, old_name in enumerate(self.arguments):
            if old_name in kargs:
                new_name = kargs[old_name]
                self.arguments[i] = new_name
                # Re-key the mapping and update the terminal's rendered value.
                self.mapping[new_name] = self.mapping[old_name]
                self.mapping[new_name].value = new_name
                del self.mapping[old_name]
    def _add(self, prim):
        # Register *prim* in every type bucket it is substitutable for.
        def addType(dict_, ret_type):
            # Lazily create the bucket for *ret_type*, seeding it with all
            # already-registered items whose type is a subclass of it.
            if not ret_type in dict_:
                new_list = []
                for type_, list_ in dict_.items():
                    if issubclass(type_, ret_type):
                        for item in list_:
                            if not item in new_list:
                                new_list.append(item)
                dict_[ret_type] = new_list
        addType(self.primitives, prim.ret)
        addType(self.terminals, prim.ret)
        self.mapping[prim.name] = prim
        if isinstance(prim, Primitive):
            # Ensure buckets exist for all argument types as well, so tree
            # generation can always look them up.
            for type_ in prim.args:
                addType(self.primitives, type_)
                addType(self.terminals, type_)
            dict_ = self.primitives
        else:
            dict_ = self.terminals
        # Append the new item to every compatible (superclass) bucket.
        for type_ in dict_:
            if issubclass(prim.ret, type_):
                dict_[type_].append(prim)
    def addPrimitive(self, primitive, in_types, ret_type, name=None):
        """Add a primitive to the set.
        :param primitive: callable object or a function.
        :parma in_types: list of primitives arguments' type
        :param ret_type: type returned by the primitive.
        :param name: alternative name for the primitive instead
                     of its __name__ attribute.
        """
        if name is None:
            name = primitive.__name__
        prim = Primitive(name, in_types, ret_type)
        assert name not in self.context or \
               self.context[name] is primitive, \
            "Primitives are required to have a unique name. " \
            "Consider using the argument 'name' to rename your "\
            "second '%s' primitive." % (name,)
        self._add(prim)
        # The callable itself lives in the eval context under its name.
        self.context[prim.name] = primitive
        self.prims_count += 1
    def addTerminal(self, terminal, ret_type, name=None):
        """Add a terminal to the set. Terminals can be named
        using the optional *name* argument. This should be
        used : to define named constant (i.e.: pi); to speed the
        evaluation time when the object is long to build; when
        the object does not have a __repr__ functions that returns
        the code to build the object; when the object class is
        not a Python built-in.
        :param terminal: Object, or a function with no arguments.
        :param ret_type: Type of the terminal.
        :param name: defines the name of the terminal in the expression.
        """
        symbolic = False
        if name is None and callable(terminal):
            name = terminal.__name__
        assert name not in self.context, \
            "Terminals are required to have a unique name. " \
            "Consider using the argument 'name' to rename your "\
            "second %s terminal." % (name,)
        if name is not None:
            # Named terminals are rendered symbolically and resolved through
            # the eval context instead of via repr().
            self.context[name] = terminal
            terminal = name
            symbolic = True
        elif terminal in (True, False):
            # To support True and False terminals with Python 2.
            self.context[str(terminal)] = terminal
        prim = Terminal(terminal, symbolic, ret_type)
        self._add(prim)
        self.terms_count += 1
    def addEphemeralConstant(self, name, ephemeral, ret_type):
        """Add an ephemeral constant to the set. An ephemeral constant
        is a no argument function that returns a random value. The value
        of the constant is constant for a Tree, but may differ from one
        Tree to another.
        :param name: name used to refers to this ephemeral type.
        :param ephemeral: function with no arguments returning a random value.
        :param ret_type: type of the object returned by *ephemeral*.
        """
        module_gp = globals()
        if not name in module_gp:
            # Create a new Ephemeral subclass bound to *ephemeral* and store
            # it at module level so pickling/copying can find it by name.
            class_ = type(name, (Ephemeral,), {'func': staticmethod(ephemeral),
                                               'ret': ret_type})
            module_gp[name] = class_
        else:
            # Name already taken at module level: it must be the exact same
            # ephemeral definition, otherwise refuse to silently alias it.
            class_ = module_gp[name]
            if issubclass(class_, Ephemeral):
                if class_.func is not ephemeral:
                    raise Exception("Ephemerals with different functions should "
                                    "be named differently, even between psets.")
                elif class_.ret is not ret_type:
                    raise Exception("Ephemerals with the same name and function "
                                    "should have the same type, even between psets.")
            else:
                raise Exception("Ephemerals should be named differently "
                                "than classes defined in the gp module.")
        self._add(class_)
        self.terms_count += 1
    def addADF(self, adfset):
        """Add an Automatically Defined Function (ADF) to the set.
        :param adfset: PrimitiveSetTyped containing the primitives with which
                       the ADF can be built.
        """
        # The ADF is exposed here as a plain primitive; its body is compiled
        # separately by compileADF.
        prim = Primitive(adfset.name, adfset.ins, adfset.ret)
        self._add(prim)
        self.prims_count += 1
    @property
    def terminalRatio(self):
        """Return the ratio of the number of terminals on the number of all
        kind of primitives.
        """
        return self.terms_count / float(self.terms_count + self.prims_count)
class PrimitiveSet(PrimitiveSetTyped):
    """Class same as :class:`~deap.gp.PrimitiveSetTyped`, except there is no
    definition of type.
    """
    def __init__(self, name, arity, prefix="ARG"):
        # Every argument shares the universal sentinel type __type__.
        args = [__type__] * arity
        PrimitiveSetTyped.__init__(self, name, args, __type__, prefix)
        # Optional JavaScript rendering hint; see addPrimitive below.
        self.js = None
    def addPrimitive(self, primitive, arity, name=None, js=None):
        """Add primitive *primitive* with arity *arity* to the set.
        If a name *name* is provided, it will replace the attribute __name__
        attribute to represent/identify the primitive.
        """
        assert arity > 0, "arity should be >= 1"
        args = [__type__] * arity
        # NOTE(review): this overwrites self.js on every call, so only the
        # js value of the *last* added primitive is retained — confirm that
        # is the intended behavior.
        self.js = js
        PrimitiveSetTyped.addPrimitive(self, primitive, args, __type__, name)
    def addTerminal(self, terminal, name=None):
        """Add a terminal to the set."""
        PrimitiveSetTyped.addTerminal(self, terminal, __type__, name)
    def addEphemeralConstant(self, name, ephemeral):
        """Add an ephemeral constant to the set."""
        PrimitiveSetTyped.addEphemeralConstant(self, name, ephemeral, __type__)
######################################
# GP Tree compilation functions #
######################################
def compile(expr, pset):
    """Compile the expression *expr*.
    :param expr: Expression to compile. It can either be a PrimitiveTree,
                 a string of Python code or any object that when
                 converted into string produced a valid Python code
                 expression.
    :param pset: Primitive set against which the expression is compile.
    :returns: a function if the primitive set has 1 or more arguments,
              or return the results produced by evaluating the tree.
    """
    code = str(expr)
    if len(pset.arguments) > 0:
        # This section is a stripped version of the lambdify
        # function of SymPy 0.6.6.
        args = ",".join(arg for arg in pset.arguments)
        code = "lambda {args}: {code}".format(args=args, code=code)
    try:
        # The eval context is restricted to pset.context (builtins disabled
        # in PrimitiveSetTyped.__init__); still, only evaluate trees built
        # from trusted primitive sets.
        return eval(code, pset.context, {})
    except MemoryError:
        # Deep trees blow Python's parser/call-stack limits; re-raise with
        # a more actionable message, preserving the original traceback.
        # NOTE: the three-expression raise below is Python 2 only syntax.
        _, _, traceback = sys.exc_info()
        raise MemoryError, ("DEAP : Error in tree evaluation :"
                            " Python cannot evaluate a tree higher than 90. "
                            "To avoid this problem, you should use bloat control on your "
                            "operators. See the DEAP documentation for more information. "
                            "DEAP will now abort."), traceback
def compileADF(expr, psets):
    """Compile the expression represented by a list of trees. The first
    element of the list is the main tree, and the following elements are
    automatically defined functions (ADF) that can be called by the first
    tree.
    :param expr: Expression to compile. It can either be a PrimitiveTree,
                 a string of Python code or any object that when
                 converted into string produced a valid Python code
                 expression.
    :param psets: List of primitive sets. Each set corresponds to an ADF
                  while the last set is associated with the expression
                  and should contain reference to the preceding ADFs.
    :returns: a function if the main primitive set has 1 or more arguments,
              or return the results produced by evaluating the tree.
    """
    adfdict = {}
    func = None
    # Compile in reverse so each ADF is available (via the eval context) to
    # the sets compiled after it; the main tree is compiled last, so *func*
    # ends up holding the main compiled function.
    # NOTE(review): reversed(zip(...)) relies on zip returning a list, i.e.
    # Python 2; under Python 3 this raises TypeError.
    for pset, subexpr in reversed(zip(psets, expr)):
        pset.context.update(adfdict)
        func = compile(subexpr, pset)
        adfdict.update({pset.name: func})
    return func
######################################
# GP Program generation functions #
######################################
def genFull(pset, min_, max_, type_=None):
    """Generate a full expression tree: every leaf ends up at the same
    depth, chosen uniformly between *min_* and *max_*.
    :param pset: Primitive set from which primitives are selected.
    :param min_: Minimum height of the produced trees.
    :param max_: Maximum Height of the produced trees.
    :param type_: The type that should return the tree when called, when
                  :obj:`None` (default) the type of :pset: (pset.ret)
                  is assumed.
    :returns: A full tree with all leaves at the same depth.
    """
    def reached_height(height, depth):
        """Stop expanding only once the target height is reached."""
        return depth == height
    return generate(pset, min_, max_, reached_height, type_)
def genGrow(pset, min_, max_, type_=None):
    """Generate a "grown" expression tree whose leaves may sit at
    different depths between *min_* and *max_*.
    :param pset: Primitive set from which primitives are selected.
    :param min_: Minimum height of the produced trees.
    :param max_: Maximum Height of the produced trees.
    :param type_: The type that should return the tree when called, when
                  :obj:`None` (default) the type of :pset: (pset.ret)
                  is assumed.
    :returns: A grown tree with leaves at possibly different depths.
    """
    def stop_growing(height, depth):
        """Stop at the target height, or — once past *min_* — randomly,
        with probability equal to the set's terminal ratio."""
        if depth == height:
            return True
        return depth >= min_ and random.random() < pset.terminalRatio
    return generate(pset, min_, max_, stop_growing, type_)
def genHalfAndHalf(pset, min_, max_, type_=None):
    """Generate an expression with a PrimitiveSet *pset*: half the time via
    :func:`~deap.gp.genGrow`, the other half via :func:`~deap.gp.genFull`.
    :param pset: Primitive set from which primitives are selected.
    :param min_: Minimum height of the produced trees.
    :param max_: Maximum Height of the produced trees.
    :param type_: The type that should return the tree when called, when
                  :obj:`None` (default) the type of :pset: (pset.ret)
                  is assumed.
    :returns: Either, a full or a grown tree.
    """
    # Draw the generation strategy uniformly for each call.
    chosen_method = random.choice((genGrow, genFull))
    return chosen_method(pset, min_, max_, type_)
def genRamped(pset, min_, max_, type_=None):
    """
    .. deprecated:: 1.0
        The function has been renamed. Use :func:`~deap.gp.genHalfAndHalf` instead.
    """
    # Deprecated alias kept for backward compatibility; emits a FutureWarning
    # and forwards all arguments unchanged.
    warnings.warn("gp.genRamped has been renamed. Use genHalfAndHalf instead.",
                  FutureWarning)
    return genHalfAndHalf(pset, min_, max_, type_)
def generate(pset, min_, max_, condition, type_=None):
"""Generate a Tree as a list of list. The tree is build
from the root to the leaves, and it stop growing when the
condition is fulfilled.
:param pset: Primitive set from which primitives are selected.
:param min_: Minimum height of the produced trees.
:param max_: Maximum Height of the produced trees.
:param condition: The condition is a function that takes two arguments,
the height of the tree to build and the current
depth in the tree.
:param type_: The type that should return the tree when called, when
:obj:`None` (default) the type of :pset: (pset.ret)
is assumed.
:returns: A grown tree with leaves at possibly different depths
dependending on the condition function.
"""
if type_ is None:
type_ = pset.ret
expr = []
height = random.randint(min_, max_)
stack = [(0, type_)]
while len(stack) != 0:
depth, type_ = stack.pop()
if condition(height, depth):
try:
term = random.choice(pset.terminals[type_])
except IndexError:
_, _, traceback = sys.exc_info()
raise IndexError, "The gp.generate function tried to add "\
"a terminal of type '%s', but there is "\
"none available." % (type_,), traceback
if isclass(term):
term = term()
expr.append(term)
else:
try:
prim = random.choice(pset.primitives[type_])
except IndexError:
_, _, traceback = sys.exc_info()
raise IndexError, "The gp.generate function tried to add "\
"a primitive of type '%s', but there is "\
"none available." % (type_,), traceback
expr.append(prim)
for arg in reversed(prim.args):
stack.append((depth + 1, arg))
print 'in gen: ', expr
return expr
######################################
# GP Crossovers #
######################################
def cxOnePoint(ind1, ind2):
    """Randomly select in each individual and exchange each subtree with the
    point as root between each individual.
    :param ind1: First tree participating in the crossover.
    :param ind2: Second tree participating in the crossover.
    :returns: A tuple of two trees.
    """
    if len(ind1) < 2 or len(ind2) < 2:
        # No crossover on single node tree
        return ind1, ind2
    # List all available primitive types in each individual
    types1 = defaultdict(list)
    types2 = defaultdict(list)
    if ind1.root.ret == __type__:
        # Not STGP optimization
        # Untyped GP: any node except the root is a candidate crossover
        # point.  NOTE: xrange is Python 2 only.
        types1[__type__] = xrange(1, len(ind1))
        types2[__type__] = xrange(1, len(ind2))
        common_types = [__type__]
    else:
        # Strongly typed GP: group candidate indices by return type so only
        # type-compatible subtrees are exchanged.
        for idx, node in enumerate(ind1[1:], 1):
            types1[node.ret].append(idx)
        for idx, node in enumerate(ind2[1:], 1):
            types2[node.ret].append(idx)
        common_types = set(types1.keys()).intersection(set(types2.keys()))
    if len(common_types) > 0:
        # Pick one shared type, then one crossover point per individual.
        type_ = random.choice(list(common_types))
        index1 = random.choice(types1[type_])
        index2 = random.choice(types2[type_])
        slice1 = ind1.searchSubtree(index1)
        slice2 = ind2.searchSubtree(index2)
        # Swap the two whole subtrees in place.
        ind1[slice1], ind2[slice2] = ind2[slice2], ind1[slice1]
    return ind1, ind2
def cxOnePointLeafBiased(ind1, ind2, termpb):
    """Randomly select crossover point in each individual and exchange each
    subtree with the point as root between each individual.
    :param ind1: First typed tree participating in the crossover.
    :param ind2: Second typed tree participating in the crossover.
    :param termpb: The probability of chosing a terminal node (leaf).
    :returns: A tuple of two typed trees.
    When the nodes are strongly typed, the operator makes sure the
    second node type corresponds to the first node type.
    The parameter *termpb* sets the probability to choose between a terminal
    or non-terminal crossover point. For instance, as defined by Koza, non-
    terminal primitives are selected for 90% of the crossover points, and
    terminals for 10%, so *termpb* should be set to 0.1.
    """
    if len(ind1) < 2 or len(ind2) < 2:
        # No crossover on single node tree
        return ind1, ind2
    # Determine wether we keep terminals or primitives for each individual
    # terminal_op matches arity == 0, primitive_op matches arity > 0.
    terminal_op = partial(eq, 0)
    primitive_op = partial(lt, 0)
    arity_op1 = terminal_op if random.random() < termpb else primitive_op
    arity_op2 = terminal_op if random.random() < termpb else primitive_op
    # List all available primitive or terminal types in each individual
    types1 = defaultdict(list)
    types2 = defaultdict(list)
    for idx, node in enumerate(ind1[1:], 1):
        if arity_op1(node.arity):
            types1[node.ret].append(idx)
    for idx, node in enumerate(ind2[1:], 1):
        if arity_op2(node.arity):
            types2[node.ret].append(idx)
    common_types = set(types1.keys()).intersection(set(types2.keys()))
    if len(common_types) > 0:
        # Set does not support indexing
        type_ = random.sample(common_types, 1)[0]
        index1 = random.choice(types1[type_])
        index2 = random.choice(types2[type_])
        slice1 = ind1.searchSubtree(index1)
        slice2 = ind2.searchSubtree(index2)
        # Swap the two whole subtrees in place.
        ind1[slice1], ind2[slice2] = ind2[slice2], ind1[slice1]
    return ind1, ind2
######################################
# GP Mutations #
######################################
def mutUniform(individual, expr, pset):
    """Replace a uniformly chosen subtree of *individual* by a freshly
    generated expression of the same return type.
    :param individual: The tree to be mutated.
    :param expr: A function object that can generate an expression when
                 called.
    :returns: A tuple of one tree.
    """
    point = random.randrange(len(individual))
    subtree_slice = individual.searchSubtree(point)
    # Keep the replacement type-compatible with the removed subtree's root.
    ret_type = individual[point].ret
    individual[subtree_slice] = expr(pset=pset, type_=ret_type)
    return individual,
def mutNodeReplacement(individual, pset):
    """Swap one randomly chosen node (never the root) of *individual* for a
    node of the same kind drawn from *pset*: terminals are replaced by a
    terminal of the same return type, primitives by a primitive with the
    identical argument signature.
    :param individual: The normal or typed tree to be mutated.
    :returns: A tuple of one tree.
    """
    if len(individual) < 2:
        # Nothing to do on a single-node tree.
        return individual,
    pos = random.randrange(1, len(individual))
    old = individual[pos]
    if old.arity > 0:
        # Primitive: candidates must accept exactly the same argument types.
        candidates = [p for p in pset.primitives[old.ret] if p.args == old.args]
        individual[pos] = random.choice(candidates)
    else:
        # Terminal: draw any terminal with the same return type.
        replacement = random.choice(pset.terminals[old.ret])
        if isclass(replacement):
            # Ephemeral constants are classes; instantiate to get a value.
            replacement = replacement()
        individual[pos] = replacement
    return individual,
def mutEphemeral(individual, mode):
    """Redraw the value of ephemeral constants in *individual*.  With
    *mode* ``"one"`` a single randomly chosen ephemeral is regenerated;
    with ``"all"`` every ephemeral constant is regenerated.
    :param individual: The normal or typed tree to be mutated.
    :param mode: A string to indicate to change ``"one"`` or ``"all"``
                 ephemeral constants.
    :returns: A tuple of one tree.
    """
    if mode not in ["one", "all"]:
        raise ValueError("Mode must be one of \"one\" or \"all\"")
    targets = [i for i, node in enumerate(individual)
               if isinstance(node, Ephemeral)]
    if len(targets) > 0:
        if mode == "one":
            targets = (random.choice(targets),)
        for i in targets:
            # Instantiating the Ephemeral subclass draws a fresh value.
            individual[i] = type(individual[i])()
    return individual,
def mutInsert(individual, pset):
    """Inserts a new branch at a random position in *individual*. The subtree
    at the chosen position is used as child node of the created subtree, in
    that way, it is really an insertion rather than a replacement. Note that
    the original subtree will become one of the children of the new primitive
    inserted, but not perforce the first (its position is randomly selected if
    the new primitive has more than one child).
    :param individual: The normal or typed tree to be mutated.
    :returns: A tuple of one tree.
    """
    index = random.randrange(len(individual))
    node = individual[index]
    slice_ = individual.searchSubtree(index)
    choice = random.choice
    # As we want to keep the current node as children of the new one,
    # it must accept the return value of the current node
    primitives = [p for p in pset.primitives[node.ret] if node.ret in p.args]
    if len(primitives) == 0:
        # No primitive can host this node's type: mutation is a no-op.
        return individual,
    new_node = choice(primitives)
    new_subtree = [None] * len(new_node.args)
    # Pick which compatible argument slot receives the existing subtree.
    position = choice([i for i, a in enumerate(new_node.args) if a == node.ret])
    # Fill every other argument slot with a fresh terminal of the right type.
    for i, arg_type in enumerate(new_node.args):
        if i != position:
            term = choice(pset.terminals[arg_type])
            if isclass(term):
                # Ephemeral constants are classes; instantiate for a value.
                term = term()
            new_subtree[i] = term
    # Splice the original subtree into the chosen slot, prepend the new
    # primitive, and write the whole branch back in place.
    new_subtree[position:position + 1] = individual[slice_]
    new_subtree.insert(0, new_node)
    individual[slice_] = new_subtree
    return individual,
def mutShrink(individual):
    """This operator shrinks the *individual* by chosing randomly a branch and
    replacing it with one of the branch's arguments (also randomly chosen).
    :param individual: The tree to be shrinked.
    :returns: A tuple of one tree.
    """
    # We don't want to "shrink" the root
    if len(individual) < 3 or individual.height <= 1:
        return individual,
    # Candidate branches: primitives that take at least one argument of
    # their own return type, so a child can stand in for the whole branch.
    iprims = []
    for i, node in enumerate(individual[1:], 1):
        if isinstance(node, Primitive) and node.ret in node.args:
            iprims.append((i, node))
    if len(iprims) != 0:
        index, prim = random.choice(iprims)
        # Among the type-compatible argument slots, pick one at random.
        arg_idx = random.choice([i for i, type_ in enumerate(prim.args) if type_ == prim.ret])
        # Walk over the children subtrees in order until the loop leaves
        # `subtree` holding the arg_idx-th child of the chosen primitive.
        rindex = index + 1
        for _ in range(arg_idx + 1):
            rslice = individual.searchSubtree(rindex)
            subtree = individual[rslice]
            rindex += len(subtree)
        # Replace the whole branch by the selected child subtree.
        slice_ = individual.searchSubtree(index)
        individual[slice_] = subtree
    return individual,
######################################
# GP bloat control decorators #
######################################
def staticLimit(key, max_value):
    """Build a decorator enforcing a static limit on a GP-tree measurement,
    as defined by Koza in [Koza1989]. It may decorate both crossover and
    mutation operators: whenever a produced child exceeds the limit, it is
    replaced by a randomly selected copy of one of its parents.

    This guards against memory errors occurring when trees grow beyond about
    90 levels (Python bounds the call-stack depth), since no tree over the
    limit can ever enter the population — except ones created at
    initialization time.

    :param key: Callable extracting the measured value from an individual;
                e.g. ``operator.attrgetter('height')`` for a depth limit or
                ``len`` for a size limit.
    :param max_value: The maximum value allowed for the given measurement.
    :returns: A decorator that can be applied to a GP operator using \
    :func:`~deap.base.Toolbox.decorate`
    .. note::
       To reproduce the exact behavior intended by Koza, use
       ``operator.attrgetter('height')`` as *key* and 17 as *max_value*.
    .. [Koza1989] J.R. Koza, Genetic Programming - On the Programming of
        Computers by Means of Natural Selection (MIT Press,
        Cambridge, MA, 1992)
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Snapshot the parents first so an over-limit child can be
            # swapped for an unmodified copy of one of them.
            parents = [copy.deepcopy(ind) for ind in args]
            offspring = list(func(*args, **kwargs))
            offspring = [
                random.choice(parents) if key(child) > max_value else child
                for child in offspring
            ]
            return offspring
        return wrapper
    return decorator
######################################
# GP bloat control algorithms #
######################################
def harm(population, toolbox, cxpb, mutpb, ngen,
         alpha, beta, gamma, rho, nbrindsmodel=-1, mincutoff=20,
         stats=None, halloffame=None, verbose=__debug__):
    """Implement bloat control on a GP evolution using HARM-GP, as defined in
    [Gardner2015]. It is implemented in the form of an evolution algorithm
    (similar to :func:`~deap.algorithms.eaSimple`).
    :param population: A list of individuals.
    :param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
                    operators.
    :param cxpb: The probability of mating two individuals.
    :param mutpb: The probability of mutating an individual.
    :param ngen: The number of generation.
    :param alpha: The HARM *alpha* parameter.
    :param beta: The HARM *beta* parameter.
    :param gamma: The HARM *gamma* parameter.
    :param rho: The HARM *rho* parameter.
    :param nbrindsmodel: The number of individuals to generate in order to
                         model the natural distribution. -1 is a special
                         value which uses the equation proposed in
                         [Gardner2015] to set the value of this parameter :
                         max(2000, len(population))
    :param mincutoff: The absolute minimum value for the cutoff point. It is
                      used to ensure that HARM does not shrink the population
                      too much at the beginning of the evolution. The default
                      value is usually fine.
    :param stats: A :class:`~deap.tools.Statistics` object that is updated
                  inplace, optional.
    :param halloffame: A :class:`~deap.tools.HallOfFame` object that will
                       contain the best individuals, optional.
    :param verbose: Whether or not to log the statistics.
    :returns: The final population
    :returns: A class:`~deap.tools.Logbook` with the statistics of the
              evolution
    This function expects the :meth:`toolbox.mate`, :meth:`toolbox.mutate`,
    :meth:`toolbox.select` and :meth:`toolbox.evaluate` aliases to be
    registered in the toolbox.
    .. note::
        The recommended values for the HARM-GP parameters are *alpha=0.05*,
        *beta=10*, *gamma=0.25*, *rho=0.9*. However, these parameters can be
        adjusted to perform better on a specific problem (see the relevant
        paper for tuning information). The number of individuals used to
        model the natural distribution and the minimum cutoff point are less
        important, their default value being effective in most cases.
    .. [Gardner2015] M.-A. Gardner, C. Gagne, and M. Parizeau, Controlling
        Code Growth by Dynamically Shaping the Genotype Size Distribution,
        Genetic Programming and Evolvable Machines, 2015,
        DOI 10.1007/s10710-015-9242-8
    """
    def _genpop(n, pickfrom=[], acceptfunc=lambda s: True, producesizes=False):
        # Generate a population of n individuals, using individuals in
        # *pickfrom* if possible, with a *acceptfunc* acceptance function.
        # If *producesizes* is true, also return a list of the produced
        # individuals sizes.
        # This function is used 1) to generate the natural distribution
        # (in this case, pickfrom and acceptfunc should be let at their
        # default values) and 2) to generate the final population, in which
        # case pickfrom should be the natural population previously generated
        # and acceptfunc a function implementing the HARM-GP algorithm.
        # NOTE: the mutable default ``pickfrom=[]`` is only safe because
        # callers pass their own list whenever the contents matter.
        producedpop = []
        producedpopsizes = []
        while len(producedpop) < n:
            if len(pickfrom) > 0:
                # If possible, use the already generated
                # individuals (more efficient)
                aspirant = pickfrom.pop()
                if acceptfunc(len(aspirant)):
                    producedpop.append(aspirant)
                    if producesizes:
                        producedpopsizes.append(len(aspirant))
            else:
                opRandom = random.random()
                if opRandom < cxpb:
                    # Crossover
                    aspirant1, aspirant2 = toolbox.mate(*map(toolbox.clone,
                                                             toolbox.select(population, 2)))
                    del aspirant1.fitness.values, aspirant2.fitness.values
                    if acceptfunc(len(aspirant1)):
                        producedpop.append(aspirant1)
                        if producesizes:
                            producedpopsizes.append(len(aspirant1))
                    if len(producedpop) < n and acceptfunc(len(aspirant2)):
                        producedpop.append(aspirant2)
                        if producesizes:
                            producedpopsizes.append(len(aspirant2))
                else:
                    aspirant = toolbox.clone(toolbox.select(population, 1)[0])
                    if opRandom - cxpb < mutpb:
                        # Mutation
                        aspirant = toolbox.mutate(aspirant)[0]
                        del aspirant.fitness.values
                    if acceptfunc(len(aspirant)):
                        producedpop.append(aspirant)
                        if producesizes:
                            producedpopsizes.append(len(aspirant))
        if producesizes:
            return producedpop, producedpopsizes
        else:
            return producedpop
    # Linear half-life model used by the exponential target distribution.
    halflifefunc = lambda x: (x * float(alpha) + beta)
    if nbrindsmodel == -1:
        nbrindsmodel = max(2000, len(population))
    logbook = tools.Logbook()
    logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in population if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit
    if halloffame is not None:
        halloffame.update(population)
    record = stats.compile(population) if stats else {}
    logbook.record(gen=0, nevals=len(invalid_ind), **record)
    if verbose:
        # NOTE: Python 2 print statement — this module targets Python 2.
        print logbook.stream
    # Begin the generational process
    for gen in range(1, ngen + 1):
        # Estimation population natural distribution of sizes
        naturalpop, naturalpopsizes = _genpop(nbrindsmodel, producesizes=True)
        # +3 head-room so the indsize+2 kernel tap below stays in range.
        naturalhist = [0] * (max(naturalpopsizes) + 3)
        for indsize in naturalpopsizes:
            # Kernel density estimation application
            naturalhist[indsize] += 0.4
            naturalhist[indsize - 1] += 0.2
            naturalhist[indsize + 1] += 0.2
            naturalhist[indsize + 2] += 0.1
            if indsize - 2 >= 0:
                naturalhist[indsize - 2] += 0.1
        # Normalization
        naturalhist = [val * len(population) / nbrindsmodel for val in naturalhist]
        # Cutoff point selection
        sortednatural = sorted(naturalpop, key=lambda ind: ind.fitness)
        cutoffcandidates = sortednatural[int(len(population) * rho - 1):]
        # Select the cutoff point, with an absolute minimum applied
        # to avoid weird cases in the first generations
        cutoffsize = max(mincutoff, len(min(cutoffcandidates, key=len)))
        # Compute the target distribution
        targetfunc = lambda x: (gamma * len(population) * math.log(2) /
                                halflifefunc(x)) * math.exp(-math.log(2) *
                                                            (x - cutoffsize) / halflifefunc(x))
        targethist = [naturalhist[binidx] if binidx <= cutoffsize else
                      targetfunc(binidx) for binidx in range(len(naturalhist))]
        # Compute the probabilities distribution
        probhist = [t / n if n > 0 else t for n, t in zip(naturalhist, targethist)]
        probfunc = lambda s: probhist[s] if s < len(probhist) else targetfunc(s)
        acceptfunc = lambda s: random.random() <= probfunc(s)
        # Generate offspring using the acceptance probabilities
        # previously computed
        offspring = _genpop(len(population), pickfrom=naturalpop,
                            acceptfunc=acceptfunc, producesizes=False)
        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit
        # Update the hall of fame with the generated individuals
        if halloffame is not None:
            halloffame.update(offspring)
        # Replace the current population by the offspring
        population[:] = offspring
        # Append the current generation statistics to the logbook
        record = stats.compile(population) if stats else {}
        logbook.record(gen=gen, nevals=len(invalid_ind), **record)
        if verbose:
            print logbook.stream
    return population, logbook
def graph(expr):
    """Construct the graph of a valid tree expression.

    Returns, in order, a node list, an edge list, and a dictionary of per
    node labels. Nodes are represented by integers, edges are (parent,
    child) tuples of node numbers, and the labels dict maps node number to
    the primitive's name or the terminal's value. The three structures can
    be fed directly to a `pygraphviz <http://networkx.lanl.gov/pygraphviz/>`_
    ``AGraph`` (``add_nodes_from`` / ``add_edges_from``) or a
    `NetworX <http://networkx.github.com/>`_ ``Graph``; pygraphviz is
    encouraged, as NetworkX may plot the nodes out of order.

    :param expr: A tree expression to convert into a graph.
    :returns: A node list, an edge list, and a dictionary of labels.
    """
    nodes = range(len(expr))
    edges = []
    labels = {}
    # Stack of [node_index, children_still_missing] for the open subtrees.
    open_subtrees = []
    for idx, node in enumerate(expr):
        if open_subtrees:
            parent = open_subtrees[-1]
            edges.append((parent[0], idx))
            parent[1] -= 1
        if isinstance(node, Primitive):
            labels[idx] = node.name
        else:
            labels[idx] = node.value
        open_subtrees.append([idx, node.arity])
        # Pop every subtree whose children have all been emitted.
        while open_subtrees and open_subtrees[-1][1] == 0:
            open_subtrees.pop()
    return nodes, edges, labels
if __name__ == "__main__":
    # Run this module's embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
| [
"juancq@gmail.com"
] | juancq@gmail.com |
a781d858c1e8c02c4bd691f9fe847fe576644d9a | c345c0777b12a1c79ac1c0899e265517b4276e6a | /mongodm/document.py | bf073ec30654e93957b3d8d2f66203ef5faf7c55 | [] | no_license | ezekial4/mongodm | 25477cb0269fd4a60e24abd208debeb518c69904 | cd9c930494469c92b70db33005cf76b9c7aeaded | refs/heads/master | 2021-01-22T17:58:13.296692 | 2010-10-10T14:04:26 | 2010-10-10T14:04:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | from mongodm.base import BaseDocument
class Document(BaseDocument):
    """Marker subclass of BaseDocument for standalone documents.

    Adds no behaviour of its own — presumably exists so user models can
    distinguish top-level documents from embedded ones; TODO confirm
    against BaseDocument's storage logic.
    """
    pass


class EmbeddedDocument(BaseDocument):
    """Marker subclass of BaseDocument for documents nested inside another
    document (presumably no collection of their own — TODO confirm)."""
    pass
"jean-philippe.serafin@dev-solutions.fr"
] | jean-philippe.serafin@dev-solutions.fr |
81b6e5b811ec388584eaa75528e0c1cda7f67453 | 22121f89b1b038ac22d00567320ae7a9216df854 | /src/project/settings.py | d21f2307888b558aedc68392392d073f8a0b1964 | [] | no_license | lyudmila-petrova/base-docker-django2 | 6b90414b172ff503f2050662f3b9b81eea3e8e94 | 9ac4723622bbe92269b5278efd52eb2c901a3f09 | refs/heads/master | 2020-04-16T11:28:13.537035 | 2019-01-13T17:55:18 | 2019-01-13T17:55:18 | 165,537,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,203 | py | """
Django settings for project project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from the environment (e.g. os.environ) before any production deployment.
SECRET_KEY = 'n+kvf40w((%wg#d0+&(3&4^nj$m!67&h0n^&1irj_dai0&7)c)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# 192.168.99.100 is the default docker-machine IP; localhost/127.0.0.1 for
# direct local runs.
ALLOWED_HOSTS = ['192.168.99.100', 'localhost', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# NOTE(review): credentials are hard-coded for a local docker-compose setup
# (host 'db'); move PASSWORD/USER to environment variables for production.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'dev',
        'USER': 'postgres',
        'HOST': 'db',
        'PASSWORD': 'pw',
        'PORT': 5432,
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"ging74ru@gmail.com"
] | ging74ru@gmail.com |
e30fa6506f350a97a745e086e428e4acb9a12c07 | 3deb007e8e5704c8f27d4afd14625dd47131320a | /radio_recorder/src/Player/myomxplayer.py | f98e62fbdb5463ea53e9432e124bd142985b8de1 | [] | no_license | NABUKAT/radio_recorder | d9a2bbaf92d4af1845e024429ef6493cf9e686e7 | fb84b6974b3a3ce487a5f2e2a1350651d20635e7 | refs/heads/master | 2021-06-24T07:57:45.220545 | 2020-12-20T01:21:02 | 2020-12-20T01:21:02 | 180,361,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,898 | py | # -*- coding: utf-8 -*-
import os
import signal
import subprocess
import time
class MyOmxplayer:
    """Controller for the ``omxplayer`` media player CLI (Raspberry Pi).

    Player processes are located and killed via shell pipelines over ``ps``;
    key commands are delivered by writing to the player's stdin through
    ``/proc/<pid>/fd/0``. The playback state ("play" / "pause" / "stop") is
    persisted in ``player_state.txt`` next to this module so it survives
    between invocations.
    """

    def newplay(self, filepath):
        """Stop any running player, then start playing *filepath*."""
        self.stop()
        command = 'exec omxplayer -o alsa ' + filepath
        subprocess.Popen(command, shell=True, stdin=subprocess.PIPE)
        self.setState("play")

    def stop(self):
        """SIGTERM every running omxplayer process and record "stop"."""
        if self.getState() == "play":
            out = subprocess.Popen(
                "ps aux | grep -v grep | grep ' -o alsa ' | awk '{print $2}'",
                shell=True, stdout=subprocess.PIPE,
                stderr=subprocess.PIPE).communicate()[0]
            # Decode for consistency with confirm()/command() (the original
            # printed/parsed raw bytes here).
            for pid in str(out, "utf-8").splitlines():
                print(pid)
                os.kill(int(pid), signal.SIGTERM)
            self.setState("stop")

    def play(self):
        """Resume a paused player ('p' toggles play/pause in omxplayer)."""
        if self.getState() == "pause":
            self.command("p")
            self.setState("play")

    def pause(self):
        """Pause a playing player."""
        if self.getState() == "play":
            self.command("p")
            self.setState("pause")

    def p30(self):
        """Seek forward 30 seconds.

        NOTE(review): the key strings below are the two literal characters
        '^' and '[' rather than an actual ESC byte (0x1b); omxplayer expects
        real arrow-key escape sequences — confirm these bindings work.
        """
        if self.getState() != "stop":
            self.command("^[[C")

    def p600(self):
        """Seek forward 600 seconds (up arrow)."""
        if self.getState() != "stop":
            self.command("^[[A")

    def m30(self):
        """Seek backward 30 seconds (left arrow)."""
        if self.getState() != "stop":
            self.command("^[[D")

    def m600(self):
        """Seek backward 600 seconds (down arrow)."""
        if self.getState() != "stop":
            self.command("^[[B")

    def confirm(self):
        """Re-sync with reality: return "stop" if no player process exists,
        otherwise the persisted state."""
        out = subprocess.Popen(
            "ps aux | grep -v grep | grep 'omxplayer -o alsa ' | wc -l",
            shell=True, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE).communicate()[0]
        if str(out, "utf-8").splitlines()[0] == "0":
            return "stop"
        return self.getState()

    def command(self, com):
        """Write *com* to the first omxplayer process's stdin via /proc."""
        out = subprocess.Popen(
            "ps aux | grep -v grep | grep 'omxplayer -o alsa ' | awk '{print $2}'",
            shell=True, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE).communicate()[0]
        for pid in str(out, "utf-8").splitlines():
            print(os.path.join("/proc", pid, "fd", "0"))
            # FIX: context manager closes the /proc handle even on error.
            with open(os.path.join("/proc", pid, "fd", "0"), "w") as stdin_file:
                stdin_file.write(com)
            break

    def getState(self):
        """Read the persisted state string ("play"/"pause"/"stop")."""
        state_path = os.path.join(os.path.dirname(__file__), "player_state.txt")
        # FIX: context manager instead of manual open/close.
        with open(state_path, "r") as f:
            return f.read()

    def setState(self, state):
        """Persist the state string."""
        state_path = os.path.join(os.path.dirname(__file__), "player_state.txt")
        with open(state_path, "w") as f:
            f.write(state)
if __name__ == '__main__':
    # Manual smoke test: play a recording, wait, then skip ahead 30 seconds.
    filepath = "/media/radiko/伊集院光の深夜の馬鹿力/20181127_伊集院光の深夜の馬鹿力.m4a"
    mop = MyOmxplayer()
    mop.newplay(filepath)
    time.sleep(5)
    mop.p30()
    #mop.pause()
    #print(mop.confirm())
    #time.sleep(5)
    #mop.play()
    #time.sleep(5)
    #mop.stop()
| [
"mekakujira@infoseek.jp"
] | mekakujira@infoseek.jp |
50ed5e211beb07bedfe62e50e7f6390c2f8e1611 | b981653d94eb5f6468025570170ebbe8b360f3a3 | /Assignment2/task3.py | 21c34855e311b996c93aadf25bd01614f317d86a | [] | no_license | sophia-e/SP | 6ede43a0e0c244457255a27e1c9ea76c9f2df401 | fa84148000abd8a17dd1c20dae530f484c109e5d | refs/heads/master | 2021-01-24T00:31:54.547253 | 2018-05-20T18:13:20 | 2018-05-20T18:13:20 | 122,767,685 | 0 | 0 | null | 2018-03-09T17:55:27 | 2018-02-24T18:30:45 | Shell | UTF-8 | Python | false | false | 817 | py | import sys
def findMatrix(mat1, mat2):
    """Search mat2 for the 2x2 block given by mat1 (flat, row-major).

    :param mat1: sequence of four values (strings or ints) read as
                 [[a, b], [c, d]].
    :param mat2: 2-D list of ints.
    Prints the top-left coordinates of the first match (row-major scan),
    or "Matrix not found".
    """
    vals = [int(v) for v in mat1]  # CLI arguments arrive as strings
    rows = len(mat2)
    cols = len(mat2[0])
    for row in range(rows - 1):
        for col in range(cols - 1):
            # FIX: compare with == — the original used `is`, which only
            # appeared to work because CPython caches small ints.
            if (vals[0] == mat2[row][col]
                    and vals[1] == mat2[row][col + 1]
                    and vals[2] == mat2[row + 1][col]
                    and vals[3] == mat2[row + 1][col + 1]):
                print("Matrix found at (%d,%d)" % (row, col))
                return
    print("Matrix not found")
print("main")
# 8x8 search space; note the deliberate 17 at row 3, col 2.
matrix = [[1, 2, 3, 4, 5, 6, 7, 8],
          [9, 10, 11, 12, 13, 14, 15, 16],
          [17, 18, 19, 20, 21, 22, 23, 24],
          [25, 26, 17, 28, 29, 30, 31, 32],
          [33, 34, 35, 36, 37, 38, 39, 40],
          [41, 42, 43, 44, 45, 46, 47, 48],
          [49, 50, 51, 52, 53, 54, 55, 56],
          [57, 58, 59, 60, 61, 62, 63, 64]]
args = sys.argv[1:]
if len(args) == 4:
    findMatrix(args, matrix)
else:
    print("you did not enter a 2X2 matrix")
"noreply@github.com"
] | noreply@github.com |
df838dbd39a8ceea717cda702aa7f1c9d5478024 | 7264c6b1b91548926cfa3f2da488e3df110c4040 | /clean_data/splice_data.py | bdc53968bf524c2a69f239635369276aa5e9abb8 | [] | no_license | sonalg49/crop-yield-prediction-project | 78c5b610f25cd178bd7ab90e0662572d4cf8940e | 5e1a25a3ae536576692b554ec6da1c306b59947a | refs/heads/master | 2020-05-16T08:02:44.934968 | 2017-06-13T00:44:24 | 2017-06-13T00:44:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,651 | py | import os
import numpy as np
import sys
# Command-line arguments: source CSV, reference .npz, destination .npz path.
src = sys.argv[1]
ref = sys.argv[2]
dst = sys.argv[3]
def splice_data(src, ref, dst):
    """Filter the reference .npz down to the (year, index) rows listed in src.

    Each CSV row is [year, index..., yield]; its first columns are matched
    against the concatenation of output_year and output_index from the
    reference file. Matching rows (with the CSV's yield in column 3) are
    written to *dst* as a new .npz with the same keys.
    """
    ref_npz = np.load(os.path.expanduser(ref))
    table = np.genfromtxt(os.path.expanduser(src), delimiter=',')
    # Unpack the arrays we re-emit.
    ref_index = ref_npz['output_index']
    ref_year = ref_npz['output_year']
    ref_image = ref_npz['output_image']
    ref_locations = ref_npz['output_locations']
    # Rows of [year, index...] to match the CSV's leading columns against.
    search = np.concatenate([ref_year[:, np.newaxis], ref_index], axis=1)
    keep_ref = []
    keep_csv = []
    for row_i in range(table.shape[0]):
        if row_i % 1000 == 0:
            print("On iteration %d." % row_i)
        hits = np.where(np.all(search == table[row_i, :-1], axis=1))[0]
        if hits.shape[0] == 0:  # no matching reference row
            print("ERROR, could not find %s" % table[row_i])
            continue
        keep_ref.append(hits[0])
        keep_csv.append(row_i)
    # Both lists grow in lockstep, one entry per matched CSV row.
    assert(len(keep_ref) == len(keep_csv))
    np.savez(os.path.expanduser(dst),
             output_index=ref_index[keep_ref],
             output_year=ref_year[keep_ref],
             output_image=ref_image[keep_ref],
             output_locations=ref_locations[keep_ref],
             output_yield=table[keep_csv, 3])
# Run the splice with the CLI-supplied paths.
splice_data(src, ref, dst)
| [
"msabini@stanford.edu"
] | msabini@stanford.edu |
6751813df7cadcbc722015f087934164f1982cbe | 77311ad9622a7d8b88707d7cee3f44de7c8860cb | /res_bw/scripts/common/lib/email/mime/nonmultipart.py | 527fda5afdf5a3217564dd26d8a2f0384691bce1 | [] | no_license | webiumsk/WOT-0.9.14-CT | 9b193191505a4560df4e872e022eebf59308057e | cfe0b03e511d02c36ce185f308eb48f13ecc05ca | refs/heads/master | 2021-01-10T02:14:10.830715 | 2016-02-14T11:59:59 | 2016-02-14T11:59:59 | 51,606,676 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 730 | py | # 2016.02.14 12:47:55 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/email/mime/nonmultipart.py
"""Base class for MIME type messages that are not multipart."""
__all__ = ['MIMENonMultipart']
from email import errors
from email.mime.base import MIMEBase
class MIMENonMultipart(MIMEBase):
    """Base class for MIME messages that are not multipart/*.

    Such messages carry exactly one payload, so attaching subparts is
    always an error.
    """

    def attach(self, payload):
        # Refuse subparts: only multipart/* messages may hold them.
        error = errors.MultipartConversionError('Cannot attach additional subparts to non-multipart/*')
        raise error
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\email\mime\nonmultipart.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.02.14 12:47:55 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
93d1146e7a1e36b119bebab77ccc0082e0014b5b | cb9a320899c40b8111bb5d6958ffe4ce9db36415 | /views.py | 4201532fae2f9230ddb64f6285ca58d4b8cbe9ee | [] | no_license | smc98m/Time_Display | 89c402435267e4f0adfff00d31635cb705594532 | 3b0e05efb0d3fa8945682343d2c7a6c569895d5f | refs/heads/master | 2021-05-17T20:58:40.527270 | 2020-03-29T04:04:54 | 2020-03-29T04:04:54 | 250,949,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | from django.shortcuts import render
from time import localtime, strftime
def index(request):
    """Render ``index.html`` with the current local time as ``time``."""
    stamp = strftime("%b %d, %Y %I:%M %p", localtime())
    return render(request, 'index.html', {"time": stamp})
| [
"stevecates78@gmail.com"
] | stevecates78@gmail.com |
f93fb4cb2935ee90b00aaa233691b20fc3ec8674 | a9d324ce2a39a77936450ffd7981329c431bab08 | /NLP_QP_DAY1_22DEC_FINAL/mymaxent.py | e12be9c244f73c0aec0357d5c7b778acc1e32cad | [] | no_license | rashrag/nlp-eval-day1 | a22ad5a8b88b5fe0445dcf6aae73f350ba30ffb2 | 3ee90a361cc49395a0fc1def8517406b322f2e4e | refs/heads/master | 2020-05-31T06:33:21.375982 | 2014-12-22T09:55:54 | 2014-12-22T09:55:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,183 | py | '''
MyMaxEnt.py
MaxEnt Classifier
Author: Anantharaman Narayana Iyer
Date: 21 Nov 2014
'''
import json
import numpy
import math
import pickle
from scipy.optimize import minimize as mymin
import datetime
# ----------------------------------------------------------------------------------------
# maxent implementation
# ----------------------------------------------------------------------------------------
class MyMaxEnt(object):
    """Maximum entropy (log-linear) classifier trained with L-BFGS-B.

    A (history, tag) pair is scored by dot(w, f(history, tag)), where f is
    supplied by ``function_obj.evaluate`` and the weight vector w is learned
    by minimizing the L2-regularized negative log-likelihood.
    """

    def __init__(self, history_tuples, function_obj, reg_lambda=0.01, pic_file=None):
        """
        :param history_tuples: training pairs [(history, tag), ...] where
            history is itself a tuple, e.g. (tag_t-2, tag_t-1, sentence, index).
        :param function_obj: feature extractor exposing ``supported_tags``
            and ``evaluate(history, tag) -> list of feature values``.
        :param reg_lambda: L2 regularization coefficient.
        :param pic_file: optional path where the trained model is pickled.
        """
        self.h_tuples = history_tuples
        self.func = function_obj
        self.reg = reg_lambda
        self.dataset = None  # set by create_dataset()
        self.tag_set = self.func.supported_tags  # the set of all labels Y
        self.create_dataset()
        self.dim = self.dataset.shape[1]
        self.num_examples = self.dataset.shape[0]
        self.model = numpy.array([0 for d in range(self.dim)])  # start at w = 0
        self.pic_file = pic_file

    def create_dataset(self):
        """Precompute feature vectors for training.

        Fills ``self.all_data[tag]`` with f(x, tag) for every training
        history x (one num_examples x dim matrix per tag) and
        ``self.dataset`` with f(x, gold_tag) per example.
        (The debug print loops marked "remove this" in the original have
        been removed.)
        """
        self.dataset = []
        self.all_data = {}
        # NOTE(review): only the first 7500 examples are used — presumably
        # to bound training time; confirm before relying on larger corpora.
        for h in self.h_tuples[:7500]:
            for tag in self.tag_set:
                feats = self.all_data.get(tag, [])
                val = self.get_feats(h[0], tag)
                feats.append(val)
                self.all_data[tag] = feats
                if h[1] == tag:  # gold tag: row also joins the training matrix
                    self.dataset.append(val)
        for k, v in self.all_data.items():
            self.all_data[k] = numpy.array(v)
        self.dataset = numpy.array(self.dataset)

    def get_feats(self, xi, tag):
        """Return the feature vector f(xi, tag) from the feature extractor."""
        return self.func.evaluate(xi, tag)

    def train(self):
        """Fit the weights with L-BFGS-B; optionally pickle them to pic_file."""
        dt1 = datetime.datetime.now()
        print('before training: ', dt1)
        params = mymin(self.cost, self.model, method='L-BFGS-B')  # , jac=self.gradient
        self.model = params.x
        dt2 = datetime.datetime.now()
        print('after training: ', dt2, ' total time = ', (dt2 - dt1).total_seconds())
        if self.pic_file is not None:
            # FIX: context manager — the original leaked the file handle.
            with open(self.pic_file, "wb") as fp:
                pickle.dump(self.model, fp)

    def p_y_given_x(self, xi, tag):
        """Return p(tag | xi) = exp(w.f(xi,tag)) / sum_y exp(w.f(xi,y)).

        BUG FIX: the original collapsed two statements into
        ``self.get_feats(xi, t, dp = numpy.dot(...))``, passing an
        unsupported ``dp`` keyword to get_feats and raising TypeError on
        every call.
        """
        score = numpy.dot(numpy.array(self.get_feats(xi, tag)), self.model)
        normalizer = 0.0
        for t in self.tag_set:
            dp = numpy.dot(numpy.array(self.get_feats(xi, t)), self.model)
            # math.exp(0.0) == 1.0 exactly, so no zero special-case is needed.
            normalizer += math.exp(dp)
        return math.exp(score) / normalizer

    def classify(self, xi):
        """Return the most probable tag for history xi.

        Ties are resolved in favor of the later tag in ``tag_set`` (the
        original's ``>=`` comparison is preserved). If ``pic_file`` is set,
        the model is (re)loaded from it first.
        """
        if self.pic_file is not None:
            with open(self.pic_file, "rb") as fp:
                self.model = pickle.load(fp)
        best_val = 0.0
        best_tag = None
        for t in self.tag_set:
            val = self.p_y_given_x(xi, t)
            if val >= best_val:
                best_val = val
                best_tag = t
        return best_tag

    def cost(self, params):
        """Regularized negative log-likelihood: sum_i log Z(x_i) - empirical
        counts + 0.5 * reg * |w|^2. Called by the optimizer."""
        self.model = params
        reg_term = 0.5 * self.reg * sum(p * p for p in params)
        # Empirical term: sum_i w . f(x_i, y_i)
        empirical = numpy.sum(numpy.dot(self.dataset, self.model))
        expected = 0.0  # sum_i log Z(x_i)
        for j in range(self.num_examples):
            mysum = 0.0
            for tag in self.tag_set:
                fx_yprime = self.all_data[tag][j]
                mysum += math.exp(numpy.dot(fx_yprime, self.model))
            expected += math.log(mysum)
        print("Cost = ", (expected - empirical + reg_term))
        return (expected - empirical + reg_term)

    def gradient(self, params):
        """Analytic gradient: expected feature counts - empirical counts
        + reg * w, one component per feature dimension k."""
        self.model = params
        grad = []
        for k in range(self.dim):
            reg_term = self.reg * params[k]
            empirical = sum(dx[k] for dx in self.dataset)
            expected = 0.0
            for i in range(self.num_examples):
                # FIX: compute each example's dot products and normalizer
                # once (the original recomputed the full normalizer inside
                # the innermost tag loop — O(|Y|^2) dot products).
                dots = [numpy.dot(self.all_data[t][i], self.model) for t in self.tag_set]
                normalizer = sum(math.exp(d) for d in dots)
                for t, d in zip(self.tag_set, dots):
                    prob = math.exp(d) / normalizer
                    expected += prob * float(self.all_data[t][i][k])
            grad.append(expected - empirical + reg_term)
        return numpy.array(grad)
if __name__ == "__main__":
    # Library module: nothing to do when executed directly.
    pass
| [
"rashmi.raghunandan93@gmail.com"
] | rashmi.raghunandan93@gmail.com |
f36f95ecbfd8f569c0474782833e94cff1d6118f | d3a34d883da6a8eef07b0a0956a06ebe9549353d | /functions_secondary.py | f8a1120fb35813191d82c7211f6bec3f55160408 | [] | no_license | BriBean/Functions | 49999fa1fb03e4d18a44de89a808d380106f7cea | 6769663853dad2ae1bba4d7ab01034f6db43da9c | refs/heads/master | 2023-06-17T03:05:16.353381 | 2021-07-11T22:12:18 | 2021-07-11T22:12:18 | 383,827,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,054 | py | # --------------- Section 3 --------------- #
# 1 | Fahrenheit to Celsius Conversion
#
# Fahrenheit is way we measure temperature and is commonly used in the United States, the Cayman Islands, and Liberia.
# The other unit of measurement is celsius. Celsius is commonly used throughout the rest of the world. Since they are
# different units of measurement, then the exact same temperature will have different values.
#
# For example, 68° fahrenheit is 20° celsius.
# https://www.google.com/search?client=firefox-b-1-d&q=68+degrees+fahrenheit+to+celsius
#
#
# To calculate celsius from fahrenheit, you use the following equation:
# c = (f - 32) * (5/9)
# where; f represents degrees fahrenheit
# c represents degrees celsius
#
# Function
# 1 - Define a function that will convert a temperature in fahrenheit to celsius.
# 2 - Define a function that will convert a temperature in celsius to fahrenheit.
# 3 - Return the new temperature.
#
# Function Call
# 1 - Call both of these functions, and save the return value. Use any temperature.
# 2 - Print the old and converted temperature.
#
# EXAMPLE OUTPUT:
# 72° fahrenheit is 22.2222° celsius
# 10° celsius is 50° fahrenheit
#
# WRITE CODE BELOW
# 2 | Celsius to Kelvin
#
# There is another unit of measurement, called kelvin. It is closely related to celsius. In fact, converting a
# temperature from Celsius to Kelvin is simple:
# k = c + 273.15
# where; c represents degrees celsius
#
# Function
# 1 - Define a function that will convert a temperature in celsius to kelvin.
# 2 - Define a function that will convert a temperature in kelvin to celsius.
# 3 - Return the new temperature.
#
# Function Call
# 1 - Call both of these functions, and save the return value. Use any temperature.
# 2 - Print the old and converted temperature.
#
# EXAMPLE OUTPUT:
# 45° celsius is 318.15° kelvin
# 232° kelvin is -41.15° celsius
#
# WRITE CODE BELOW
# Question: How could you use these functions to convert a temperature in fahrenheit to kelvin?
| [
"86678213+BriBean@users.noreply.github.com"
] | 86678213+BriBean@users.noreply.github.com |
284b51d2f5405f72fc1aedcc1386ba663dd15b2d | 533b446fe70eb6cd609aadb65a53a73f58471c21 | /application/controllers/callback.py | 9ef91ffdee2e225daf0f65095c9db5aa6ddb6e4d | [
"Apache-2.0"
] | permissive | suqi/mdpress | 1db53e8ec2bfb1d4ea79fe29efeb9f91cd683f28 | b7d03d84c234d527e70a254bf52f96b1868fe34b | refs/heads/master | 2021-01-13T01:21:12.956530 | 2016-11-27T15:21:02 | 2016-11-27T15:21:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | #!/usr/bin/env python
# encoding: utf-8
from flask import (request, Blueprint, send_from_directory,
current_app as app, url_for, abort, redirect)
# NOTE(review): the third positional argument of flask.Blueprint is
# static_folder, so '/callback' here registers a static folder, not a URL
# prefix — if a prefix was intended, pass url_prefix='/callback' instead.
callback_bp = Blueprint('callback', __name__, '/callback')
# BUG FIX: Flask raises "urls must start with a leading slash" for bare
# rules, so the original 'duoshuo' rule would crash blueprint registration.
@callback_bp.route('/duoshuo', methods=['POST'])
def duoshuo_callback():
    """Receive Duoshuo comment-system POST callbacks (not yet implemented)."""
    pass
| [
"liqianglau@outlook.com"
] | liqianglau@outlook.com |
903a4c7882fc3b807bd0373c0f0d85497a3184e3 | 7d5d8492c2d88b88bdc57e3c32db038a7e7e7924 | /MarkSim-DSSAT/batchSites.py | cd21af694dfdd39389f5cc92bd3284f2a55387b3 | [] | no_license | CIAT-DAPA/dapa-climate-change | 80ab6318d660a010efcd4ad942664c57431c8cce | 2480332e9d61a862fe5aeacf6f82ef0a1febe8d4 | refs/heads/master | 2023-08-17T04:14:49.626909 | 2023-08-15T00:39:58 | 2023-08-15T00:39:58 | 39,960,256 | 15 | 17 | null | null | null | null | UTF-8 | Python | false | false | 1,324 | py | #Read in a data file and apply MarkSimGCM python script
import sys, os
import csv

# CLI arguments: the sites CSV and the GCM model abbreviation.
csvFile = sys.argv[1]
model = sys.argv[2]

# Dictionary mapping model abbreviations to full GCM model names.
decDc = {"avr": "average", "bcc": "bccr_bcm2_0", "cnr": "cnrm_cm3", "csi": "csiro_mk3_5", "ech": "mpi_echam5", "inm": "inm_cm3_0", "mir": "miroc3_2_medres"}

yearList = "2020", "2050"

for year in yearList:
    # Re-open the CSV for each year so the reader starts from the top.
    # NOTE(review): 'rb' mode with csv.reader indicates this script targets Python 2.
    dataIn = csv.reader(open(csvFile, 'rb'), delimiter=' ', quotechar=' ')
    for row in dataIn:
        rowData = ', '.join(row)
        # Each row: site name, latitude, longitude (comma separated).
        siteName = rowData.split(",")[0]
        siteLat = rowData.split(",")[1]
        siteLong = rowData.split(",")[2]
        workspace = "D:\\MarkSimGCM\\TOR_GCM\\" + str(year) + "\\" + str(decDc[str(model)]) + "\\" + os.path.basename(csvFile)[:-4]
        # Running MarkSimGCM via its command-line wrapper script.
        os.system("marksimgcm-v1.1.py D:/MarkSimGCM/worldll.mtg D:/MarkSimGCM/MarkDat D:/MarkSimGCM/gcm4data " + workspace + " " + siteName + " " + model + " a2 " + str(year) + " 1234 99 " + siteLat + " " + siteLong)

# Argument reference for marksimgcm-v1.1.py ("fixed" = hard-coded above, "variable" = per-run):
#[0] = location of WorldClim 30s file (fixed)
#[1] = location of marksim data (fixed)
#[2] = location of GCM data (fixed)
#[3] = location of output workspace (fixed)
#[4] = model: ech, csi, avr, bcc, cnr, inm, mir (variable)
#[5] = scenario: a1, b1, a2 (fixed)
#[6] = year (fixed)
#[7] = seed (fixed)
#[8] = replications (fixed)
#[9] = latitude
#[10] = longitude
#[11] = country
| [
"cenavarror@gmail.com"
] | cenavarror@gmail.com |
6ac6afcab379b4e921ab4f97c473f228d7f8a37d | eb70555428f1bcc1e41f345c2d9f7f5530c9e295 | /polleria/migrations/0004_invoices.py | 9569c669b1e4fa9a9db773f0aad04b1c9f162767 | [] | no_license | Suheidy19/Proyecto-parte-2 | d3ebe62020c147ef0c2c6d3a629804772d0406c0 | 842e1cad0d340286de2bd9cd9b710675f35cd28c | refs/heads/main | 2023-01-08T19:35:07.297337 | 2020-11-15T02:08:04 | 2020-11-15T02:08:04 | 312,938,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,006 | py | # Generated by Django 3.1.2 on 2020-10-28 22:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the Invoices table, linked to
    # the existing polleria.clients model via a cascading foreign key.

    dependencies = [
        ('polleria', '0003_auto_20201028_1618'),
    ]

    operations = [
        migrations.CreateModel(
            name='Invoices',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Date', models.CharField(max_length=20, verbose_name='Fecha de factura')),
                ('Total', models.DecimalField(decimal_places=2, max_digits=5, verbose_name='Total de la venta')),
                ('Name_Clients', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polleria.clients', verbose_name='Nombre Cliente')),
            ],
            options={
                'verbose_name': 'Factura',
                'verbose_name_plural': 'Facturas',
            },
        ),
    ]
| [
"noreply@github.com"
] | noreply@github.com |
04a89b48cb3d11d283f06aa2d0dd8ce0d886a918 | e783070ae1c50e529fb97af90df2a2bbd2c561fa | /articledumper.py | a5631b689c7f1cc06db49d2655ef46888555f540 | [] | no_license | observantdimension/twitter-bot-prediction-code | 18113798a77e7e9740c53676d703e49335e83129 | 30b7ed839beae60e6e453d5e4100179540e96f32 | refs/heads/master | 2022-04-15T06:50:16.594715 | 2020-04-13T19:17:52 | 2020-04-13T19:17:52 | 255,409,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | import shared
import json
import typing
from nytimes import api
def collect_article_pages(params: dict, page_count: int = 10) -> typing.List[dict]:
    """Fetch `page_count` consecutive result pages for the given search params.

    Mutates params['page'] for each request and returns the raw page dicts
    in order.
    """
    collected: typing.List[dict] = []
    for page_index in range(page_count):
        params['page'] = page_index
        print("Fetching page index %d" % page_index)
        collected.append(api.do_article_search(params=params))
    return collected
def run_app():
    """Fetch recent NYT politics articles about Trump and dump them as JSON.

    Writes 5 pages of results to generated_data/recent_political_articles.json.
    """
    recent_political_articles = collect_article_pages(
        {
            'fq': 'subsection_name:("Politics") AND source:("The New York Times") AND persons:("Trump, Donald J") AND '
                  'document_type:("article")',
            'sort': 'newest'
        }, 5)
    with open('generated_data/recent_political_articles.json', 'w', encoding='utf-8') as articlesFile:
        json.dump(recent_political_articles, articlesFile, sort_keys=True, indent=4, ensure_ascii=False)
    pass


run_app()
| [
"noreply@github.com"
] | noreply@github.com |
17cbd59404e6774d0093023cd921bea9c0b812b8 | 3e5e8d6c1b39d459f4e489db083bd437f88bf213 | /path/path_server.py | f998079d6e3812562f8b43147012f9300ab9e3bd | [] | no_license | emonson/SamVis | 37b4f92e482a5227520c4f6b95896ab35d0b71e5 | 98f1dc793bc6a0a38785cb279cd8d27a44807b8b | refs/heads/master | 2020-06-04T03:04:53.257031 | 2014-10-30T17:34:39 | 2014-10-30T17:34:39 | 9,029,161 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,354 | py | import cherrypy
import json
from path_obj import PathObj
import os
import glob
class ResourceIndex(object):
    """CherryPy handler rendering an HTML index of links to the datasets.

    NOTE: uses a Python 2 tuple-parameter lambda and relies on format(**vars())
    closing over locals; this module targets Python 2.
    """

    def __init__(self, server_url, data_names):
        # Base visualization URL; each link appends ?data=<name>.
        self.server_url = server_url
        self.data_names = data_names

    @cherrypy.expose
    def index(self):
        """Serve the HTML list of dataset links."""
        return self.to_html()

    @cherrypy.expose
    def datasets(self):
        """Serve the dataset names as a JSON array."""
        return json.dumps(self.data_names)

    def to_html(self):
        # Build one <div><a ...> entry per dataset name.
        html_item = lambda (name): '<div><a href="' + self.server_url + '?data={name}">{name}</a></div>'.format(**vars())
        items = map(html_item, self.data_names)
        items = ''.join(items)
        return '<html>{items}</html>'.format(**vars())
class PathServer:
    """CherryPy handler exposing one PathObj dataset over HTTP.

    Each public method is a URL endpoint; query-string parameters arrive as
    strings and are converted to ints here before delegating to PathObj.
    NOTE(review): indentation was reconstructed — endpoints are assumed to do
    nothing (return None) when the required id parameter is absent; confirm
    against the original source.
    """
    # _cp_config = {'tools.gzip.on': True}

    def __init__(self, path):
        print 'STARTING UP', path
        self.path = PathObj(path)

    @cherrypy.expose
    def index(self):
        return self.path.path_data_dir

    # ------------
    # Paths
    @cherrypy.expose
    @cherrypy.tools.gzip()
    def districtcoords(self, district_id = None, depth = 1, previous_id = None, rold = "1.0, 0.0, 0.0, 1.0"):
        """Rotated path coordinates for a district, to the given depth."""
        if district_id is not None:
            dist_id = int(district_id)
            d = int(depth)
            # previous_id defaults to the district itself on the first request
            if previous_id is not None:
                prev_id = int(previous_id)
            else:
                prev_id = dist_id
            R_old = self.parse_rold(rold)
            return self.path.GetDistrictDeepPathLocalRotatedCoordInfo_JSON(dist_id, prev_id, d, R_old)

    # ------------
    # Ellipses
    @cherrypy.expose
    @cherrypy.tools.gzip()
    def districtellipses(self, district_id = None, type = 'space', previous_id = None, rold = "1.0, 0.0, 0.0, 1.0"):
        """Rotated ellipses for a district; type selects 'diffusion' vs local space."""
        if district_id is not None:
            dist_id = int(district_id)
            if previous_id is not None:
                prev_id = int(previous_id)
            else:
                prev_id = dist_id
            R_old = self.parse_rold(rold)
            if type == 'diffusion':
                return self.path.GetDistrictDiffusionRotatedEllipses_JSON(dist_id, prev_id, R_old)
            else:
                return self.path.GetDistrictLocalRotatedEllipses_JSON(dist_id, prev_id, R_old)

    # ------------
    # Query
    @cherrypy.expose
    @cherrypy.tools.gzip()
    def pathtimedistrict(self, time=None):
        if time is not None:
            t = int(time)
            # Get district ID for path at a specified time
            return self.path.GetDistrictFromPathTime_JSON(t)

    @cherrypy.expose
    @cherrypy.tools.gzip()
    def netpoints(self):
        # 2D coordinates of overview of district centers
        return self.path.GetNetPoints_JSON()

    @cherrypy.expose
    @cherrypy.tools.gzip()
    def datainfo(self):
        # {datatype:('image', 'gene',...), shape:[n_rows, n_cols], alldata_bounds:[min, max]}}
        return self.path.GetDataInfo_JSON()

    @cherrypy.expose
    @cherrypy.tools.gzip()
    def transitiongraph(self):
        # nodes (with ids and occupation times) and edges (with transition sums)
        return self.path.GetTransitionGraph_JSON()

    @cherrypy.expose
    @cherrypy.tools.gzip()
    def timesfromdistrict(self, district_id=None):
        if district_id is not None:
            dist_id = int(district_id)
            # Average 1st passage times to other districts from this one
            return self.path.GetTimesFromDistrict_JSON(dist_id)

    @cherrypy.expose
    @cherrypy.tools.gzip()
    def districtcenterdata(self, district_id=None):
        if district_id is not None:
            dist_id = int(district_id)
            # TODO: Make this more general. For now it's just an image for the district center
            # TODO: Need to figure out a way to detect early on what type of data is associated
            #   with each district, and tailor the JS visualizations accordingly, and here
            #   just grab data without knowing what it is.
            return self.path.GetDistrictCenterData_JSON(dist_id)

    # ------------
    # Utility
    def parse_rold(self, rold):
        # Parse comma-separated list of four floats encoded as a string;
        # fall back to the 2x2 identity matrix on any parse failure.
        try:
            a00, a01, a10, a11 = (float(r) for r in rold.split(','))
            R_old = [[a00, a01], [a10, a11]]
        except:
            R_old = [[1.0, 0.0], [0.0, 1.0]]
        return R_old
# ------------
class Root(object):
    """Root CherryPy handler: serves the list of available dataset names."""

    def __init__(self, names_list):
        # dataset directory names discovered on disk at startup
        self.data_names = names_list

    @cherrypy.expose
    @cherrypy.tools.gzip()
    def index(self):
        """Return the dataset names as a JSON array."""
        return json.dumps(self.data_names)
# Storing server name and port in a json file for easy config
server_filename = 'server_conf.json'
server_opts = json.loads(open(server_filename).read())
# Go through data directory and add methods to root for each data set
data_dir = server_opts['path_data_dir']
vis_page = 'district_path.html'
data_paths = [xx for xx in glob.glob(os.path.join(data_dir,'*')) if os.path.isdir(xx)]
data_dirnames = [os.path.basename(xx) for xx in data_paths]
# Storing the dataset names in the root so they can easily be passed to the html pages
root = Root(data_dirnames)
# This adds the methods for each data directory
for ii,name in enumerate(data_dirnames):
print name, data_paths[ii]
setattr(root, name, PathServer(data_paths[ii]))
# add the resource index, which will list links to the data sets
base_url = 'http://' + server_opts['server_name'] + '/~' + server_opts['account'] + '/' + server_opts['path_web_path'] + '/' + vis_page
root.resource_index = ResourceIndex(server_url=base_url, data_names=data_dirnames)
# Start up server
cherrypy.config.update({
# 'tools.gzip.on' : True,
'server.socket_port': server_opts['path_port'],
# 'server.socket_host':'127.0.0.1'
'server.socket_host':server_opts['server_name']
})
cherrypy.quickstart(root)
| [
"emonson@cs.duke.edu"
] | emonson@cs.duke.edu |
a91f1a29d2b88913cdb79f5181f207f5e3eadd65 | 05e634a232574f676434dfa8e4183f3d0a1a4bc9 | /paddlecv/ppcv/ops/connector/base.py | 9d315823ec24a76a0e34664c97122662ff637792 | [
"Apache-2.0"
] | permissive | PaddlePaddle/models | 67ac00d93c5255ac64a9d80ae5be2e8927e47cee | 8042c21b690ffc0162095e749a41b94dd38732da | refs/heads/release/2.4 | 2023-09-04T15:23:59.543625 | 2023-07-20T11:54:16 | 2023-07-20T11:54:16 | 88,868,842 | 7,633 | 3,597 | Apache-2.0 | 2023-09-05T23:23:54 | 2017-04-20T13:30:15 | Python | UTF-8 | Python | false | false | 1,062 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import numpy as np
from ppcv.ops.base import BaseOp
class ConnectorBaseOp(BaseOp):
    """Base class for connector ops.

    Reads the op name from the model config and namespaces every output key
    as "<op name>.<key>".
    """

    def __init__(self, model_cfg, env_cfg=None):
        super(ConnectorBaseOp, self).__init__(model_cfg, env_cfg)
        self.name = model_cfg["name"]
        # prefix each declared output key with this op's name
        self.output_keys = [
            '{}.{}'.format(self.name, key) for key in self.get_output_keys()
        ]

    @classmethod
    def type(self):
        """Category tag used by the op registry."""
        return 'CONNECTOR'
| [
"noreply@github.com"
] | noreply@github.com |
dde962a6155bab28965c2ed4dfa4a581508ce225 | 69d3680f881833a0a4906ad708eac11401bc03c6 | /python3/2. 01背包问题.py | 2741d3ecf6749f95de6819feb609bb510721b0ff | [] | no_license | menghuu/YALeetcode | 21df4b5ea6cb0a249263b0ce2df37e7580477ddd | 1959a884bb1cc9f2f1acb1ba6f413498ea0d1aca | refs/heads/master | 2023-08-18T03:55:41.470428 | 2021-09-11T12:39:02 | 2021-09-11T12:39:02 | 269,104,152 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 m <m@meng.hu>
#
# Distributed under terms of the MIT license.

"""
0/1 knapsack: read N items and capacity V from stdin, print the maximum value.
"""
import sys

# First input line: N (number of items) and V (knapsack capacity).
N, V = map(int, sys.stdin.readline().strip().split())
# dps[j] = best value achievable with capacity j
# (1-D rolling array over the classic dps[i][j] table).
dps = [0 for _ in range(V + 1)]
# dps[i][j]
for _ in range(N):
    # Each item line: v (volume/weight) and w (value).
    v, w = map(int, sys.stdin.readline().strip().split())
    # Iterate capacity downwards so each item is taken at most once.
    for j in range(V, v - 1, -1):
        dps[j] = max(dps[j], dps[j - v] + w)
print(dps[-1])
| [
"m@meng.hu"
] | m@meng.hu |
28c3703bfafc76e25ef8fc5c9da913bae9874f5d | cc6154486d4546a1394aac53dc5cfa5b1c4dfd02 | /docs/source/conf.py | 24d6fc870cd9b14bf251caaaa8fe5c7eec8edf10 | [
"BSD-3-Clause"
] | permissive | AustralianSynchrotron/tiled | 3c4448030c5fb53c952790ad4c501291bf427214 | 307d2f3b7e9b841afdf5af716f218584e4c3d530 | refs/heads/main | 2023-08-29T04:19:20.768182 | 2021-10-14T00:38:00 | 2021-10-14T00:38:00 | 416,939,893 | 0 | 0 | BSD-3-Clause | 2021-10-14T00:36:20 | 2021-10-14T00:36:19 | null | UTF-8 | Python | false | false | 9,066 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# tiled documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 28 12:35:56 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.githubpages',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinxcontrib.openapi',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting',
'matplotlib.sphinxext.plot_directive',
'numpydoc',
'sphinx_click',
'sphinx_copybutton',
'myst_parser',
]
# Configuration options for plot_directive. See:
# https://github.com/matplotlib/matplotlib/blob/f3ed922d935751e08494e5fb5311d3050a3b637b/lib/matplotlib/sphinxext/plot_directive.py#L81
plot_html_show_source_link = False
plot_html_show_formats = False
# Generate the API documentation when building
autosummary_generate = True
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'tiled'
copyright = '2021, Bluesky Collaboration'
author = 'Bluesky Collaboration'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import tiled
# The short X.Y version.
version = tiled.__version__
# The full version, including alpha/beta/rc tags.
release = tiled.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext trees.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'tiled'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'tiled.tex', 'tiled Documentation',
'Contributors', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tiled', 'tiled Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'tiled', 'tiled Documentation',
author, 'tiled', 'Tile-based access to SciPy/PyData structures over the web in many formats',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
'matplotlib': ('https://matplotlib.org', None),
}
import yaml
def generate_schema_documentation(header, schema, target):
    """Render a markdown reference page from a header file plus a schema YAML.

    Parameters
    ----------
    header : str
        Path to a markdown header file; its first line is dropped and the
        remaining lines are kept verbatim (minus trailing newlines).
    schema : str
        Path to the YAML schema whose ``properties``/``items`` tree is turned
        into nested, anchored markdown headings.
    target : str
        Path of the markdown file to write.
    """
    # header: drop the first line, strip newlines from the rest
    with open(header, "r") as f:
        header_md = [ln.strip("\n") for ln in f.readlines()[1:]]

    # schema
    with open(schema, "r") as f:
        data = yaml.safe_load(f)

    def parse_schema(d, md=None, depth=0, pre=""):
        """
        Generate markdown headers from a passed python dictionary created by
        parsing a schema.yaml file.
        """
        # Fix: the original used a mutable default argument (md=[]); use a
        # None sentinel so each top-level call starts with a fresh list.
        if md is None:
            md = []

        # JSON-schema conditional: descend into the "then" branch.
        if "then" in d:
            d = d["then"]

        if "properties" in d:
            depth += 1
            # Create markdown headers for each schema level
            for key, val in d["properties"].items():
                md.append("(schema_%s)=" % (pre + key))
                md.append("#" * (depth + 1) + " " + pre + key)
                md.append("")
                if "description" in val:
                    for ln in val["description"].split("\n"):
                        md.append(ln)
                    md.append("")
                parse_schema(val, md, depth, pre + "{}.".format(key))
            depth -= 1

        if "items" in d:
            depth += 1
            # Array items: label the heading with "[item]." between the
            # parent prefix and the child key.
            if "properties" in d["items"]:
                for key, val in d["items"]["properties"].items():
                    md.append("(schema_%s)=" % (pre + key))
                    md.append("#" * (depth + 1) + " " + pre[:-1] + "[item]." + key)
                    md.append("")
                    if "description" in val:
                        for ln in val["description"].split("\n"):
                            md.append(ln)
                        md.append("")
                    parse_schema(val, md, depth, pre + "{}.".format(key))
            depth -= 1

        return md

    schema_md = parse_schema(data)

    # reference = header + schema
    reference_md = header_md + schema_md
    with open(target, "w") as f:
        f.write("\n".join(reference_md))
# Generate the two schema-reference pages consumed by the docs build.
generate_schema_documentation(
    "reference/service-configuration-header.txt",
    "../../tiled/schemas/service_configuration.yml",
    "reference/service-configuration.md",
)
generate_schema_documentation(
    "reference/client-profiles-header.txt",
    "../../tiled/schemas/client_profiles.yml",
    "reference/client-profiles.md",
)

from tiled.trees.in_memory import Tree
from tiled.authenticators import DummyAuthenticator
from tiled.server.app import serve_tree

# Build a minimal (empty-tree) server just to extract its OpenAPI spec,
# which is dumped to reference/api.yml below for sphinxcontrib-openapi.
app = serve_tree(Tree({}), authentication={"authenticator": DummyAuthenticator()})
api = app.openapi()
with open("reference/api.yml", "w") as file:
yaml.dump(api, file) | [
"dallan@bnl.gov"
] | dallan@bnl.gov |
9e5e7c3a4ebae93fb0337ad9bac489c04c22b1bd | f444b54d57d4c98548e57aec73163ce5a591ebc3 | /Pandas v2.py | 15994d5e4571b39c0baf4ab11e2c00817ba21505 | [] | no_license | whitebeard4708/MiniProjectCZ1003 | 8e238965737edf41f03708bfa8a262ea5a2c247a | d44e19e8c014803ca928f081e829d3eceedf3cad | refs/heads/master | 2020-04-25T22:35:23.543020 | 2019-02-04T04:03:26 | 2019-02-04T04:03:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,064 | py | import pandas as pd
from pandas import DataFrame
from load_data import *
# Use the canteen name as the row index for both tables.
infocan = infocan.set_index('Canteen')
df = df.set_index(['Canteen'])
#takes in foodtype = ['Food1','food2' etc], pricerange = [lower, higher as floats/int], the search term
# rating = int(1 to 5) or 0 if not specified
def searchfood(foodtype, pricerange, rating, search, df):
    """Filter the menu DataFrame by food type, price range, rating and keywords.

    foodtype: list of allowed 'Food Type' values ([] means all types).
    pricerange: [low, high] inclusive price bounds.
    rating: minimum 'Rating' to keep (0 means no rating filter).
    search: space-separated keywords; every word must appear (case-insensitive)
        in 'Menu Item'. The single-space string ' ' disables keyword filtering.
    df: source DataFrame; returned filtered copy leaves it untouched.
    """
    result = df.copy()

    # food type filter (skipped when no types were requested)
    if foodtype != []:
        result = result[result['Food Type'].isin(foodtype)]

    # inclusive price bounds
    low, high = pricerange[0], pricerange[1]
    result = result[(result['Price'] >= low) & (result['Price'] <= high)]

    # minimum rating (0 = unspecified)
    if rating != 0:
        result = result[result['Rating'] >= rating]

    # keyword filter: each word must be contained in the lowercased menu item
    if search != ' ':
        for word in search.lower().split():
            result = result[result['Menu Item'].str.lower().str.contains(word)]

    return result
#function to sort by rating given the DataFrame filtered, the output is a list of indexes
def sort_by_rating(filter_df):
    """Order rows by ascending rating, then keep rows of the first 10 canteens."""
    return choose10(filter_df.sort_values("Rating"))
#function to sort by price given the DataFrame filtered, the output is a list of indexes
def sort_by_price(filter_df):
    """Order rows by ascending price, then keep rows of the first 10 canteens."""
    return choose10(filter_df.sort_values("Price"))
#function to sort by distance based on the user location, the filtered DataFrame and the DataFrame containing
#information about the canteen
def sort_by_location(user_loc, filter_df, infocan):
    """Attach a 'Distance' column (user to each canteen) and return the
    rows of the 10 nearest canteens, closest first.

    user_loc: (x, y) position in bitmap coordinates.
    infocan: canteen-info DataFrame indexed by canteen, with 'loc x'/'loc y'.
    """
    ux, uy = user_loc[0], user_loc[1]
    for canteen in filter_df.index.unique():
        dx = ux - infocan.loc[canteen]['loc x']
        dy = uy - infocan.loc[canteen]['loc y']
        # Euclidean distance in bitmap units, scaled to approximate km.
        filter_df.at[canteen, 'Distance'] = ((dx ** 2 + dy ** 2) ** (1 / 2)) * 0.0025
    # nearest canteens first, capped at 10
    return choose10(filter_df.sort_values('Distance'))
def choose10(filter_df):
    """Return the rows belonging to at most the first 10 distinct index labels.

    Preserves the incoming row order; returns an empty DataFrame when the
    input has no rows.
    """
    selected = []
    for position, label in enumerate(filter_df.index.unique(), start=1):
        selected.append(filter_df.loc[label])
        if position >= 10:
            break
    if not selected:
        return pd.DataFrame()
    # concatenate the per-label selections back into one frame
    return pd.concat(selected)
# Example query: any food type, price 1-100, rating >= 1, menu containing "Rice".
result = searchfood([], [1,100], 1, 'Rice', df)
#t = sort_by_location((441,430), result, infocan)
#t = sort_by_price(result)
#print(t)
"noreply@github.com"
] | noreply@github.com |
d78302d7816ed974d3920bbf60e021cd1e37969a | ae3b1315aa3b2bce2c893bb7a53c34a6d14e6a65 | /myslabs/manage.py | 1b0e7153493dd7368ff12d456a7098aac43dd7aa | [] | no_license | Durga592/projects | a3948e9ccc5a677548fec78c555d631dee16049d | a6c4556fa1f41f8bf434d811f5ec9c8c38689849 | refs/heads/master | 2020-03-23T08:13:55.097014 | 2018-09-25T07:23:09 | 2018-09-25T07:23:09 | 141,315,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myslabs.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"pillamdurga592@gmail.com"
] | pillamdurga592@gmail.com |
5bd61c7c7efdab5323ed156dd3f98da017bb79ca | 0c13436580d409218a644df4bde0f9499ebdc4b4 | /CyberPi/Python with CyberPi 100(童芯派 ure 数学计算语音小助手 脱机运行版本).py | a105e0f9515395dd69328b71408065a64f789c7c | [
"MIT"
] | permissive | binfen1/PythonWithHardware | e4c3e32bc7326b9f84103bd4f55e82be2bbda6ba | 3e5ae890cb7a8e5e2c5a636092aca9ce21728ab2 | refs/heads/master | 2023-04-13T01:56:15.835900 | 2021-04-30T14:57:31 | 2021-04-30T14:57:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,454 | py | """"
名称:100 童芯派 ure 数学计算语音小助手 脱机运行版本
硬件: 童芯派
使用童芯派的语音识别功能,MicroPython的ure模块对识别的结果进行处理。
实现提问数学计算题,并得出结果的功能。
但是只能实现两个整数之间的加减乘除运算。
这个程序仅支持离线模式进行运行。
使用到的API及功能解读:
1.import ure
导入ure模块
2.get = ure.match('(\d+)(乘以|加|减|除以)(\d+)',result[0:-1])
根据定义的正则表达式对识别结果的数据进行处理。()的作用是对字符串进行分组。
并将结果存放在变量get当中。
3.get.group(1)
获取get变量当中指定分组的数据
难度:⭐⭐⭐⭐⭐
支持的模式:上传模式
无
"""
# ---------程序分割线----------------程序分割线----------------程序分割线----------
import cyberpi
import time
import random
import ure

# Result of the most recent calculation.
c = 0

# Cloud-service setup and boot messages (strings are Chinese UI text).
cyberpi.cloud.setkey("请输入云服务授权码")
cyberpi.console.clear()
cyberpi.console.println("童芯派启动成功!")
cyberpi.wifi.connect("WiFi名称", "WiFi密码")
cyberpi.console.println("WIFI连接中...")
# Block until the Wi-Fi link is up.
while not cyberpi.wifi.is_connect():
    pass
cyberpi.led.on(0, 0, 255)
cyberpi.console.clear()
cyberpi.display.label("WiFi连接成功", 16, "center")
time.sleep(1)

# Main loop: press button A to speak an arithmetic question, hear the answer.
while True:
    if cyberpi.controller.is_press("a"):
        cyberpi.console.clear()
        cyberpi.console.println("开始语音识别")
        cyberpi.led.on(0, 255, 0)
        # Record ~2 seconds of Chinese speech and fetch the transcription.
        cyberpi.cloud.listen("chinese", 2)
        result = cyberpi.cloud.listen_result()
        cyberpi.console.println(result)
        try:
            # Parse "<int><op><int>" where op is 乘以/加/减/除以 (x, +, -, /);
            # result[0:-1] drops the recognizer's trailing punctuation.
            get = ure.match('(\d+)(乘以|加|减|除以)(\d+)',result[0:-1])
            a = int(get.group(1))
            b = int(get.group(3))
            calculate = get.group(2)
            if '乘以' in calculate:
                c = a * b
            elif '除以' in calculate:
                c = a / b
            elif '加' in calculate:
                c = a + b
            elif '减' in calculate:
                c = a - b
            # Print and speak the answer.
            cyberpi.console.println('答案为'+str(c))
            cyberpi.cloud.tts("chinese", '答案为'+str(c))
        except BaseException:
            # ure.match returned None or int() failed: ask the user to retry.
            cyberpi.console.println('数据不足或超出计算能力范围,请重新按下A键')
            cyberpi.cloud.tts("chinese", "数据不足或超出计算能力范围,请重新按下A键")
| [
"36698850+SCSZCC@users.noreply.github.com"
] | 36698850+SCSZCC@users.noreply.github.com |
2d1bf385aa0af57dac548b94154d0021b5bcbf2c | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_069/ch31_2019_09_28_01_31_28_102445.py | f9f4884fb86277a703c6d470865f3c6e798b155a | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | valor = float(input('Qual o valor da casa?' ))
salario = float(input('Qual o seu salário? '))
tempo = int(input('Em quantos anos deseja pagar? '))
prestacao = valor/tempo*12
if salario >= 0.3*prestacao:
print ('Empréstimo aprovado')
else:
print ('Empréstimo não aprovado') | [
"you@example.com"
] | you@example.com |
3e023f193c68c9ab08ac4d6d2ac5f80e9f724559 | 28c598bf75f3ab287697c7f0ff1fb13bebb7cf75 | /starter.mmo/genesis/spell/spellmain.py | 170cb2340251bc0dc466557b56c4e020fc7eafc1 | [] | no_license | keaysma/solinia_depreciated | 4cb8811df4427261960af375cf749903d0ca6bd1 | 4c265449a5e9ca91f7acf7ac05cd9ff2949214ac | refs/heads/master | 2020-03-25T13:08:33.913231 | 2014-09-12T08:23:26 | 2014-09-12T08:23:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py |
from genesis.dbdict import DBSpellProto
from mud.world.defines import *
import invulnerability
| [
"mixxit@soliniaonline.com"
] | mixxit@soliniaonline.com |
4af02874af21fb00dd02a0dd7d887d70dff3dce6 | 9bae02fa92143f559e6005fc517062c25446d899 | /OpenPose/utils/openpose_net.py | f1aa9fa7906748d3bc7609c477fe1b1e5e2bb7e4 | [] | no_license | ark0723/MyProject | 09a7242421dbfadd8d84789987afb26ff69b6554 | b795cedca3a83ae65c3e5906251c700c68a4fed1 | refs/heads/master | 2022-06-04T22:20:34.161126 | 2022-05-11T03:39:49 | 2022-05-11T03:39:49 | 112,449,372 | 0 | 0 | null | 2022-03-07T19:03:59 | 2017-11-29T08:41:49 | Jupyter Notebook | UTF-8 | Python | false | false | 6,659 | py | # -*- coding: utf-8 -*-
"""openpose_net.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1pltq4eMG2H-0IrJXxbxkcTl6D5oqqIdD
# OPENPOSE
- consists of 7 modules: feature module, 6 stage modules
1. Feature module (VGG-19): 3x368x368 -> 128x46x46
2. Stage1: 128x46x46 -> PAFs(38x46x46) and heatmap(19x46x46)
3. stage2 ~ stage6: featuremap + PAFs + heatmap (185x46x46) -> PAFs(38x46x46) and heatmap(19x46x46)
"""
import torch
import torch.nn as nn
from torch.nn import init
import torchvision
class OpenPoseNet(nn.Module):
    """OpenPose network: a shared VGG-based feature module followed by six
    refinement stages, each with a PAFs branch (*_1) and a confidence-heatmap
    branch (*_2). Stages 2-6 consume [PAFs, heatmaps, features] concatenated
    along the channel axis.
    """

    def __init__(self):
        super(OpenPoseNet, self).__init__()

        # feature module (VGG-19 backbone + two extra convs)
        self.model0 = OpenPose_Feature()

        # stage 1-6
        # PAFs branches (part affinity fields)
        self.model1_1 = make_OpenPose_block('block1_1')
        self.model2_1 = make_OpenPose_block('block2_1')
        self.model3_1 = make_OpenPose_block('block3_1')
        self.model4_1 = make_OpenPose_block('block4_1')
        self.model5_1 = make_OpenPose_block('block5_1')
        self.model6_1 = make_OpenPose_block('block6_1')

        # confidence heatmap branches
        self.model1_2 = make_OpenPose_block('block1_2')
        self.model2_2 = make_OpenPose_block('block2_2')
        self.model3_2 = make_OpenPose_block('block3_2')
        self.model4_2 = make_OpenPose_block('block4_2')
        self.model5_2 = make_OpenPose_block('block5_2')
        self.model6_2 = make_OpenPose_block('block6_2')

    def forward(self, x):
        """Run all stages; return the final (PAFs, heatmaps) pair plus the
        per-stage outputs collected for the multi-stage loss."""
        # feature module
        out1 = self.model0(x)

        # stage1: branches operate on the raw features
        out1_1 = self.model1_1(out1)  # PAFs
        out1_2 = self.model1_2(out1)  # confidence heatmap

        # stage2: concatenate PAFs + heatmaps + features along channels
        out2 = torch.cat([out1_1, out1_2, out1], 1)  # add channels: 185x46x46
        out2_1 = self.model2_1(out2)
        out2_2 = self.model2_2(out2)

        # stage3
        out3 = torch.cat([out2_1, out2_2, out1], 1)
        out3_1 = self.model3_1(out3)
        out3_2 = self.model3_2(out3)

        # stage4
        out4 = torch.cat([out3_1, out3_2, out1], 1)
        out4_1 = self.model4_1(out4)
        out4_2 = self.model4_2(out4)

        # stage5
        out5 = torch.cat([out4_1, out4_2, out1], 1)
        out5_1 = self.model5_1(out5)
        out5_2 = self.model5_2(out5)

        # stage6
        out6 = torch.cat([out5_1, out5_2, out1], 1)
        out6_1 = self.model6_1(out6)
        out6_2 = self.model6_2(out6)

        # save the output from each stage, alternating PAFs / heatmaps,
        # so the training loop can sum per-stage losses
        saved_for_loss = []
        saved_for_loss.append(out1_1)  # PAFs loss
        saved_for_loss.append(out1_2)  # Confidence Heatmap loss
        saved_for_loss.append(out2_1)
        saved_for_loss.append(out2_2)
        saved_for_loss.append(out3_1)
        saved_for_loss.append(out3_2)
        saved_for_loss.append(out4_1)
        saved_for_loss.append(out4_2)
        saved_for_loss.append(out5_1)
        saved_for_loss.append(out5_2)
        saved_for_loss.append(out6_1)
        saved_for_loss.append(out6_2)

        return (out6_1, out6_2), saved_for_loss
"""# Implementation: Feature module
- VGG-19
"""
class OpenPose_Feature(nn.Module):
    """Feature extractor: first ten conv layers of pretrained VGG-19 plus two
    extra 3x3 convs mapping 512 -> 256 -> 128 channels.
    """

    def __init__(self):
        super(OpenPose_Feature, self).__init__()

        # first 23 entries of vgg19.features = 10 conv layers with their
        # ReLU/pooling layers, pretrained on ImageNet
        backbone = torchvision.models.vgg19(pretrained=True).features[:23]

        # extra convolutions appended after the VGG slice; module names
        # continue the numbering ("23".."26")
        extra_layers = [
            torch.nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(inplace=True),
            torch.nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(inplace=True),
        ]
        for index, layer in enumerate(extra_layers, start=23):
            backbone.add_module(str(index), layer)

        self.model = backbone

    def forward(self, x):
        """Map a 3x368x368 image batch to 128x46x46 feature maps."""
        return self.model(x)
"""# Stage Module Implementation
- stage 1: C3+R -> C3+R -> C3+R -> C1+R(512) -> C1 -> PAFs(38x46x46) or heatmap(19x46x46)
- stage 2~6: C7+R -> C7+R -> C7+R -> C7+R -> C7+R -> C7+R -> C1 -> PAFs(38x46x46) or heatmap(19x46x46) : # of filters does not change
"""
def make_OpenPose_block(block_name):
    """Build one OpenPose stage branch as an ``nn.Sequential``.

    ``block_name`` has the form ``'block<stage>_<branch>'`` with stage 1-6
    and branch 1 (PAFs, 38 output maps) or branch 2 (confidence heatmaps,
    19 output maps):

    * stage 1: three 3x3 convs -> 1x1 conv to 512 -> 1x1 conv to output maps
    * stages 2-6: five 7x7 convs (the first takes the 185-channel concat
      input) -> 1x1 conv -> 1x1 conv to output maps

    Every conv except the last is followed by an in-place ReLU.  Conv
    weights are initialised N(0, 0.01) and biases to zero.
    """
    # Layer specs: {layer_name: [in_ch, out_ch, kernel, stride, padding]}.
    specs = {}
    for branch, out_maps in (('1', 38), ('2', 19)):
        # stage 1
        specs['block1_' + branch] = (
            [{'conv5_%d_CPM_L%s' % (i, branch): [128, 128, 3, 1, 1]}
             for i in (1, 2, 3)]
            + [{'conv5_4_CPM_L%s' % branch: [128, 512, 1, 1, 0]},
               {'conv5_5_CPM_L%s' % branch: [512, out_maps, 1, 1, 0]}])
        # stages 2-6
        for stage in range(2, 7):
            spec = [{'Mconv1_stage%d_L%s' % (stage, branch): [185, 128, 7, 1, 3]}]
            spec += [{'Mconv%d_stage%d_L%s' % (i, stage, branch): [128, 128, 7, 1, 3]}
                     for i in range(2, 6)]
            spec += [{'Mconv6_stage%d_L%s' % (stage, branch): [128, 128, 1, 1, 0]},
                     {'Mconv7_stage%d_L%s' % (stage, branch): [128, out_maps, 1, 1, 0]}]
            specs['block%d_%s' % (stage, branch)] = spec

    # Instantiate the layers for the requested block.
    modules = []
    for entry in specs[block_name]:
        for name, val in entry.items():
            if 'pool' in name:
                modules.append(nn.MaxPool2d(kernel_size=val[0], stride=val[1],
                                            padding=val[2]))
            else:
                modules.append(nn.Conv2d(in_channels=val[0], out_channels=val[1],
                                         kernel_size=val[2], stride=val[3],
                                         padding=val[4]))
                modules.append(nn.ReLU(inplace=True))

    # Drop the trailing ReLU: the branch output is a raw regression map.
    net = nn.Sequential(*modules[:-1])

    def _init_weights_norm(m):
        # Applied to every submodule via net.apply().
        if isinstance(m, nn.Conv2d):
            init.normal_(m.weight, std=0.01)
            if m.bias is not None:
                init.constant_(m.bias, 0.0)

    net.apply(_init_weights_norm)
    return net
'''
# test model
net = OpenPoseNet()
net.train()
# create image data
batch_size = 2
new_img = torch.rand(batch_size, 3, 368, 368)
# output
out = net(new_img)
print(out)
'''
| [
"ark0723@gmail.com"
] | ark0723@gmail.com |
4db29c010ff2a27a08eb9c384fd84310ac6fabac | afa5a0272775814beca41aec74ede64aa767688b | /SearchFiles2.py | b307e86a439d7a078bdcfb321174ed3483e7f1b8 | [] | no_license | WinterSong/webbbbb | 2c9e04b525ecf95d86559f24daf6ae3ba3157ace | bc5ece4bcfb541baa1fe734c78558c8521f809d0 | refs/heads/master | 2021-01-22T23:20:44.478721 | 2014-11-07T14:11:05 | 2014-11-07T14:11:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,140 | py | #!/usr/bin/env python
#coding:gbk
from lucene import \
QueryParser, IndexSearcher, StandardAnalyzer, SimpleFSDirectory, File, \
VERSION, initVM, Version, BooleanQuery, BooleanClause
import lucene
import jieba
"""
This script is loosely based on the Lucene (java implementation) demo class
org.apache.lucene.demo.SearchFiles. It will prompt for a search query, then it
will search the Lucene index in the current directory called 'index' for the
search query entered against the 'contents' field. It will then display the
'path' and 'name' fields for each of the hits it finds in the index. Note that
search.close() is currently commented out because it causes a stack overflow in
some cases.
"""
def run(command,searcher, analyzer):
    """Tokenize *command* with jieba and run an AND (MUST) boolean query
    against the 'contents' field of the given Lucene searcher.

    Returns a list of matching Lucene Document objects (top 50 hits), or
    None for an empty query.

    NOTE(review): `command.decode('utf-8')` implies Python 2 byte-string
    input — confirm callers pass encoded bytes.
    """
    #while True:
        #print
        #print "Hit enter with no input to quit."
        #command = raw_input("Query:")
        #command = unicode(command,'gbk')
    command = command.decode('utf-8')
    if command == '':
        return
    #print
    #print "Searching for:",
    # AND together one sub-query per segmented token.
    querys = BooleanQuery()
    for i in jieba.cut(command):
        #print i,
        query = QueryParser(Version.LUCENE_CURRENT, "contents",
                            analyzer).parse(i)
        querys.add(query, BooleanClause.Occur.MUST)
    scoreDocs = searcher.search(querys, 50).scoreDocs
    #print "\n%s total matching documents." % len(scoreDocs)
    # Resolve each hit to its stored Document.
    list1 = []
    for scoreDoc in scoreDocs:
        doc = searcher.doc(scoreDoc.doc)
        list1.append(doc)
    return list1
    # print 'path:', doc.get("path"),'title:',doc.get('title'),\
    #     'url:',doc.get('url'),'name:', doc.get("name")
vm_env = 0
#if __name__ == '__main__':
def begining(command):
    """Open the on-disk Lucene index ('index' directory), search it for
    *command* via run(), and return the list of matching documents.

    Side effect: initializes the JVM and stores the handle in the
    module-level ``vm_env`` — the JVM must be attached before any other
    lucene call, so the statement order here matters.
    """
    STORE_DIR = "index"
    global vm_env
    vm_env = initVM()
    vm_env.attachCurrentThread()
    #print 'lucene', VERSION
    directory = SimpleFSDirectory(File(STORE_DIR))
    searcher = IndexSearcher(directory, True)
    # Whitespace analyzer: terms were pre-segmented at index time.
    analyzer = lucene.WhitespaceAnalyzer(Version.LUCENE_CURRENT)
    a = run(command, searcher, analyzer)
    searcher.close()
    return a
| [
"ol7650@sjtu.edu.cn"
] | ol7650@sjtu.edu.cn |
5fef8ea7f91e094835ace56319fab0b154591baf | 18ca2e0f98b98941ff9d9e098e0be89166c8b87c | /Abp/Cp17/c17_7_1_resizeAndAddLogo2.py | 1e9e0f0d7ca76bb52331717c0a1cfcf67a729979 | [] | no_license | masa-k0101/Self-Study_python | f20526a9cd9914c9906059678554285bfda0c932 | 72b364ad4da8485a201ebdaaa430fd2e95681b0a | refs/heads/master | 2023-03-07T07:38:27.559606 | 2021-02-22T16:24:47 | 2021-02-22T16:24:47 | 263,381,292 | 1 | 0 | null | 2020-06-09T17:32:06 | 2020-05-12T15:47:48 | Python | UTF-8 | Python | false | false | 1,571 | py | #! python3
# -*- coding: utf-8 -*-
# 演習プロジェクト 17.7.1用に改造
# resizeAndAddLogo2.py - カレントディレクトリのすべての画像を300x300に収まる
# ようにサイズ変更し、catlogo.pngを右下に追加する。
import os
from PIL import Image

SQUARE_FIT_SIZE = 300
LOGO_FILENAME = 'catlogo.png'

# Load the logo once; it is pasted onto every processed image.
logo_im = Image.open(LOGO_FILENAME)
logo_width, logo_height = logo_im.size

os.makedirs('withLogo', exist_ok=True)

# Process every image file in the current directory.
for filename in os.listdir('.'):
    lfname = filename.lower()  # extension check is case-insensitive
    # Skip anything that is not a PNG/JPG/GIF/BMP, and skip the logo itself.
    if not lfname.endswith(('.png', '.jpg', '.gif', '.bmp')) \
            or lfname == LOGO_FILENAME:
        continue

    im = Image.open(filename)

    # Shrink so the image fits in a 300x300 box (aspect ratio preserved).
    im.thumbnail((SQUARE_FIT_SIZE, SQUARE_FIT_SIZE))
    width, height = im.size

    # Skip images smaller than twice the logo in either dimension.
    if width < logo_width * 2 or height < logo_height * 2:
        continue

    # Paste the logo into the bottom-right corner (logo used as its own mask).
    print('ロゴを追加中 {}...'.format(filename))
    im.paste(logo_im, (width - logo_width, height - logo_height), logo_im)

    # Save the result into the withLogo directory.
    im.save(os.path.join('withLogo', filename))
"noreply@github.com"
] | noreply@github.com |
c22c01818686115aaa4f416dc26874227498f59a | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/004621ef8e2f9da82e0ed2be016e874230d93a0d-<profiles>-fix.py | a3ec5d59222b96ebf703c7962993fe499e3d1581 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,244 | py | @property
def profiles(self):
'Returns a list of profiles from the API\n\n The profiles are formatted so that they are usable in this module and\n are able to be compared by the Difference engine.\n\n Returns:\n list (:obj:`list` of :obj:`dict`): List of profiles.\n\n Each dictionary in the list contains the following three (3) keys.\n\n * name\n * context\n * fullPath\n\n Raises:\n F5ModuleError: If the specified context is a value other that\n ``all``, ``server-side``, or ``client-side``.\n '
if ('items' not in self._values['profiles']):
return None
result = []
for item in self._values['profiles']['items']:
context = item['context']
if (context == 'serverside'):
context = 'server-side'
elif (context == 'clientside'):
context = 'client-side'
name = item['name']
if (context in ['all', 'server-side', 'client-side']):
result.append(dict(name=name, context=context, full_path=item['fullPath']))
else:
raise F5ModuleError("Unknown profile context found: '{0}'".format(context))
return result | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
499dac698c394953c5ffd843f634492c13239252 | 7c235b03e1a20653fe6312a5fe8d4893f4e4d8d5 | /manage.py | f14e58b2973ae14082ae86a197a6425935308cad | [
"MIT"
] | permissive | wwangwe/Django-Forms | b143eca6e715f69fa11c5fc775140257510abc80 | ce38d711271e2c999a767acdeb60978bb37e77c1 | refs/heads/master | 2022-11-17T05:49:56.459768 | 2020-07-18T06:24:04 | 2020-07-18T06:24:04 | 280,128,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Django's command-line entry point: dispatch administrative tasks."""
    # Default settings module; respects an already-set environment value.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dforms.settings')
    try:
        # Imported lazily so a missing Django produces the helpful
        # message below instead of a bare import traceback.
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"wangwetymothy@gmail.com"
] | wangwetymothy@gmail.com |
f1be83c2dcca72d6de7b4abaf1022f182b522ed1 | 2cbfac9fe3650a35fc52054ca30114f0af075634 | /whoYouServer/asgi.py | eb0ab884e1a641f6ce3ac8129fa94de197921290 | [] | no_license | mgjeffries/WhoYou-Server | 7b9bea5f4b288bc5778ac70f086a69fb974f5499 | 54d6c2922d80e9360e31e3a6e6921b7742c27cfe | refs/heads/main | 2023-02-24T13:35:51.093925 | 2021-01-29T21:55:36 | 2021-01-29T21:55:36 | 320,330,393 | 0 | 0 | null | 2021-01-29T21:55:37 | 2020-12-10T16:36:53 | Python | UTF-8 | Python | false | false | 401 | py | """
ASGI config for whoYouServer project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os

from django.core.asgi import get_asgi_application

# Point Django at the project settings before the application is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'whoYouServer.settings')

# Module-level ASGI callable picked up by the ASGI server.
application = get_asgi_application()
| [
"gib.jeffries@gmail.com"
] | gib.jeffries@gmail.com |
149cbde05cc6385c66a90062e7ac22763bf9aed1 | a03a7935a191d63bee76fd3b85a61ee27f98904a | /test/tests/databases/pdbdatabase.py | 819adafd698290f378a6eb7b80bb41c8c6c1bf27 | [] | no_license | cchriste/visit | 57091c4a512ab87efd17c64c7494aa4cf01b7e53 | c72c413f571e56b52fb7221955219f11f4ba19e3 | refs/heads/master | 2020-04-12T06:25:27.458132 | 2015-10-12T15:41:49 | 2015-10-12T15:41:49 | 10,111,791 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,311 | py | # ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: pdbdatabase.py
#
# Tests: mesh - 2D,3D curvilinear, single domain
# plots - Pseudocolor, Subset, Vector
# operators - Clip
#
# Programmer: Brad Whitlock
# Date: Thu Sep 25 09:31:28 PDT 2003
#
# Modifications:
# Brad Whitlock, Wed Mar 31 09:11:08 PDT 2004
# I added code to clear the engine cache to reduce memory usage.
#
# Brad Whitlock, Fri Apr 9 16:54:15 PST 2004
# I added TestSection to divide up the tests a little.
#
# Brad Whitlock, Thu Sep 2 12:08:59 PDT 2004
# I replaced some deprecated calls with their new equivalents.
#
# Brad Whitlock, Tue Dec 7 17:52:33 PST 2004
# I added a test for mixvars in Flash files.
#
# Mark C. Miller, Sat Feb 3 00:42:05 PST 2007
# Added tests for array variables
# ----------------------------------------------------------------------------
##
## This creates a name for a test.
##
def CreateTestName(testName, testIndex):
    """Return the baseline image name '<testName>_<NN>' (zero-padded index)."""
    return '{0}_{1:02d}'.format(testName, testIndex)
def sv3():
    """Apply the canonical 3D camera used by all the 3D subset tests."""
    settings = {
        'viewNormal': (0.516282, 0.582114, 0.628169),
        'focus': (0, 0, 0),
        'viewUp': (-0.488576, 0.80261, -0.342213),
        'viewAngle': 30,
        'parallelScale': 43.589,
        'nearPlane': -87.178,
        'farPlane': 87.178,
        'imagePan': (0, 0),
        'imageZoom': 1.41577,
        'perspective': 1,
    }
    view = View3DAttributes()
    for attr, value in settings.items():
        setattr(view, attr, value)
    SetView3D(view)
##
## This function performs the test using the specified database.
##
def TestWithDatabase(db, testName):
    """Exercise 2D and 3D plots, the Clip operator, material selection,
    expressions, and array variables against one flavor of the PDB test
    database.

    db       -- database path/pattern to open
    testName -- prefix used for the baseline image names (indices 0-15)

    NOTE: the statement order is significant — plots, time-slider states
    and views build on each other between Test() snapshots.
    """
    TestSection("Testing with %s" % db)
    # Open the test database
    OpenDatabase(db)

    ##
    ## Do the 2D tests.
    ##

    # Add the plots.
    AddPlot("Subset", "material(mesh)")
    DrawPlots()

    # Do the first test in the series
    Test(CreateTestName(testName, 0))
    SetTimeSliderState(6)
    Test(CreateTestName(testName, 1))
    SetTimeSliderState(15)
    Test(CreateTestName(testName, 2))

    # Do a test on the last frame in the animation.
    SetTimeSliderState(22)
    Test(CreateTestName(testName, 3))

    AddPlot("Mesh", "mesh")
    DrawPlots()
    v = View2DAttributes()
    v.windowCoords = (-6.07862, -0.374491, 4.48986, 10.8545)
    v.viewportCoords = (0.2, 0.95, 0.15, 0.95)
    SetView2D(v)
    Test(CreateTestName(testName, 4))

    # Try turning off material 2
    SetActivePlots((0,1))
    TurnMaterialsOff("2")
    Test(CreateTestName(testName, 5))
    TurnMaterialsOn()
    ResetView()
    DeleteAllPlots()

    AddPlot("Pseudocolor", "mesh/a")
    DrawPlots()
    Test(CreateTestName(testName, 6))

    # Define a expression. I'm testing this because of the strange
    # <mesh/var> syntax that my plugin has.
    DefineVectorExpression("testexp1", "3.0 * {<mesh/lt>, <mesh/a>/399.0}")
    AddPlot("Vector", "testexp1")
    DrawPlots();
    vec = VectorAttributes()
    vec.nVectors = 1200
    vec.colorByMag = 0
    SetPlotOptions(vec)
    v.windowCoords = (-9.51217, -0.289482, 0.983025, 10.6717)
    v.viewportCoords = (0.2, 0.95, 0.15, 0.95)
    SetView2D(v)
    Test(CreateTestName(testName, 7))

    # Set the time back to frame 0
    SetTimeSliderState(0)
    ResetView()
    DeleteAllPlots()

    ##
    ## Do the 3D tests.
    ##
    AddPlot("Subset", "material2(revolved_mesh)")
    AddOperator("Clip")
    c = ClipAttributes()
    c.funcType = c.Plane
    c.plane1Status = 0
    c.plane2Status = 1
    c.plane3Status = 1
    SetOperatorOptions(c)
    DrawPlots()

    # Set the view
    sv3()
    Test(CreateTestName(testName, 8))

    SetTimeSliderState(6)
    sv3()
    Test(CreateTestName(testName, 9))
    SetTimeSliderState(15)
    sv3()
    Test(CreateTestName(testName, 10))

    # Do a test on the last frame in the animation.
    SetTimeSliderState(22)
    sv3()
    Test(CreateTestName(testName, 11))

    # Turn off some materials
    TurnMaterialsOff(("1", "3", "4"))
    sv3()
    Test(CreateTestName(testName, 12))
    TurnMaterialsOn()

    # Set the time back to frame 2
    SetTimeSliderState(2)
    ResetView()
    DeleteAllPlots()

    #
    # Test array variables
    #
    AddPlot("Pseudocolor","logical_mesh/marray_comps/comp_002")
    DrawPlots()
    Test(CreateTestName(testName, 13))
    DeleteAllPlots()
    ResetView()
    AddPlot("Pseudocolor","revolved_mesh/marray_comps/comp_002")
    DrawPlots()
    Test(CreateTestName(testName, 14))
    DeleteAllPlots()
    ResetView()
    AddPlot("Label","logical_mesh/marray")
    DrawPlots()
    Test(CreateTestName(testName, 15))

    # Set the time back to frame 0
    SetTimeSliderState(0)
    ResetView()
    DeleteAllPlots()
    CloseDatabase(db)
    # Clear cached engine data between database flavors to limit memory use.
    ClearCache("localhost")
#
# Test mixvars.
#
def TestMixvars(db):
    """Plot a mixed variable with and without forced material interface
    reconstruction (MIR) to verify mixvar handling in Flash/PDB files."""
    TestSection("Testing mixvars in Flash files")
    DeleteAllPlots()
    OpenDatabase(db)
    AddPlot("Pseudocolor", "mesh/mixvar")
    DrawPlots()
    ResetView()
    v = View2DAttributes()
    v.windowCoords = (-9.51866, 3.29394, 13.9258, 26.4126)
    v.viewportCoords = (0.2, 0.95, 0.15, 0.95)
    v.fullFrameActivationMode = v.Off
    SetView2D(v)
    Test("pdb_nomix")

    # Do the same plot but with forced MIR so the mixvar gets reconstructed.
    ClearWindow()
    m = GetMaterialAttributes()
    m.forceMIR = 1
    SetMaterialAttributes(m)
    DrawPlots()
    Test("pdb_mix")
    DeleteAllPlots()
#
# Run the test a few times with different versions of the database. We do this
# because we have the same database split up three different ways and all the
# ways a database can be split up must work.
#
# multi{00,01,02}.pdb - Contains multiple time states in each file but
# we group them all into "multi*.pdb database".
#
# family??.pdb - Contains a single time state in each file but
# we group them all into "family*.pdb database".
#
# allinone00.pdb - Contains all of the time states in one file.
#
# Same data split three ways: multi-state files, one-state-per-file
# family, and a single all-in-one file — each must load identically.
databases = (data_path("pdb_test_data/multi*.pdb database"),
             data_path("pdb_test_data/family*.pdb database"),
             data_path("pdb_test_data/allinone00.pdb"))
testNames = ("pdb_multi", "pdb_family", "pdb_allinone")

# Run the full battery against each database flavor.
for i in range(len(databases)):
    TestWithDatabase(databases[i], testNames[i])

# Do the mixvar test.
TestMixvars(databases[2])
Exit()
| [
"bonnell@18c085ea-50e0-402c-830e-de6fd14e8384"
] | bonnell@18c085ea-50e0-402c-830e-de6fd14e8384 |
2a37b57347e8945b94ea8041f9511b3b88e12a17 | 72af8e47d5786571bce1789fc047965de4f9ac92 | /api/__init__.py | ad16f4c6968b0f0d9cc72ec542f4a5cc4cc4663a | [] | no_license | 444thLiao/WES_pipelines | 18d488e7c01ca618b8a6916979e2d8f64d1aa631 | 06365dc6d91b8c1861c053970e2823c322e5814d | refs/heads/master | 2022-01-20T17:59:11.688758 | 2019-07-17T06:43:44 | 2019-07-17T06:43:44 | 93,579,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | import sys
from os.path import dirname
sys.path.insert(0,dirname(dirname(__file__)))
from luigi_pipelines.share_luigi_tasks import PrintReads, Annovar1, Annovar2
| [
"l0404th@gmail.com"
] | l0404th@gmail.com |
31dc0215be6259d4808edc3c665771c8756d1ffe | 5715ae1bdb14f2f736990f77a7a2fc4ffd8886df | /4.2join.py | fb8ea26f7c0f677a87e35b42f5eae8d4affaf055 | [] | no_license | simgek/simge4.2 | 00da58a3a08fe7d60c3aa2483e3daad5fe54df74 | 08fb73875f2b2565119af29e20392a2427952106 | refs/heads/master | 2020-06-16T16:52:06.763892 | 2019-07-07T11:13:44 | 2019-07-07T11:13:44 | 195,641,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | # .join metinsel değerleri bibirine eklemek için kullanılır seperator verilebilir
# str.join concatenates the items of an iterable using the string it is
# called on as the separator — here "-" between each input character.
metin = list(input("lütfen bir metin giriniz : "))
print("-".join(metin))
"simge.karademir@hotmail.com"
] | simge.karademir@hotmail.com |
d9df56bc05415594d4dace334232f9d8315b1664 | 3724a1b95e95e611cdd793d1af685f72dfea6b3e | /cloudcafe/networking/lbaas/lbaas_api/member/request.py | 50bac7f74346701af31e6329bccdc59bba639105 | [
"Apache-2.0"
] | permissive | kurhula/cloudcafe | 1b1e41994959cf959a49e19fea5cbda893d9c9df | 7d49cf6bfd7e1a6e5b739e7de52f2e18e5ccf924 | refs/heads/master | 2021-01-20T22:45:27.425724 | 2015-02-20T16:49:35 | 2015-02-20T16:49:35 | 31,156,531 | 0 | 1 | null | 2015-02-22T07:56:08 | 2015-02-22T07:56:07 | null | UTF-8 | Python | false | false | 5,537 | py | """
Copyright 2014 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import xml.etree.ElementTree as ET
from cafe.engine.models.base import AutoMarshallingModel
from cloudcafe.networking.lbaas.common.constants import Constants
class CreateMember(AutoMarshallingModel):
    """ Create Member Request Model
    @summary: An object that represents the the request data of a Member.
        Members are individual backend services which are being load balanced.
        Usually these would be web application servers. They are represented
        as a pool, IP address, Layer 4 port tuple.

    json ex:
        {
            "member": {
                "subnet_id": "SUBNET_ID",
                "tenant_id": "453105b9-1754-413f-aab1-55f1af620750",
                "address": "192.0.2.14",
                "protocol_port": 8080,
                "weight": 7,
                "admin_state_up": false
            }
        }

    xml ex:
        <member xmlns=""
            subnet_id="SUBNET_ID"
            tenant_id="453105b9-1754-413f-aab1-55f1af620750"
            address="192.0.2.14"
            protocol_port="8080"
            weight="7"
            admin_state_up="false"
        />
    """
    ROOT_TAG = 'member'

    def __init__(self, subnet_id, tenant_id, address, protocol_port,
                 weight=None, admin_state_up=None):
        """
        @summary: Create Member Object Model
        @param subnet_id: Subnet in which to access this member.
        @type subnet_id: str
        @param tenant_id: Tenant to which this member is owned.
        @type tenant_id: str
        @param address: IP address of pool member.
        @type address: str
        @param protocol_port: TCP or UDP port
        @type protocol_port: int
        @param weight: Positive integer indicating relative portion of
            traffic from pool this member should receive (e.g., a member with
            a weight of 10 will receive five times as much traffic as a
            member with weight 2)
            Default: 1
        @type weight: int
        @param admin_state_up: If set to False, member will be created in an
            administratively down state
            Default: True
        @type admin_state_up: bool
        @return: Create Member Object
        @rtype: CreateMember
        """
        self.subnet_id = subnet_id
        self.tenant_id = tenant_id
        self.address = address
        self.protocol_port = protocol_port
        self.weight = weight
        self.admin_state_up = admin_state_up

    def _obj_to_json(self):
        # Optional fields left as None are stripped by the base-class
        # helper so they are omitted from the request body entirely.
        body = {
            'subnet_id': self.subnet_id,
            'tenant_id': self.tenant_id,
            'address': self.address,
            'protocol_port': self.protocol_port,
            'weight': self.weight,
            'admin_state_up': self.admin_state_up
        }
        body = self._remove_empty_values(body)
        main_body = {self.ROOT_TAG: body}
        return json.dumps(main_body)

    def _obj_to_xml(self):
        xml = Constants.XML_HEADER
        element = ET.Element(self.ROOT_TAG)
        element.set('xmlns', Constants.XML_API_NAMESPACE)
        element.set('subnet_id', self.subnet_id)
        element.set('tenant_id', self.tenant_id)
        element.set('address', self.address)
        element.set('protocol_port', str(self.protocol_port))
        # Optional attributes are only emitted when they were supplied.
        if self.weight is not None:
            element.set('weight', str(self.weight))
        if self.admin_state_up is not None:
            element.set('admin_state_up', str(self.admin_state_up))
        xml = "{0}{1}".format(xml, ET.tostring(element))
        return xml
class UpdateMember(AutoMarshallingModel):
    """ Update Member Request Model
    @summary: An object that represents the request data of updating a
        Member.  This is used in updating an existing Member.

    json ex:
        {
            "member": {
                "weight": 7,
                "admin_state_up": false
            }
        }

    xml ex:
        <member xmlns=""
            weight="7"
            admin_state_up="False" />
    """
    ROOT_TAG = CreateMember.ROOT_TAG

    def __init__(self, weight=None, admin_state_up=None):
        """
        @param weight: Relative portion of pool traffic this member receives.
        @type weight: int
        @param admin_state_up: Administrative state of the member.
        @type admin_state_up: bool
        """
        self.weight = weight
        self.admin_state_up = admin_state_up
        self.attr_dict = {
            'weight': self.weight,
            'admin_state_up': self.admin_state_up
        }

    def _obj_to_json(self):
        # Fields the caller did not set are stripped so the request only
        # updates what was explicitly provided.
        body = self._remove_empty_values(self.attr_dict)
        return json.dumps({self.ROOT_TAG: body})

    def _obj_to_xml(self):
        xml = Constants.XML_HEADER
        element = ET.Element(self.ROOT_TAG)
        element.set('xmlns', Constants.XML_API_NAMESPACE)
        # Work on a copy: the previous code aliased self.attr_dict here, so
        # the str() casts below permanently mutated the instance state and
        # corrupted any subsequent _obj_to_json() call on the same object.
        element_dict = dict(self.attr_dict)
        # cast non-strings into strings (XML attribute values must be text)
        element_dict['weight'] = str(element_dict['weight'])
        element_dict['admin_state_up'] = str(element_dict['admin_state_up'])
        element = self._set_xml_etree_element(element, element_dict)
        xml = "{0}{1}".format(xml, ET.tostring(element))
        return xml
| [
"franklin.naval@RACKSPACE.COM"
] | franklin.naval@RACKSPACE.COM |
7c5d0a108ce1a97c84572b1c05a6cc3f1bb972a3 | 5a4ae6581fa70025a3c6cd4a8d8b0e179f10a7dc | /tests/past_api09_temptable.py | 4fe41861e916ce3471721c4d49d36b41444d3818 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | Dev4Data/datatest | b250a465f4c313ebe8d59a1d273e4e1ce4f86619 | bf136eab23c2b6ea36c201e1446fca9243c3fba6 | refs/heads/master | 2023-08-03T00:36:34.362741 | 2021-12-05T17:44:33 | 2021-12-05T17:44:33 | 136,925,059 | 0 | 0 | null | 2018-06-11T12:44:50 | 2018-06-11T12:44:50 | null | UTF-8 | Python | false | false | 18,160 | py | # -*- coding: utf-8 -*-
import itertools
import sqlite3
import unittest
import datatest._vendor.temptable as temptable
from datatest._compatibility import collections
from datatest._vendor.temptable import (
table_exists,
new_table_name,
normalize_names,
normalize_default,
create_table,
get_columns,
insert_records,
alter_table,
drop_table,
savepoint,
load_data,
)
class TestTableExists(unittest.TestCase):
    """table_exists() must detect both persistent and temporary tables."""

    def setUp(self):
        # Fresh in-memory database for each test.
        connection = sqlite3.connect(':memory:')
        self.cursor = connection.cursor()

    def test_empty_database(self):
        self.assertFalse(table_exists(self.cursor, 'table_a'))

    def test_persistent_table(self):
        self.cursor.execute('CREATE TABLE table_b (col1, col2)')
        self.assertTrue(table_exists(self.cursor, 'table_b'))

    def test_temporary_table(self):
        self.cursor.execute('CREATE TEMPORARY TABLE table_c (col1, col2)')
        self.assertTrue(table_exists(self.cursor, 'table_c'))
class TestNewTableName(unittest.TestCase):
    """new_table_name() must skip names already used by tables or
    temporary tables."""

    def setUp(self):
        # Rebuild internal generator.
        # NOTE(review): resets the module-private name sequence so every
        # test starts counting from 'tbl0'.
        temptable._table_names = ('tbl{0}'.format(x) for x in itertools.count())

        connection = sqlite3.connect(':memory:')
        self.cursor = connection.cursor()

    def test_empty_database(self):
        table_name = new_table_name(self.cursor)
        self.assertEqual(table_name, 'tbl0')

    def test_existing_temptable(self):
        self.cursor.execute('CREATE TEMPORARY TABLE tbl0 (col1, col2)')
        table_name = new_table_name(self.cursor)
        self.assertEqual(table_name, 'tbl1')

    def test_existing_table_and_temptable(self):
        self.cursor.execute('CREATE TABLE tbl0 (col1, col2)')
        self.cursor.execute('CREATE TEMPORARY TABLE tbl1 (col1, col2)')
        table_name = new_table_name(self.cursor)
        self.assertEqual(table_name, 'tbl2')
class TestNormalizeNames(unittest.TestCase):
    """normalize_names() must double-quote identifiers, strip whitespace,
    and escape embedded quotes by doubling them."""

    def test_single_value(self):
        normalized = normalize_names('A')
        self.assertEqual(normalized, '"A"')

    def test_list_of_values(self):
        # Lists are normalized element-wise.
        normalized = normalize_names(['A', 'B'])
        expected = ['"A"', '"B"']
        self.assertEqual(normalized, expected)

    def test_non_strings(self):
        # Non-string input is converted to its string form first.
        normalized = normalize_names(2.5)
        self.assertEqual(normalized, '"2.5"')

    def test_whitespace(self):
        # Surrounding whitespace is stripped; all-whitespace becomes "".
        normalized = normalize_names(' A ')
        self.assertEqual(normalized, '"A"')

        normalized = normalize_names(' ')
        self.assertEqual(normalized, '""')

    def test_quote_escaping(self):
        normalized = normalize_names('Steve "The Woz" Wozniak')
        self.assertEqual(normalized, '"Steve ""The Woz"" Wozniak"')
class TestNormalizeDefault(unittest.TestCase):
    """normalize_default() must render values for a SQL DEFAULT clause."""

    def test_none(self):
        # None maps to SQL NULL.
        normalized = normalize_default(None)
        self.assertEqual(normalized, 'NULL')

    def test_expression(self):
        # Parenthesized expressions pass through unquoted.
        expression = "(datetime('now'))"
        normalized = normalize_default(expression)
        self.assertEqual(normalized, expression)

    def test_number_or_literal(self):
        # Numbers render bare; string literals get single quotes.
        normalized = normalize_default(7)
        self.assertEqual(normalized, '7')

        normalized = normalize_default('foo')
        self.assertEqual(normalized, "'foo'")

        normalized = normalize_default('')
        self.assertEqual(normalized, "''")
class TestCreateTable(unittest.TestCase):
    """create_table(): table creation, column defaults, and propagation
    of sqlite3 errors (including duplicate/empty column names)."""

    def setUp(self):
        connection = sqlite3.connect(':memory:')
        self.cursor = connection.cursor()

    def count_tables(self):  # <- Helper function.
        """Return the number of temporary tables in the connection."""
        self.cursor.execute('''
            SELECT COUNT(*)
            FROM sqlite_temp_master
            WHERE type='table'
        ''')
        return self.cursor.fetchone()[0]

    def test_basic_creation(self):
        self.assertEqual(self.count_tables(), 0, msg='starting with zero tables')

        create_table(self.cursor, 'test_table1', ['A', 'B'])  # <- Create table!
        self.assertEqual(self.count_tables(), 1, msg='one table')

        create_table(self.cursor, 'test_table2', ['A', 'B'])  # <- Create table!
        self.assertEqual(self.count_tables(), 2, msg='two tables')

    def test_default_value(self):
        # When unspecified, default is empty string.
        create_table(self.cursor, 'test_table1', ['A', 'B'])
        self.cursor.execute("INSERT INTO test_table1 (A) VALUES ('foo')")
        self.cursor.execute("INSERT INTO test_table1 (B) VALUES ('bar')")
        self.cursor.execute('SELECT * FROM test_table1')
        expected = [
            ('foo', ''),  # <- Default in column B
            ('', 'bar'),  # <- Default in column A
        ]
        self.assertEqual(self.cursor.fetchall(), expected)

        # Setting default to None.
        create_table(self.cursor, 'test_table2', ['A', 'B'], default=None)
        self.cursor.execute("INSERT INTO test_table2 (A) VALUES ('foo')")
        self.cursor.execute("INSERT INTO test_table2 (B) VALUES ('bar')")
        self.cursor.execute('SELECT * FROM test_table2')
        expected = [
            ('foo', None),  # <- Default in column B
            (None, 'bar'),  # <- Default in column A
        ]
        self.assertEqual(self.cursor.fetchall(), expected)

    def test_sqlite3_errors(self):
        """Sqlite errors should not be caught."""
        # Table already exists.
        create_table(self.cursor, 'test_table1', ['A', 'B'])
        with self.assertRaises(sqlite3.OperationalError):
            create_table(self.cursor, 'test_table1', ['A', 'B'])

        # Duplicate column name.
        with self.assertRaises(sqlite3.OperationalError):
            create_table(self.cursor, 'test_table2', ['A', 'B', 'A'])

        # Duplicate column name (after normalization).
        with self.assertRaises(sqlite3.OperationalError):
            create_table(self.cursor, 'test_table3', ['A', 'B', ' A '])

        # Duplicate empty/all-whitespace string columns (uses modified message).
        # Fixed: the previous code captured the exception as `cm` but never
        # used it, which lint flags as an unused variable (F841).
        with self.assertRaises(sqlite3.OperationalError):
            create_table(self.cursor, 'test_table4', ['', 'B', ' '])
class TestGetColumns(unittest.TestCase):
    """get_columns() must list column names for normal and temp tables
    and raise for missing tables."""

    def setUp(self):
        connection = sqlite3.connect(':memory:')
        self.cursor = connection.cursor()

    def test_get_columns(self):
        self.cursor.execute('CREATE TABLE test1 ("A", "B")')
        columns = get_columns(self.cursor, 'test1')
        self.assertEqual(columns, ['A', 'B'])

        self.cursor.execute('CREATE TEMPORARY TABLE test2 ("C", "D")')
        columns = get_columns(self.cursor, 'test2')
        self.assertEqual(columns, ['C', 'D'])

    def test_missing_table(self):
        # Asking for a nonexistent table is a programming error.
        with self.assertRaises(sqlite3.ProgrammingError):
            columns = get_columns(self.cursor, 'missing_table')
class TestInsertRecords(unittest.TestCase):
    """insert_records() must insert rows by column name, honor column
    order, tolerate empty input, and propagate sqlite3 errors."""

    def setUp(self):
        connection = sqlite3.connect(':memory:')
        self.cursor = connection.cursor()

    def test_basic_insert(self):
        cursor = self.cursor
        cursor.execute('CREATE TEMPORARY TABLE test_table ("A", "B")')

        records = [
            ('x', 1),
            ('y', 2),
        ]
        insert_records(cursor, 'test_table', ['A', 'B'], records)

        cursor.execute('SELECT * FROM test_table')
        results = cursor.fetchall()
        self.assertEqual(results, records)

    def test_reordered_columns(self):
        cursor = self.cursor
        cursor.execute('CREATE TEMPORARY TABLE test_table ("A", "B")')

        records = [
            (1, 'x'),
            (2, 'y'),
        ]
        columns = ['B', 'A']  # <- Column order doesn't match how table was created.
        insert_records(cursor, 'test_table', columns, records)

        cursor.execute('SELECT * FROM test_table')
        results = cursor.fetchall()
        # Values land in the named columns, not positionally.
        expected = [
            ('x', 1),
            ('y', 2),
        ]
        self.assertEqual(results, expected)

    def test_wrong_number_of_values(self):
        self.cursor.execute('CREATE TEMPORARY TABLE test_table ("A", "B")')

        too_few = [('x',), ('y',)]
        with self.assertRaises(sqlite3.ProgrammingError):
            insert_records(self.cursor, 'test_table', ['A', 'B'], too_few)

        too_many = [('x', 1, 'foo'), ('y', 2, 'bar')]
        with self.assertRaises(sqlite3.ProgrammingError):
            insert_records(self.cursor, 'test_table', ['A', 'B'], too_many)

    def test_no_records(self):
        cursor = self.cursor
        cursor.execute('CREATE TEMPORARY TABLE test_table ("A", "B")')

        records = iter([])  # <- Empty, no data.
        insert_records(cursor, 'test_table', ['A', 'B'], records)

        cursor.execute('SELECT * FROM test_table')
        results = cursor.fetchall()
        self.assertEqual(results, [])

    def test_sqlite3_errors(self):
        """Sqlite errors should not be caught."""
        # No such table.
        with self.assertRaises(sqlite3.OperationalError):
            records = [('x', 1), ('y', 2)]
            insert_records(self.cursor, 'missing_table', ['A', 'B'], records)

        # No column named X.
        with self.assertRaises(sqlite3.OperationalError):
            self.cursor.execute('CREATE TEMPORARY TABLE test_table ("A", "B")')
            records = [('a', 1), ('b', 2)]
            insert_records(self.cursor, 'test_table', ['X', 'B'], records)
class TestAlterTable(unittest.TestCase):
    """alter_table() must append missing columns, ignore existing ones,
    and preserve the original column order."""

    def setUp(self):
        connection = sqlite3.connect(':memory:')
        self.cursor = connection.cursor()

    def test_new_columns(self):
        self.cursor.execute('CREATE TEMPORARY TABLE test_table ("A", "B")')
        alter_table(self.cursor, 'test_table', ['C', 'D', 'E'])
        columns = get_columns(self.cursor, 'test_table')
        self.assertEqual(columns, ['A', 'B', 'C', 'D', 'E'])

    def test_existing_columns(self):
        self.cursor.execute('CREATE TEMPORARY TABLE test_table ("A", "B")')
        alter_table(self.cursor, 'test_table', ['A', 'B', 'C', 'D'])
        columns = get_columns(self.cursor, 'test_table')
        self.assertEqual(columns, ['A', 'B', 'C', 'D'])

    def test_ordering_behavior(self):
        self.cursor.execute('CREATE TEMPORARY TABLE test_table ("A", "B")')
        alter_table(self.cursor, 'test_table', ['B', 'C', 'A', 'D'])

        # Columns A and B already exist in a specified order and
        # the new columns ('C' and 'D') are added in the order in
        # which they are encountered.
        columns = get_columns(self.cursor, 'test_table')
        self.assertEqual(columns, ['A', 'B', 'C', 'D'])
class TestDropTable(unittest.TestCase):
    """Tests for drop_table()."""

    def test_drop_table(self):
        """A dropped table must no longer be reported as existing."""
        cur = sqlite3.connect(':memory:').cursor()
        cur.execute('CREATE TEMPORARY TABLE test_table ("A", "B")')
        self.assertTrue(table_exists(cur, 'test_table'))
        drop_table(cur, 'test_table')  # <- Drop table!
        self.assertFalse(table_exists(cur, 'test_table'))
class TestSavepoint(unittest.TestCase):
    """Tests for the savepoint() context manager.

    savepoint() requires a connection in autocommit mode
    (isolation_level=None); entering the context opens a SAVEPOINT,
    a clean exit RELEASEs it, and an exception ROLLs it BACK.
    """

    def setUp(self):
        connection = sqlite3.connect(':memory:')
        connection.isolation_level = None  # <- Autocommit mode.
        self.cursor = connection.cursor()

    def test_transaction_status(self):
        """Entering a savepoint starts a transaction; exiting ends it."""
        connection = self.cursor.connection
        if not hasattr(connection, 'in_transaction'):  # New in 3.2.
            # FIX: previously this silently returned, which counted as a
            # pass even though nothing was checked; report a skip instead.
            self.skipTest('sqlite3.Connection.in_transaction not available')
        self.assertFalse(connection.in_transaction)
        with savepoint(self.cursor):
            self.assertTrue(connection.in_transaction)
        self.assertFalse(connection.in_transaction)

    def test_release(self):
        """Changes made inside the context are kept after a clean exit."""
        cursor = self.cursor
        with savepoint(cursor):
            cursor.execute('CREATE TEMPORARY TABLE test_table ("A")')
            cursor.execute("INSERT INTO test_table VALUES ('one')")
            cursor.execute("INSERT INTO test_table VALUES ('two')")
            cursor.execute("INSERT INTO test_table VALUES ('three')")
        cursor.execute('SELECT * FROM test_table')
        self.assertEqual(cursor.fetchall(), [('one',), ('two',), ('three',)])

    def test_nested_releases(self):
        """Nested savepoints release cleanly from the inside out."""
        cursor = self.cursor
        with savepoint(cursor):
            cursor.execute('CREATE TEMPORARY TABLE test_table ("A")')
            cursor.execute("INSERT INTO test_table VALUES ('one')")
            with savepoint(cursor):  # <- Nested!
                cursor.execute("INSERT INTO test_table VALUES ('two')")
            cursor.execute("INSERT INTO test_table VALUES ('three')")
        cursor.execute('SELECT * FROM test_table')
        self.assertEqual(cursor.fetchall(), [('one',), ('two',), ('three',)])

    def test_rollback(self):
        """An exception inside the context rolls back its changes."""
        cursor = self.cursor
        with savepoint(cursor):  # <- Released.
            cursor.execute('CREATE TEMPORARY TABLE test_table ("A")')
        try:
            with savepoint(cursor):  # <- Rolled back!
                cursor.execute("INSERT INTO test_table VALUES ('one')")
                cursor.execute("INSERT INTO test_table VALUES ('two')")
                cursor.execute("INSERT INTO missing_table VALUES ('three')")  # <- Bad table.
        except sqlite3.OperationalError:
            pass
        cursor.execute('SELECT * FROM test_table')
        self.assertEqual(cursor.fetchall(), [], 'Table should exist but contain no records.')

    def test_nested_rollback(self):
        """A rolled-back inner savepoint leaves the outer one intact."""
        cursor = self.cursor
        with savepoint(cursor):  # <- Released.
            cursor.execute('CREATE TEMPORARY TABLE test_table ("A")')
            cursor.execute("INSERT INTO test_table VALUES ('one')")
            try:
                with savepoint(cursor):  # <- Nested rollback!
                    cursor.execute("INSERT INTO test_table VALUES ('two')")
                    raise Exception()
            except Exception:
                pass
            cursor.execute("INSERT INTO test_table VALUES ('three')")
        cursor.execute('SELECT * FROM test_table')
        self.assertEqual(cursor.fetchall(), [('one',), ('three',)])

    def test_bad_isolation_level(self):
        """A non-autocommit connection must be rejected with ValueError."""
        connection = sqlite3.connect(':memory:')
        connection.isolation_level = 'DEFERRED'  # <- Expects None/autocommit!
        cursor = connection.cursor()
        with self.assertRaises(ValueError):
            with savepoint(cursor):
                pass
class TestLoadData(unittest.TestCase):
    """Tests for load_data(): creating tables and inserting records."""

    def setUp(self):
        connection = sqlite3.connect(':memory:')
        connection.isolation_level = None
        self.cursor = connection.cursor()
        # Prefer an order-preserving mapping when available (new in 2.7).
        self.dict_constructor = getattr(collections, 'OrderedDict', dict)

    def test_four_args(self):
        """Cursor, table, columns, and records given explicitly."""
        rows = [('x', 1), ('y', 2)]
        load_data(self.cursor, 'testtable', ['A', 'B'], rows)  # <- Four args.
        self.cursor.execute('SELECT A, B FROM testtable')
        self.assertEqual(self.cursor.fetchall(), [('x', 1), ('y', 2)])

    def test_four_args_mappings(self):
        """Mapping records are matched to columns by key, not position."""
        make = self.dict_constructor
        rows = [
            make([('A', 'x'), ('B', 1)]),
            make([('B', 2), ('A', 'y')]),  # <- Different key order.
        ]
        load_data(self.cursor, 'testtable', ['A', 'B'], rows)  # <- Four args.
        self.cursor.execute('SELECT A, B FROM testtable')
        self.assertEqual(self.cursor.fetchall(), [('x', 1), ('y', 2)])

    def test_three_args(self):
        """Without a columns argument, the first record is the header."""
        rows = [
            ['A', 'B'],  # <- Used as header row.
            ('x', 1),
            ('y', 2),
        ]
        load_data(self.cursor, 'testtable', rows)  # <- Three args.
        self.cursor.execute('SELECT A, B FROM testtable')
        self.assertEqual(self.cursor.fetchall(), [('x', 1), ('y', 2)])

    def test_three_args_mappings(self):
        """Mapping records carry their own column names."""
        make = self.dict_constructor
        rows = [
            make([('A', 'x'), ('B', 1)]),
            make([('B', 2), ('A', 'y')]),  # <- Different key order.
        ]
        load_data(self.cursor, 'testtable', rows)  # <- Three args.
        self.cursor.execute('SELECT A, B FROM testtable')
        self.assertEqual(self.cursor.fetchall(), [('x', 1), ('y', 2)])

    def test_three_args_namedtuples(self):
        """Namedtuple records supply column names via their fields."""
        ntup = collections.namedtuple('ntup', ['A', 'B'])
        rows = [ntup('x', 1), ntup('y', 2)]
        load_data(self.cursor, 'testtable', rows)  # <- Three args.
        self.cursor.execute('SELECT A, B FROM testtable')
        self.assertEqual(self.cursor.fetchall(), [('x', 1), ('y', 2)])

    def test_column_default(self):
        """Missing values default to '', or to the given *default*."""
        load_data(self.cursor, 'testtable1', ['A', 'B'], [('x', 1)])
        load_data(self.cursor, 'testtable1', ['A'], [('y',)])
        load_data(self.cursor, 'testtable1', ['B'], [(3,)])
        self.cursor.execute('SELECT A, B FROM testtable1')
        self.assertEqual(self.cursor.fetchall(),
                         [('x', 1), ('y', ''), ('', 3)])
        load_data(self.cursor, 'testtable2', ['A', 'B'], [('x', 1)], default=None)
        load_data(self.cursor, 'testtable2', ['A'], [('y',)])
        load_data(self.cursor, 'testtable2', ['B'], [(3,)])
        self.cursor.execute('SELECT A, B FROM testtable2')
        self.assertEqual(self.cursor.fetchall(),
                         [('x', 1), ('y', None), (None, 3)])

    def test_empty_records(self):
        """Empty records create the table only when columns are given."""
        rows = []
        load_data(self.cursor, 'testtable1', ['A', 'B'], rows)  # <- Using four args.
        self.assertTrue(table_exists(self.cursor, 'testtable1'), 'should create table')
        self.cursor.execute('SELECT A, B FROM testtable1')
        self.assertEqual(self.cursor.fetchall(), [], 'should have zero records')
        load_data(self.cursor, 'testtable2', rows)  # <- Using three args.
        self.assertFalse(table_exists(self.cursor, 'testtable2'), 'should not create table')

    def test_bad_columns_object(self):
        """A plain string is not an acceptable *columns* argument."""
        rows = [('x', 1), ('y', 2)]
        bad_columns = 'bad columns object'  # <- Expects iterable of names, not this str.
        with self.assertRaises(TypeError):
            load_data(self.cursor, 'testtable', bad_columns, rows)
# Allow the test suite to be run directly from the command line.
if __name__ == '__main__':
    unittest.main()
| [
"shawnbrown@users.noreply.github.com"
] | shawnbrown@users.noreply.github.com |
f10f7382284cc7417f013dca6a837b09938678e6 | 04240339913c78fafab49e6a521f0aa130cfbc21 | /Arrayplusone.py | e1558c2165c62dc8619d12839274b2eaf37ed917 | [] | no_license | IreneYay/WallB_hw | ab3f4a8f9a1922f562fbc6b5c1e675867e20a7ca | 3f4246f03283427507992db83e87191be86c49d1 | refs/heads/master | 2020-07-31T05:03:34.304188 | 2019-09-24T04:37:37 | 2019-09-24T04:37:37 | 210,493,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | class Solution:
def plusOne(self, digits: List[int]) -> List[int]:
    """Add one to a non-negative integer given as a list of decimal digits.

    The most significant digit comes first. Returns a new list; the
    caller's list is not modified.

    Improvement over the original: works digit-by-digit with a carry
    instead of round-tripping through str/int, so it stays O(n) and
    never materializes a huge Python integer.
    """
    result = digits[:]  # work on a copy so the input stays untouched
    for i in range(len(result) - 1, -1, -1):
        if result[i] < 9:
            result[i] += 1   # no carry needed; we are done
            return result
        result[i] = 0        # 9 rolls over to 0, carry moves left
    # Every digit was 9 (e.g. [9, 9] -> [1, 0, 0]); prepend the carry.
    return [1] + result
| [
"noreply@github.com"
] | noreply@github.com |
ef35d4d21c0c69a4d991e93868072dc6cf75a519 | 61d484ae68e40b89432f66f98164c811692ee612 | /ThirdParty/protobuf-registry/python/protobufs/services/profile/actions/get_profile_stats_pb2.py | 55e84806eed9e93479af3fa9b97e42596ef5d993 | [
"MIT"
] | permissive | getcircle/luno-ios | 2a29192c130c48415e55b50850e77a1a37f22ad1 | d18260abb537496d86cf607c170dd5e91c406f0f | refs/heads/master | 2021-05-01T04:01:52.647661 | 2016-12-05T04:54:08 | 2016-12-05T04:54:08 | 27,101,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 4,563 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: protobufs/services/profile/actions/get_profile_stats.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
# NOTE(review): protoc-generated module ("DO NOT EDIT") -- comments below
# are reading aids only; regenerate from the .proto to change behavior.
# Default symbol database used to register this file's messages.
_sym_db = _symbol_database.Default()
# Cross-file dependency: the shared profile containers (StatV1 lives there).
from protobufs.services.profile import containers_pb2 as protobufs_dot_services_dot_profile_dot_containers__pb2
# File-level descriptor; serialized_pb is the compiled .proto wire blob.
DESCRIPTOR = _descriptor.FileDescriptor(
name='protobufs/services/profile/actions/get_profile_stats.proto',
package='services.profile.actions.get_profile_stats',
syntax='proto3',
serialized_pb=b'\n:protobufs/services/profile/actions/get_profile_stats.proto\x12*services.profile.actions.get_profile_stats\x1a+protobufs/services/profile/containers.proto\"H\n\tRequestV1\x12\x13\n\x0b\x61\x64\x64ress_ids\x18\x01 \x03(\t\x12\x14\n\x0clocation_ids\x18\x02 \x03(\t\x12\x10\n\x08team_ids\x18\x03 \x03(\t\"@\n\nResponseV1\x12\x32\n\x05stats\x18\x01 \x03(\x0b\x32#.services.profile.containers.StatV1b\x06proto3'
,
dependencies=[protobufs_dot_services_dot_profile_dot_containers__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Descriptor for RequestV1: three repeated string id filters
# (address_ids, location_ids, team_ids).
_REQUESTV1 = _descriptor.Descriptor(
name='RequestV1',
full_name='services.profile.actions.get_profile_stats.RequestV1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='address_ids', full_name='services.profile.actions.get_profile_stats.RequestV1.address_ids', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='location_ids', full_name='services.profile.actions.get_profile_stats.RequestV1.location_ids', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='team_ids', full_name='services.profile.actions.get_profile_stats.RequestV1.team_ids', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=151,
serialized_end=223,
)
# Descriptor for ResponseV1: a repeated StatV1 message field.
_RESPONSEV1 = _descriptor.Descriptor(
name='ResponseV1',
full_name='services.profile.actions.get_profile_stats.ResponseV1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='stats', full_name='services.profile.actions.get_profile_stats.ResponseV1.stats', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=225,
serialized_end=289,
)
# Wire up the cross-file message type and register both descriptors.
_RESPONSEV1.fields_by_name['stats'].message_type = protobufs_dot_services_dot_profile_dot_containers__pb2._STATV1
DESCRIPTOR.message_types_by_name['RequestV1'] = _REQUESTV1
DESCRIPTOR.message_types_by_name['ResponseV1'] = _RESPONSEV1
# Build the concrete Python message classes from the descriptors.
RequestV1 = _reflection.GeneratedProtocolMessageType('RequestV1', (_message.Message,), dict(
DESCRIPTOR = _REQUESTV1,
__module__ = 'protobufs.services.profile.actions.get_profile_stats_pb2'
# @@protoc_insertion_point(class_scope:services.profile.actions.get_profile_stats.RequestV1)
))
_sym_db.RegisterMessage(RequestV1)
ResponseV1 = _reflection.GeneratedProtocolMessageType('ResponseV1', (_message.Message,), dict(
DESCRIPTOR = _RESPONSEV1,
__module__ = 'protobufs.services.profile.actions.get_profile_stats_pb2'
# @@protoc_insertion_point(class_scope:services.profile.actions.get_profile_stats.ResponseV1)
))
_sym_db.RegisterMessage(ResponseV1)
# @@protoc_insertion_point(module_scope)
| [
"mwhahn@gmail.com"
] | mwhahn@gmail.com |
bba750f4f5d2b831e16a33244d5dcbf9e58ec1ac | 87dcb103e48da1fd17233232a7b4ad1d79ae50d5 | /svtplay-dl | c90f98e505eedf0ecbe8124fd1bd4cc58c18b091 | [
"MIT"
] | permissive | gusseleet/svtplay-dl | 9bd64ba5c83775a12496a3dcd42282e5171249ff | 55d811286df237738802ac9754417a8fed21280f | refs/heads/master | 2020-12-25T02:30:14.399785 | 2016-02-19T20:29:49 | 2016-02-19T20:29:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | #!/usr/bin/env python
print("This file is no longer updated.")
print("if you still want to use it. go to https://svtplay-dl.se/archive and download the latest one")
| [
"j@i19.se"
] | j@i19.se | |
0ffa89efc32e90f5aa9e80ddf58aa573e6770b11 | 0c1fb6f18843b3bdd0ebc7d34a35c55010922951 | /pynng/_version.py | 5d856b66c7975b40096a5ac5a35c919444ae3d4b | [
"MIT"
] | permissive | zakharov/pynng | cfff875e4b3fa4b9401fb6acf4635d1e610d78c2 | 840bf1de2818173ebc131f981401579b2ad8114b | refs/heads/master | 2020-09-15T05:08:16.652573 | 2019-10-23T03:01:17 | 2019-10-23T03:01:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | # This file is imported from __init__.py and exec'd from setup.py
__version__ = "0.4.1+dev"
| [
"cody.piersall@gmail.com"
] | cody.piersall@gmail.com |
dbcc1064037ae36418934a0b135236fad0326cfb | 6e730794b5e82de22dabcb9028abdeb0ffd30c47 | /ReadUserList.py | 25163ac77b4eeaeccc98a8b38b8211c0c3a326c4 | [] | no_license | jclemite/MyFirstScripts | cd27d5f170422e0da4482f665741b089bab9b823 | 8ef2628469aec1b44a25fb4f4e773b593cb7911e | refs/heads/master | 2021-03-10T06:15:16.856852 | 2020-03-11T00:30:04 | 2020-03-11T00:30:04 | 246,427,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | # Pull in the CSV file and store it to file
csvfile = open("UserList.csv", "r")
# Read the file to convert it into text to store in contents
csvtext = csvfile.read()
# Split the contents into a list of rows based on the newline
csvtext.split("\n")
# Loop through the rows in order to find which ones contain "229.62.232.190"
for row in rows:
# If "229.62.232.190" is found in a row, then print "COMPANY SERVER FOUND" and user info
if(row.find("229.62.232.190") > -1):
print("COMPANY SERVER FOUND")
# Split the current row on commas
info = row.split(",")
# Print out the user's name, password, and hours online
# Split the IP column into a list on semicolons
# Loop through the ip_list and print them to the screen
print("IPs:") | [
"jclemite@gmail.com"
] | jclemite@gmail.com |
04c70b36f52803ba29d570ccd09024bc08ec178b | 24629ce0770477cc32b2a071de8cabdacfc3f03c | /third_year/dsp/lab6/lab6_2.py | a2e731037970e1cd9c1f5bc4f6610e72cc0b1b0b | [
"BSD-2-Clause"
] | permissive | Snake266/labs | 491cddfed084c4782f21a3e92efc78a79f1fcef8 | baff6528ec0e58e76b65f060186fb750263cb53b | refs/heads/master | 2022-07-10T11:25:44.537328 | 2022-05-19T20:42:54 | 2022-05-19T20:42:54 | 355,807,635 | 0 | 0 | BSD-2-Clause | 2021-12-27T18:47:42 | 2021-04-08T07:35:29 | TeX | UTF-8 | Python | false | false | 1,540 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 19 19:07:54 2022
@author: georgii
"""
from numpy import arange, cos, pi, abs, angle
from scipy.fft import fft
from scipy import signal
from matplotlib.pyplot import figure, plot, grid, \
xlabel, ylabel, stem, subplot, close, tight_layout, legend
def gen_un(Tk, n):
    """Return cos(2*pi*n/Tk): a sampled cosine with period Tk samples."""
    phase = 2 * pi * n / Tk
    return cos(phase)
def compute_task(Tk, n):
    """Generate the cosine for period Tk and analyze its DFT.

    Returns a tuple (signal, magnitude spectrum, phase in degrees).
    """
    samples = gen_un(Tk, n)
    spectrum = fft(samples)
    magnitude = abs(spectrum)
    phase_deg = angle(spectrum, deg=True)
    return samples, magnitude, phase_deg
def plot_un(un, s, n):
    # Draws a two-panel plot into the *current* pyplot figure:
    # top panel: time-domain samples; bottom panel: DFT magnitude (stem).
    # NOTE(review): relies on pyplot's implicit current-figure state, so
    # the caller must create a figure() first.
    subplot(211)
    plot(n, un)
    xlabel('Отсчеты времени')    # "Time samples"
    ylabel('Отсчеты сигнала')    # "Signal samples"
    grid()
    subplot(212)
    stem(n, s)
    xlabel('Нормированная частота')  # "Normalized frequency"
    ylabel('Амплитуда')              # "Amplitude"
    grid()
# --- Script body -----------------------------------------------------
close('all')  # discard any figures left over from a previous run

N = 64                # number of samples per realization
n = arange(0, N, 1)   # sample indices 0..N-1
Tks = [16, 15.5, 11]  # cosine periods (in samples) to examine

# One figure per period: integer periods land exactly on DFT bins,
# non-integer periods exhibit spectral leakage.
for Tk in Tks:
    un, s, phi = compute_task(Tk, n)
    figure(num="T={}".format(Tk))
    plot_un(un, s, n)
    tight_layout()

# Tk=11 -- spectral leakage is most noticeable here.
# (Comment translated from Russian; the original said "Tk=1" but the
# code uses 11.)
un_initial = gen_un(11, n)
w = signal.windows.hamming(64)
# NOTE(review): `un` here is the leftover loop variable from the last
# iteration (Tk=11), which happens to equal un_initial -- fragile if
# the Tks list changes; confirm before reordering.
un = un * w
scompl = fft(un)
s = abs(scompl)
scompl_initial = fft(un_initial)
s_initial = abs(scompl_initial)
# Overlay the raw and windowed signal (top) and their spectra (bottom).
figure('Окно Хэмминга')
subplot(211)
plot(n, un_initial, 'b', label='initial signal')
plot(n, un, 'r', label='after hamming')
grid()
legend()
subplot(212)
stem(n, s_initial, linefmt='C0-',label='initial signal')
stem(n, s, linefmt='red',markerfmt='rD', label='after hamming')
grid()
legend()
"georgiiba26@gmail.com"
] | georgiiba26@gmail.com |
527d91152c67b64677184dd95c6904afda81cf7d | 690dba68a42554906bbe8bea890cff1a3c08c2a4 | /prac4/practical_4_part1.py | b5b4128da1c9ab7257cab91e81f209f897ae88cd | [] | no_license | jhylands/csteach | 4cb3bed1fb1fbe72ee1eea074f242fa00a5b820f | a10f63f253bac546ddddf4cdd55db89b38784f9f | refs/heads/master | 2021-01-10T09:07:47.144984 | 2016-02-28T14:38:21 | 2016-02-28T14:38:21 | 51,590,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,165 | py | sampleText = ("As Python's creator, I'd like to say a few words about its "+
"origins, adding a bit of personal philosophy.\n"+
"Over six years ago, in December 1989, I was looking for a "+
"'hobby' programming project that would keep me occupied "+
"during the week around Christmas. My office "+
"(a government-run research lab in Amsterdam) would be closed, "+
"but I had a home computer, and not much else on my hands. "+
"I decided to write an interpreter for the new scripting "+
"language I had been thinking about lately: a descendant of ABC "+
"that would appeal to Unix/C hackers. I chose Python as a "+
"working title for the project, being in a slightly irreverent "+
"mood (and a big fan of Monty Python's Flying Circus).")
#######################################################
##
## CODE for QUESTION 1 - 4
##
#######################################################
## Please write your code here, watch for your indentation
#function to remove the return \n
def remret(string):
for i in xrange(0,len(string)-1):
if string[i:i+1] == "\n":
string = string[0:i] + " " + string[i+1:]
return string
def splitText(text):
text = remret(text)
array = []
carry = ""
for charactor in text:
if charactor == " " or charactor == "'":
if len(carry)>0:
array.append(carry)
carry = ""
elif charactor == "." or charactor == "(" or charactor == ")":
pass
else:
carry = carry + charactor
array.append(carry)
return array
def getWordsStartingWith(text, letter):
words = []
for word in splitText(text):
lowerWord = word.lower()
if lowerWord[0:1] == letter.lower():
words.append(lowerWord)
return words
def getUniqueWordsStartingWith(text, letter):
return list(set(getWordsStartingWith(text,letter)))
def printWordsFrequency(text):
words = splitText(text.lower())
wordDictionary = {}
#inishiate each element of the dictionary
for word in list(set(words)):
wordDictionary[word] = 0
#count each words occurance
for word in words:
wordDictionary[word] +=1
return wordDictionary
#get number of words in text
def numberOfWords(text):
return len(splitText(text))
def averageWordLength(text):
words = splitText(text)
total = 0
count = len(words)
for word in words:
total += len(word)
return total/count
def howManyWordsOfLength(text,length):
words = splitText(text)
count = 0
for word in words:
if len(word) == length:
count +=1
return count
def getFileLocation():
location = raw_input("Please eneter the file location>")
return location
def importText():
location = getFileLocation()
f = file(location,"r")
return f.read()
| [
"jhh521@york.ac.uk"
] | jhh521@york.ac.uk |
f4e7f0e88b95e72ed71b719cc5ec004ce4f3a78e | c84ba95b559d0d1fd142c88dffec3da45cb8e711 | /backend/users/migrations/0003_auto_20210115_1652.py | 04ad1b16a0cd2f76a84ca29e9d06e1ab48a24855 | [] | no_license | crowdbotics-apps/insta-23855 | 4460bc7f00d52a86f9c30f90249e451957d4b145 | c3abded4dc1a1dcaf201da48fe12d348468c7a02 | refs/heads/master | 2023-02-11T13:48:17.207924 | 2021-01-15T16:54:09 | 2021-01-15T16:54:09 | 329,785,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,113 | py | # Generated by Django 2.2.17 on 2021-01-15 16:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20210115_0235'),
]
operations = [
migrations.AddField(
model_name='user',
name='last_updated',
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.AddField(
model_name='user',
name='timestamp_created',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='user',
name='first_name',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='user',
name='last_name',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
13c9726cece639eb23085d411129eaa87a551621 | 87e60b0504be11c6997f1b20b72e9428cc128342 | /ana/magic/histo.py | ea1f01da27e77f8533bcff0d15645274f3f75b83 | [] | no_license | brettviren/cowbells | 70a85856fdfc54526c847f115d5dc01ec85ec215 | 1ceca86383f4f774d56c3f159658518242875bc6 | refs/heads/master | 2021-01-10T18:44:41.531525 | 2014-04-09T15:17:29 | 2014-04-09T15:17:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,839 | py | #!/usr/bin/env python
'''
Histogram store
'''
from UserDict import DictMixin
import ROOT
class Histo(DictMixin):
'''
Provide a dictionary interface to a TDirectory (TFile) for
managing ROOT Histogram objects (any TNamed object, really).
The TDirectory must be associated with a TFile opened with the
"UPDATE" option if items are to be set on objects of this class.
Note, that this allows items to be set using a key name that may
differ from the histogram name. Getting an item by histogram name
will still work but will create a duplicate object in memory. If
you do not wish to save these do not do an explicit TFile::Write()
on the file holding the TDirectory given to Histo.
'''
def __init__(self, tdir = None):
'''
A dictionary-like collection of histograms (any TObjects,
really) tied to a file (TDirectory). <tdir> is some ROOT
TDirectory-like thing where the histograms are to be kept. It
needs to be writable in order to store histograms.
'''
self.tdir = tdir
self.bag = dict()
def __getitem__(self, name):
hist = self.bag.get(name)
if hist: return hist
if self.tdir:
hist = self.tdir.Get(name)
if not hist:
raise KeyError, 'No histogram "%s"' % name
self[name] = hist
return hist
def __setitem__(self, name, obj):
obj.SetDirectory(0)
if name != obj.GetName():
obj.SetName(name)
self.bag[name] = obj
return
def add(self, obj):
self[obj.GetName()] = obj
def keys(self):
kl = set()
if self.tdir:
kl = set([k.GetName() for k in self.tdir.GetListOfKeys()])
map(kl.add, self.bag.keys())
return list(kl)
def flush(self, tdir = None):
'''
Write all hists to directory
'''
tdir = tdir or self.tdir
if not tdir:
raise ValueError, 'No TDirectory to flush to'
for obj in self.bag.values():
tdir.WriteTObject(obj)
def test():
fd = ROOT.TFile.Open('test_histo.root','recreate')
h = Histo(fd)
h['h1key'] = ROOT.TH1F('h1name','hist1',10,-1,1)
assert h['h1key']
h['h1key'].FillRandom('gaus')
entries = h['h1key'].GetEntries()
assert entries
print 'Original entries:', entries
h.flush()
fd.Close()
del(h)
print 'Opening file read-only'
fd2 = ROOT.TFile.Open('test_histo.root','readonly')
h2 = Histo(fd2)
print 'keys',h2.keys()
assert 'h1key' in h2.keys()
print 'h1key',h2.get('h1key')
assert h2.get('h1key')
print 'h1name',h2.get('h1name')
assert not h2.get('h1name')
assert entries == h2['h1key'].GetEntries()
if __name__ == '__main__':
test()
| [
"bv@bnl.gov"
] | bv@bnl.gov |
b800cd2c856c4a73d0296b6c72dc072970c0415a | b4c0d3d58eb74d3b154190d3febcb3fef691481b | /scheme_tokens.py | 820da6c951ef9a288ff70fa4ff37d7b52b32c709 | [] | no_license | ericwxia/eScheme | 8da8ae105ed768857a40bc20820fffce05db3de3 | 4de6d95a05e9c53c2edb36519e83531a9b89e1a4 | refs/heads/master | 2016-08-11T07:20:02.143106 | 2015-12-22T15:06:23 | 2015-12-22T15:06:23 | 47,124,067 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,647 | py | """The scheme_tokens module provides functions tokenize_line and tokenize_lines
for converting (iterators producing) strings into (iterators producing) lists
of tokens. A token may be:
* A number (represented as an int or float)
* A boolean (represented as a bool)
* A symbol (represented as a string)
* A delimiter, including parentheses, dots, and single quotes
"""
from ucb import main
import itertools
import string
import sys
import tokenize
_NUMERAL_STARTS = set(string.digits) | set('+-.')
_SYMBOL_CHARS = (set('!$%&*/:<=>?@^_~') | set(string.ascii_lowercase) |
set(string.ascii_uppercase) | _NUMERAL_STARTS)
_STRING_DELIMS = set('"')
_WHITESPACE = set(' \t\n\r')
_SINGLE_CHAR_TOKENS = set("()[]'`")
_TOKEN_END = _WHITESPACE | _SINGLE_CHAR_TOKENS | _STRING_DELIMS | {',', ',@'}
DELIMITERS = _SINGLE_CHAR_TOKENS | {'.', ',', ',@'}
def valid_symbol(s):
"""Returns whether s is a well-formed symbol."""
if len(s) == 0:
return False
for c in s:
if c not in _SYMBOL_CHARS:
return False
return True
def next_candidate_token(line, k):
"""A tuple (tok, k'), where tok is the next substring of line at or
after position k that could be a token (assuming it passes a validity
check), and k' is the position in line following that token. Returns
(None, len(line)) when there are no more tokens."""
while k < len(line):
c = line[k]
if c == ';':
return None, len(line)
elif c in _WHITESPACE:
k += 1
elif c in _SINGLE_CHAR_TOKENS:
if c == ']': c = ')'
if c == '[': c = '('
return c, k+1
elif c == '#': # Boolean values #t and #f
return line[k:k+2], min(k+2, len(line))
elif c == ',': # Unquote; check for @
if k+1 < len(line) and line[k+1] == '@':
return ',@', k+2
return c, k+1
elif c in _STRING_DELIMS:
if k+1 < len(line) and line[k+1] == c: # No triple quotes in Scheme
return c+c, k+2
line_bytes = (bytes(line[k:], encoding='utf-8'),)
gen = tokenize.tokenize(iter(line_bytes).__next__)
next(gen) # Throw away encoding token
token = next(gen)
if token.type != tokenize.STRING:
raise ValueError("invalid string: {0}".format(token.string))
return token.string, token.end[1]+k
else:
j = k
while j < len(line) and line[j] not in _TOKEN_END:
j += 1
return line[k:j], min(j, len(line))
return None, len(line)
def tokenize_line(line):
"""The list of Scheme tokens on line. Excludes comments and whitespace."""
result = []
text, i = next_candidate_token(line, 0)
while text is not None:
if text in DELIMITERS:
result.append(text)
elif text == '#t' or text.lower() == 'true':
result.append(True)
elif text == '#f' or text.lower() == 'false':
result.append(False)
elif text == 'nil':
result.append(text)
elif text[0] in _SYMBOL_CHARS:
number = False
if text[0] in _NUMERAL_STARTS:
try:
result.append(int(text))
number = True
except ValueError:
try:
result.append(float(text))
number = True
except ValueError:
pass
if not number:
if valid_symbol(text):
result.append(text.lower())
else:
raise ValueError("invalid numeral or symbol: {0}".format(text))
elif text[0] in _STRING_DELIMS:
result.append(text)
else:
print("warning: invalid token: {0}".format(text), file=sys.stderr)
print(" ", line, file=sys.stderr)
print(" " * (i+3), "^", file=sys.stderr)
text, i = next_candidate_token(line, i)
return result
def tokenize_lines(input):
"""An iterator over lists of tokens, one for each line of the iterable
input sequence."""
return map(tokenize_line, input)
def count_tokens(input):
"""Count the number of non-delimiter tokens in input."""
return len(list(filter(lambda x: x not in DELIMITERS,
itertools.chain(*tokenize_lines(input)))))
@main
def run(*args):
file = sys.stdin
if args:
file = open(args[0], 'r')
print('counted', count_tokens(file), 'non-delimiter tokens')
| [
"eric.w.xia@gmail.com"
] | eric.w.xia@gmail.com |
6c589576ecac4794475e82ad5cf099ebc0984bd6 | bddb35d71b3768dc6da19d8d22865b4a66a72569 | /0x0A-python-inheritance/9-rectangle.py | 7585c62c6ad8de111bb545b70b5ff934f9d8219a | [] | no_license | ollyimanishimwe/alx-higher_level_programming | 70e67425bb1a44c8e9ccb81beeb1319fe5d846c4 | c44a66ac2c6b71bf392f250380732ce18e501592 | refs/heads/main | 2023-07-28T21:09:41.830780 | 2021-09-30T01:26:18 | 2021-09-30T01:26:18 | 377,280,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | #!/usr/bin/python3
"""Coordinates of a square"""
BaseGeometry = __import__('7-base_geometry').BaseGeometry
class Rectangle(BaseGeometry):
"""Coordinates of a square"""
def __init__(self, width, height):
"""Method"""
self.integer_validator("width", width)
self.integer_validator("height", height)
self.__width = width
self.__height = height
def area(self):
return self.__width * self.__height
def __str__(self):
return '[Rectangle] ' + str(self.__width) + '/' + str(self.__height)
| [
"imanishimweolyy@gmail.com"
] | imanishimweolyy@gmail.com |
4eb5f6a1973d51f56c5840b06100a56e3a8e22e8 | 957430fc737d07df115f80dae22ce5cd11096689 | /restaurants/table/migrations/0001_initial.py | 36d1e79ffd34d9c15c9e0a9377af92f001469bf6 | [] | no_license | Hamza-abughazaleh/Restaurant | c6ac28c029d1d2c8eadcf0a61575c54d39273623 | ecffb9a7bf11b115aa0d33617f61e72697f327cc | refs/heads/main | 2023-06-19T09:09:03.268647 | 2021-07-16T19:45:33 | 2021-07-16T19:45:33 | 386,622,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,018 | py | # Generated by Django 3.2.5 on 2021-07-14 19:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import table.validation
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Table',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('table_number', models.IntegerField(error_messages={'unique': 'A Table number already exists.'}, unique=True, verbose_name='Employee number')),
('seats_number', models.IntegerField(validators=[table.validation.validate_table_seats], verbose_name='Employee number')),
('userid', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"hamzaabughazaleh23@gmail.com"
] | hamzaabughazaleh23@gmail.com |
6764d6567a70fd6c2f2886bcd6dfc1234234f72f | edf31957838a65e989d5eb5e8118254ac2413fc8 | /parakeet/analysis/collect_vars.py | 66543535c72ccdc08e79053098b7cefbdccc4db0 | [
"BSD-3-Clause"
] | permissive | iskandr/parakeet | e35814f9030b9e8508a7049b62f94eee5b8c5296 | d9089f999cc4a417d121970b2a447d5e524a3d3b | refs/heads/master | 2021-07-18T19:03:05.666898 | 2019-03-13T17:20:20 | 2019-03-13T17:20:20 | 5,889,813 | 69 | 7 | NOASSERTION | 2021-07-17T21:43:03 | 2012-09-20T16:54:18 | Python | UTF-8 | Python | false | false | 1,523 | py | from .. syntax import Var, Tuple
from syntax_visitor import SyntaxVisitor
class SetCollector(SyntaxVisitor):
def __init__(self):
SyntaxVisitor.__init__(self)
self.var_names = set([])
def visit_Var(self, expr):
self.var_names.add(expr.name)
def collect_var_names(expr):
collector = SetCollector()
collector.visit_expr(expr)
return collector.var_names
def collect_var_names_from_exprs(exprs):
collector = SetCollector()
collector.visit_expr_list(exprs)
return collector.var_names
class ListCollector(SyntaxVisitor):
def __init__(self):
SyntaxVisitor.__init__(self)
self.var_names = []
def visit_Var(self, expr):
self.var_names.append(expr.name)
def collect_var_names_list(expr):
collector = ListCollector()
collector.visit_expr(expr)
return collector.var_names
def collect_binding_names(lhs):
lhs_class = lhs.__class__
if lhs_class is Var:
return [lhs.name]
elif lhs.__class__ is Tuple:
combined = []
for elt in lhs.elts:
combined.extend(collect_binding_names(elt))
return combined
else:
return []
class CollectBindings(SyntaxVisitor):
    """Visitor that maps each bound variable name to its assigned expression."""

    def __init__(self):
        SyntaxVisitor.__init__(self)
        self.bindings = {}

    def bind(self, lhs, rhs):
        # A simple Var target records one binding; a Tuple target binds each
        # element (recursively) to the same right-hand side.
        lhs_class = lhs.__class__
        if lhs_class is Var:
            self.bindings[lhs.name] = rhs
        elif lhs_class is Tuple:
            for element in lhs.elts:
                self.bind(element, rhs)

    def visit_Assign(self, stmt):
        self.bind(stmt.lhs, stmt.rhs)
def collect_bindings(fn):
    """Collect lhs-name -> rhs-expression bindings for every assignment in *fn*."""
    visitor = CollectBindings()
    return visitor.visit_fn(fn)
| [
"alex.rubinsteyn@gmail.com"
] | alex.rubinsteyn@gmail.com |
b6293e11242c694c26602b35f2ac13d2b23179dc | 86da8478bd5b28045581445263fded606f592158 | /tests/network/nano_node/data/http/empty_watching.py | c91229055452ec790f8826079b0f4474b6efc22f | [
"CC0-1.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | Matoking/siliqua | c2053214187ed6a2a1d418daf7e43108770c731c | b943822631ab18dde85e95d1731ebd7ffd7ef14a | refs/heads/master | 2020-08-28T02:59:53.841369 | 2019-11-18T17:00:26 | 2019-11-18T17:00:26 | 217,568,445 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | from tests.network.nano_node.conftest import HTTPReplay
# Canned request/response pair replayed by the nano_node HTTP test fixtures.
# The request asks for the raw, reversed account history of an account the
# node has never seen, so the replayed response is an "Account not found"
# error.
DATA = [
    HTTPReplay(
        {
            "action": "account_history",
            "account": "xrb_15n1wthxc5ndjnoufdfe8m4z5j973o6trzwbfys4cu4gtju5mh4xc918fout",
            "count": 500,
            "raw": True,
            "reverse": True
        },
        {
            "error": "Account not found"
        }
    )
]
| [
"jannepulk@gmail.com"
] | jannepulk@gmail.com |
0da7ffb9dba9e439310bf1bcbf75e589d3e0994f | a840bc7137f4b52bb7037bf43dd3a299477826ad | /rdlcompiler/config.py | 57594f1de9e938b347540421bc68ccbe2cd62316 | [
"MIT"
] | permissive | MegabytePhreak/rdl | 766d68948af021f976d761efb5e3084ea4d6c500 | b97912751738da9fddb42a9155f5823716809fc0 | refs/heads/master | 2020-05-30T14:28:14.330827 | 2014-04-15T23:57:35 | 2014-04-15T23:57:35 | 15,353,986 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,379 | py | __author__ = 'MegabytePhreak'
import ConfigParser
import sys
import os.path
class Config(ConfigParser.RawConfigParser, object):
    """Application configuration for rdlcompiler (Python 2 code).

    Wraps RawConfigParser, pre-populating it with the built-in defaults
    below and exposing a process-wide singleton via create()/cfg().
    """

    # Built-in defaults: each top-level key is a config section mapping
    # option names to their default values.
    _cfg_defaults = {
        'rdlcompiler': {
            'outputs': 'verilog vhdl html c'
        },
        'preprocessor': {
            'perl': 'perl',
            'perlpp': '/home/megabytephreak/code/rdl/perlpp/rdlcompiler_perlpp.pl',
            'vppreproc': '/usr/bin/site_perl/vppreproc',
        }
    }

    # Process-wide singleton, managed by create()/cfg().
    _instance = None

    # Platform-specific locations of the system-wide and per-user config files.
    if sys.platform.startswith('win'):
        SYSTEM_CONFIG_PATH = os.path.expandvars(r'%PROGRAMDATA%\rdlcompiler\rdlcompiler.conf')
        USER_CONFIG_PATH = os.path.expandvars(r'%LOCALAPPDATA%\rdlcompiler\rdlcompiler.conf')
    else:
        SYSTEM_CONFIG_PATH = '/etc/rdlcompiler.conf'
        USER_CONFIG_PATH = os.path.expanduser('~/.rdlcompiler.conf')

    @classmethod
    def create(cls, *args, **kwargs):
        """Build (or replace) the singleton Config instance."""
        cls._instance = Config(*args, **kwargs)

    @classmethod
    def cfg(cls):
        """Return the singleton built by create() (None if never created)."""
        return cls._instance

    def __init__(self, defaults=None, *args, **kwargs):
        # Seed the parser with *defaults* (falling back to _cfg_defaults):
        # create each section, then set every option in it.
        super(Config, self).__init__(*args, **kwargs)
        if defaults is None:
            defaults = self._cfg_defaults
        for section, options in defaults.iteritems():  # Python 2 dict API
            self.add_section(section)
            for option, value in options.iteritems():
                self.set(section, option, value)
| [
"roukemap@gmail.com"
] | roukemap@gmail.com |
34f85ad410331a5914a2517ee3343c14572b7b59 | 7a2bfe09f7526c36fce304999fa47466b89fdec2 | /profiles/models.py | 7cbf380d6bb77aeabe96546b9fe12b082a1ed6fc | [] | no_license | Brachamul/fichier-jdem | 179344ba64b830c3f6e352907e470a1db8d42a9b | f9b40657aea54db83b3abd3e7b38fec9260d34e9 | refs/heads/master | 2021-05-01T00:37:50.021517 | 2019-02-07T15:02:06 | 2019-02-07T15:02:06 | 58,691,054 | 0 | 0 | null | 2017-07-04T21:13:01 | 2016-05-13T02:01:05 | Python | UTF-8 | Python | false | false | 2,247 | py | from django.db import models
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.signals import post_save
from django.dispatch import receiver
from fichiers_adherents.models import FichierAdherents, Adherent, Cnil, adherents_actuels
class Member(models.Model):
    """A party member, keyed by their permanent adherent number.

    Rows are created/refreshed automatically from Adherent records via the
    post_save signal handler defined below in this module.
    """

    # Primary key: the member's permanent adherent number.
    id = models.IntegerField(primary_key=True)
    # True when the latest fichier entry for this member has no phone number.
    phoneless = models.BooleanField(default=False)

    def historique_adherent(self):
        """All Adherent rows (one per imported fichier) for this member."""
        return Adherent.objects.filter(num_adherent=self.id)

    def derniere_occurence_fichier(self):
        """The Adherent row from the most recent fichier containing this member."""
        adherents = Adherent.objects.filter(num_adherent=self.id)
        fichier = FichierAdherents.objects.filter(adherent__in=adherents)
        return Adherent.objects.get(num_adherent=self.id, fichier=fichier.latest())

    def notes(self):
        """All Note rows attached to this member."""
        return Note.objects.filter(member=self)

    def __str__(self):
        return str(self.derniere_occurence_fichier())

    # NOTE(review): takes no ``self`` — it reads like a utility meant to be
    # called on the class (e.g. Member.initiate(...)); confirm before calling
    # it on an instance.
    def initiate(fichier=False):
        ''' Generate, for all fichiers or a single one, members for each adherent
        this is used when rebuilding the DB '''
        if fichier :
            adherents = Adherent.objects.filter(fichier=fichier)
        else :
            adherents = Adherent.objects.all()
        for adherent in adherents :
            new_member, created = Member.objects.get_or_create(id=adherent.num_adherent)

    def check_if_phoneless(self):
        ''' Returns 'True' if the adherent has no phone number '''
        # Refresh the cached flag from the latest fichier entry and persist it.
        self.phoneless = self.derniere_occurence_fichier().phoneless()
        self.save()
# Keep the Member table in sync: whenever an Adherent row is saved, ensure a
# Member with that adherent number exists and refresh its phoneless flag.
@receiver(post_save, sender=Adherent)
def initiate_member(sender, instance, created, **kwargs):
    new_member, created = Member.objects.get_or_create(id=instance.num_adherent)
    new_member.check_if_phoneless()
class Note(models.Model):
    """Free-form note left on a member by a user."""
    member = models.ForeignKey(Member, on_delete=models.CASCADE)
    # Author is kept nullable so notes survive deletion of their author.
    author = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
    text = models.CharField(max_length=1024)
    date = models.DateTimeField(auto_now_add=True)
    def __str__(self): return self.text
# https://codepen.io/codyhouse/pen/FdkEf
class WrongNumber(models.Model):
    """Report flagging a member's phone number on file as wrong."""
    # NOTE(review): the ``__str__`` just below this block returns
    # ``self.member`` (a Member instance, not a str); it should return
    # ``str(self.member)`` — confirm and fix.
    member = models.ForeignKey(Member, on_delete=models.CASCADE)
    reported_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
    date = models.DateTimeField(auto_now_add=True)
def __str__(self): return self.member | [
"barnaby.brachamul@gmail.com"
] | barnaby.brachamul@gmail.com |
c65f1d22e75919aa6f2e0331224a469b38b07173 | c2ef3d85a051ad804c85e2b6c0b8a80f3404e0ac | /node.py | 1872ac0ee492e68677965835e9b95b4e1b71fb37 | [] | no_license | nrjshka/blockchain | dbd3a0809811d491faa8f8d30221e4441ba23abc | 48ffe50b86be7ea823f3465829633f4551a4d596 | refs/heads/master | 2021-05-08T19:47:35.614926 | 2018-01-28T17:58:40 | 2018-01-28T17:58:40 | 119,579,122 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | from block import Block
from flask import Flask
import sync
import os
import json
# Flask application exposing this node's copy of the chain over HTTP.
node = Flask(__name__)
# Blocks loaded from disk at import time; re-read on each request below.
node_blocks = sync.sync()
@node.route('/blockchain.json', methods=['GET'])
def blockchain():
    """Serve this node's blockchain as a JSON list of block dictionaries.

    Each entry carries the block information: index, timestamp, data, hash
    and prev_hash.
    """
    current_blocks = sync.sync()  # re-read from disk in case the chain changed
    # Each block serialises itself to a plain dict so json can encode it.
    serializable = [block.__dict__() for block in current_blocks]
    return json.dumps(serializable)
if __name__ == '__main__':
    # Run the Flask development server with its default host/port.
    node.run()
| [
"nrjshka@gmail.com"
] | nrjshka@gmail.com |
43f14733dab7aeb55fa6638826d2082794833096 | 8d5c089e60786e69ad9ace52f9e8c213598ebdaa | /temp.py | d10f094e6c712446ab05ea6869df8ed6704055a7 | [] | no_license | kkranze/sandbox | 07182da2bcfce8068c5792636b1ffb670daec824 | 7f7ccb84a4c5394ba2f2dbb85a34ec90fe0dd78c | refs/heads/master | 2021-08-29T10:53:34.845262 | 2017-12-13T19:38:56 | 2017-12-13T19:38:56 | 103,175,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | temp = input("what is the temp in celcius")
# Classify the state of water at the temperature read (in Celsius) above.
temp = float(temp)
isfreezing = temp <= 0.0
isnormal = temp < 100.0 and temp > 0.0
# Bug fix: use >= so that exactly 100.0 degrees is classified as gas; with
# the original strict > no branch matched at 100.0 and nothing was printed.
isgas = temp >= 100.0
if(isfreezing):
    print("ice")
elif(isnormal):
    print("water")
elif(isgas):
    print("gas")
| [
"kkranze@sjnma.org"
] | kkranze@sjnma.org |
4ce536bc1b17dc9cebbe3a53d7364bca5d55f1f6 | 6db9df0fc8629ca61601d8b556d9389feaac657c | /1_introduction_au_langage_python/tri_rapide.py | cef8cd0fe84b5ee36cb7907986518140e2b90edc | [] | no_license | mba-tradelab/programmation_python_mathematiques | 2297d618957cb22caaad4ab9f55640e12316eceb | 8b79dffbe0a01ff67aed5d337a9d975f5e289866 | refs/heads/main | 2023-01-12T01:55:48.693084 | 2020-11-06T19:08:43 | 2020-11-06T19:08:43 | 248,700,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | #!/home/antounes/anaconda3/bin/python3
#-*- coding: Utf-8 -*-
# Génération aléatoire d'une liste
from random import randrange
n = 20; liste = [randrange(100) for i in range(n)]
# Tri rapide
# On choisit un élément au hasard que l'on appelle "pivot"
# On le met à sa place définitive en plaçant
# tous les éléments qui sont plus petits à sa gauche
# et tous les éléments qui sont plus grands à sa droite
# On recommence ensuite le tri sur les deux sous-listes obtenues
# jusqu'à ce que la liste soit triée
# Par exemple pour [10, 1, 5, 19, 3, 3, 2, 17] choisissons 10 comme premier pivot
# On place les éléments plus petits que 10 à gauche et les plus grands à droite
# [1, 5, 3, 3, 2, 10, 19, 17]
# Il reste deux sous-listes à trier selon la même méthode
# [1, 5, 3, 3, 2] et [19, 17]
def tri_rapide(liste):
    """Sort *liste* with quicksort, using the first element as pivot.

    Returns a new sorted list; the input list is left untouched. Elements
    smaller than the pivot go left, the rest go right, and both partitions
    are sorted recursively.

    Bug fix: the original return expression had mismatched brackets
    (``liste[0]))`` where ``liste[0]])`` was intended) and did not parse.
    """
    if liste == []: return []
    return (tri_rapide([x for x in liste[1:] if x < liste[0]])
            + [liste[0]]
            + tri_rapide([x for x in liste[1:] if x >= liste[0]]))
| [
"matthieu.brito.antunes@gmail.com"
] | matthieu.brito.antunes@gmail.com |
19f6b9fdafe10ff719637dbb61652a609ed9605b | 31014d91da72a0912ab3a7553846a8b1e4bd3b65 | /tests/__init__.py | e041fbcd3dc60614f74004e490c2deceeb5bbc9c | [
"MIT"
] | permissive | fossabot/covariance | 4f65ca0e0f5babeee953fac7791d7656eab09ea8 | 29e53861a0141039fe4c48580b1785d0b1be263f | refs/heads/master | 2020-05-04T05:40:36.262392 | 2019-04-02T03:17:15 | 2019-04-02T03:17:15 | 178,990,282 | 0 | 0 | null | 2019-04-02T03:17:10 | 2019-04-02T03:17:09 | null | UTF-8 | Python | false | false | 1,198 | py | # -*- coding: utf-8 -*- #
#
# tests/__init__.py
#
#
# MIT License
#
# Copyright (c) 2019 Brandon Gomes
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
Covariance Testing Suite.
"""
| [
"bhgomes.github@gmail.com"
] | bhgomes.github@gmail.com |
f6345fee883766347e8a49dfa0c93038f32995b2 | 48a7b266737b62da330170ca4fe4ac4bf1d8b663 | /molsysmt/_private/digestion/argument/chi3.py | ef60e618382a4208ce40cd84eadebbd653dad6de | [
"MIT"
] | permissive | uibcdf/MolSysMT | ddab5a89b8ec2377f383884c5169d147cab01322 | c3d713ba63db24eb8a2426115cf8d9cb3665d225 | refs/heads/main | 2023-08-08T15:04:16.217967 | 2023-08-04T05:49:56 | 2023-08-04T05:49:56 | 137,937,243 | 15 | 3 | MIT | 2023-06-04T20:27:06 | 2018-06-19T19:38:44 | Python | UTF-8 | Python | false | false | 407 | py | from ...exceptions import ArgumentError
# Fully-qualified caller names for which a boolean ``chi3`` argument is valid.
methods_bool_input = ["molsysmt.topology.get_dihedral_quartets.get_dihedral_quartets",
                      "molsysmt.structure.get_dihedral_angles.get_dihedral_angles"]
def digest_chi3(chi3, caller=None):
    """Validate the ``chi3`` argument.

    A bool is accepted only when *caller* is one of the functions listed in
    ``methods_bool_input``; every other combination raises ArgumentError.
    """
    if caller in methods_bool_input and isinstance(chi3, bool):
        return chi3
    raise ArgumentError('chi3', value=chi3, caller=caller, message=None)
| [
"prada.gracia@gmail.com"
] | prada.gracia@gmail.com |
84b627ba4602460bc55788ddd1b76ae755127542 | df92c0be5acca5802948720c10420045c1a049af | /app/schemas/SchedualeSchema.py | b6f97ed936210fcc77a81913967d7208dc2fc4bb | [] | no_license | abdalmonem/ustBoardBackEnd | c7148977cc5d56970cd77ae81dbef73d09a10fe3 | c718f8b0099b2cf6e155d3822d2501b50fd0db2c | refs/heads/master | 2023-03-30T22:19:02.533159 | 2021-03-23T06:41:45 | 2021-03-23T06:41:45 | 314,628,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | from marshmallow import fields
from .. import ma
class SchedualeSchema(ma.Schema):
    """Marshmallow schema serialising one schedule entry (position, day,
    and the referenced material id)."""
    position = fields.Int()
    day = fields.Str()
    material_id = fields.Int()
| [
"48384773+donato95@users.noreply.github.com"
] | 48384773+donato95@users.noreply.github.com |
da60dde1e796db0872b0c257e878c1ebb4826cda | ffff723a6c8527b45299a7e6aec3044c9b00e923 | /PS/BOJ/1238/1238.py | 599cad18df66ed2c7caf926d2eb19296b2ffb8d7 | [] | no_license | JSYoo5B/TIL | 8e3395a106656e090eeb0260fa0b0dba985d3beb | 3f9ce4c65451512cfa2279625e44a844d476b68f | refs/heads/master | 2022-03-14T09:15:59.828223 | 2022-02-26T01:30:41 | 2022-02-26T01:30:41 | 231,383,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,197 | py | #!/usr/bin/env python3
import heapq
INF = 10 ** 9  # effectively-infinite distance sentinel
input = __import__('sys').stdin.readline  # fast input for the online judge
# Local aliases avoid repeated attribute lookups in the hot loop.
heappush = heapq.heappush
heappop = heapq.heappop
def get_dist_to_others(edges, src):
    """Dijkstra's algorithm: shortest distance from *src* to every node.

    ``edges[u]`` holds ``[v, weight]`` pairs for the directed edges leaving
    ``u``. Returns a list of distances (INF for unreachable nodes).

    Improvement: stale heap entries (superseded by a shorter path found
    later) are now skipped instead of re-relaxing all their edges, which is
    the standard optimisation; the returned distances are unchanged.
    """
    nodes_cnt = len(edges)
    dists = [INF] * nodes_cnt
    dists[src] = 0
    heap = [[0, src]]
    while heap:
        dist, node = heappop(heap)
        if dist > dists[node]:
            continue  # stale entry: a shorter path to node was already found
        for n, d in edges[node]:
            cand = dist + d
            if cand < dists[n]:
                dists[n] = cand
                heappush(heap, [cand, n])
    return dists
if __name__ == '__main__':
    # Read the directed weighted graph, compute all-pairs shortest distances
    # by running Dijkstra from each node, then print the longest round trip
    # (node -> target plus target -> node) over all nodes.
    nodes_cnt, edges_cnt, tgt_id = map(int, input().split())
    tgt_id -= 1  # convert into zero offset
    edges = [ [ ] for _ in range(nodes_cnt) ]
    for _ in range(edges_cnt):
        src, dst, dist = map(int, input().split())
        edges[src-1].append([dst-1, dist])
    # single_dists[n] = shortest distances from node n to every node.
    single_dists = []
    for n in range(nodes_cnt):
        dist = get_dist_to_others(edges, n)
        single_dists.append(dist)
    # Round-trip cost for each node through the target node.
    return_dists = []
    for n in range(nodes_cnt):
        dist = single_dists[n][tgt_id] + single_dists[tgt_id][n]
        return_dists.append(dist)
    answer = max(return_dists)
    print(answer)
| [
"jsyoo5b@gmail.com"
] | jsyoo5b@gmail.com |
58524a12b5b29b2ac831f2220fdc3e44327a3539 | 419226d826561d09f5fc645f1397116e153ba656 | /1_a_bite_of_python/1/function_keyword.py | 60d8a150d79a8d4d4c15719caaabba62aaca257d | [] | no_license | andrei-kozel/Python | 6ed2a612637bc93a0578166d6e09afeecd4dac73 | d6c88ee80af4fdd0d5dd9700af74d50ab3f85759 | refs/heads/master | 2022-01-22T03:15:22.055754 | 2019-08-19T15:38:56 | 2019-08-19T15:38:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | def func(a, b=5, c=10):
print(f'a is {a}, and b is {b}, anc c is {c}')
func(3,7)
func(25, c=12) | [
"bexxxa38@gmail.com"
] | bexxxa38@gmail.com |
25214108f46273f7f891dc6e191487fd672383d8 | dfc5d42556bd61f69374d07b5ba0fe67f1aa6e92 | /products/migrations/0003_auto_20201126_1129.py | a20010c73ba054f2903f9e9178f8fcef6080382f | [] | no_license | kosisoNBG/PyShop | c4981e6554926f227e3191cb86ee92e58367855a | 50b8d8d0aceeddc877499a2ef7afa49c5dc53f9e | refs/heads/main | 2023-03-20T05:22:54.931419 | 2021-03-09T21:43:37 | 2021-03-09T21:43:37 | 317,730,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | # Generated by Django 2.1 on 2020-11-26 11:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: cap Offer.code at 10 characters."""

    dependencies = [
        ('products', '0002_offer'),
    ]

    operations = [
        migrations.AlterField(
            model_name='offer',
            name='code',
            field=models.CharField(max_length=10),
        ),
    ]
| [
"devinfavourigboagui@gmail.com"
] | devinfavourigboagui@gmail.com |
398f21dbb2ac383e748ab1e06f2585696364df4e | aeca7d8d822df582590e688e76207a7986137c86 | /backend/src/climate/urls.py | f5eae18431586953e3bc63b8d7fe79446b792c0c | [] | no_license | davidwonghk/climate-uk | 4ae3ea1eb43d6143ed2749140d7d4e5a87d89df9 | ece6997f8c2190bbfa43ada668a22d225619e0ac | refs/heads/master | 2016-09-14T02:36:24.766722 | 2016-05-07T00:38:03 | 2016-05-07T00:38:03 | 58,213,119 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | from django.conf.urls import url
from . import views
# URL routes for the climate app, each dispatching to the matching view.
urlpatterns = [
    url(r'pull', views.pull, name='pull'),
    url(r'options', views.options, name='options'),
    url(r'^$', views.index, name='index'),
]
| [
"josh1313@hotmail.com.hk"
] | josh1313@hotmail.com.hk |
6cb691ce7051a76fc2ac3de30b342f0186007575 | 0a90b58451d66b02fec5426f63b1751150915a5e | /Chapter02/Exercise2.02/Exercise2.02_Unit_test.py | 439bca457d8d1071fa9c92d5dd2cc76890e29c69 | [] | no_license | PacktWorkshops/The-TensorFlow-Workshop | fc6405021ae94f33a812ba62906061ca6ac485c4 | 38b49594a9cd1b984132346cd8189561ca22cb12 | refs/heads/master | 2023-01-05T16:01:57.189386 | 2021-12-09T06:03:01 | 2021-12-09T06:03:01 | 246,982,908 | 37 | 37 | null | 2022-12-26T21:37:39 | 2020-03-13T03:45:38 | Jupyter Notebook | UTF-8 | Python | false | false | 1,017 | py | import import_ipynb
import tensorflow as tf
import pandas as pd
class DataTest(tf.test.TestCase):
    """Checks that the Exercise2_02 notebook's DataFrames match an
    independent re-computation from the raw CSV."""

    def setUp(self):
        # Import the exercise notebook (made importable via import_ipynb) and
        # load the raw dataset the same way it does, parsing Date as datetime.
        import Exercise2_02
        super(DataTest, self).setUp()
        self.exercise = Exercise2_02
        self.data = pd.read_csv('../Datasets/Bias_correction_ucl.csv')
        self.data['Date'] = pd.to_datetime(self.data['Date'])

    def testData(self):
        # The notebook's df must equal the CSV as loaded here.
        output = self.exercise.df
        expected_output = self.data
        pd.testing.assert_frame_equal(expected_output, output)

    def testProcessedData(self):
        # Re-create the expected one-hot encoding of year and month, drop the
        # Date column, and compare with the notebook's df2.
        year_dummies = pd.get_dummies(self.data['Date'].dt.year, prefix='year')
        month_dummies = pd.get_dummies(self.data['Date'].dt.month, prefix='month')
        df2 = pd.concat([self.data, month_dummies, year_dummies], axis=1)
        df2.drop('Date', axis=1, inplace=True)
        output = self.exercise.df2
        expected_output = df2
        pd.testing.assert_frame_equal(expected_output, output)
if __name__ == '__main__':
tf.test.main() | [
"moocarme@gmail.com"
] | moocarme@gmail.com |
546664dc944f734fde1b16887bc05cfe6763ff9b | 65662b604fa40bdc6e8648e39ed201b0dd8ad6fd | /Python Specialization/Course 4/code/party4.py | 257a2d0f8d47dc1b565fc7854b62718b830ad3d4 | [
"MIT"
] | permissive | rubysubash/Coursera-Specializations | 973f9dbc01774dae84d90b6b97870a6dfde674bc | 88acc792bbee20e8d9b8d34ff6f7c3072236d6f3 | refs/heads/master | 2020-08-10T02:43:08.277860 | 2020-06-02T09:48:25 | 2020-06-02T09:48:25 | 214,237,214 | 0 | 0 | MIT | 2019-10-10T16:52:27 | 2019-10-10T16:52:27 | null | UTF-8 | Python | false | false | 295 | py | class PartyAnimal:
x = 0
name = ""
    def __init__(self, nam):
        # Remember this animal's name and announce construction (Python 2 print).
        self.name = nam
        print self.name,"constructed"
    def party(self) :
        # Bump this instance's party count and report it (Python 2 print).
        self.x = self.x + 1
        print self.name,"party count",self.x
# Demo: each instance keeps its own counter — Sally parties twice, Jim once.
s = PartyAnimal("Sally")
s.party()
j = PartyAnimal("Jim")
j.party()
s.party()
| [
"amandalmia18@gmail.com"
] | amandalmia18@gmail.com |
ecb6d93377151ec11094a9ee9ea738af7f894f94 | 76ffafb479781dd8fbdcb2bff092d80ecf48f478 | /manage.py | 3a53a1bc9d6c0c2d937e92341a4230b90c569fa4 | [] | no_license | iheyou/hynovel | 41cbaaccbe54ae189b9f88a86b6071541a2081f2 | 2a4a509b21499ac6e1b5e62de3b95e55ae5fa055 | refs/heads/master | 2021-04-26T22:03:19.368990 | 2018-03-06T13:36:22 | 2018-03-06T13:36:22 | 124,017,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django entry point: point at this project's settings module
    # and hand the command line over to Django's management utility.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hynovel.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| [
"heyou0812@gmail.com"
] | heyou0812@gmail.com |
f79a47370c73e4ae2a6103c656dd82bb344c29fa | 3cecba834146371131fd5a6b1600ddb60e7068bd | /StyleBookGenerator/generater/tst2.py | 46fed96027b6e4e05c1f340a6e4ef5936c6f20e0 | [] | no_license | woohyukkk/Giovanna_OS_inventory_auto_update_system | 86e52af40beca91e21149b27931d156421cf90e9 | 3b31dd7f54151f7a45d9965faa0c4d03a1d1a5b7 | refs/heads/master | 2020-03-16T23:11:55.730752 | 2019-03-06T22:56:20 | 2019-03-06T22:56:20 | 133,066,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
import os
import csv
import sys
font1="arial.ttf"
def watermark_text(input_image_path, output_image_path, text, pos):
    """Stamp *text* onto the input image at *pos* and save the result.

    The text is drawn in a near-black colour with the module-level ``font1``
    typeface at 55pt (35pt works better for the hat images).
    """
    image = Image.open(input_image_path)
    canvas = ImageDraw.Draw(image)  # make the image editable in place
    ink = (3, 8, 12)
    stamp_font = ImageFont.truetype(font1, 55)
    canvas.text(pos, text, fill=ink, font=stamp_font)
    image.save(output_image_path)
# Stamp a placeholder product label onto the sample image (overwritten in place).
watermark_text('sample.jpg','sample.jpg','D14XX\nPrice: xx$\nColor:\nxxxxx\nxxxxxx\nxxxxx\nxxxxx\nxxxxx\n',pos=(16,16))
# Blank canvas sized for a 4-wide, 2-tall grid of 1327x2048 tiles (5308x4096).
base_image=Image.new('RGB',(5308,4096))
addPic = Image.open('sample.jpg')
# add watermark to your image
x=0
y=0
# Paste 8 copies: four across the top row, then four across the bottom row.
for num in range(0,8):
    print (num,x,y)
    if num==4:
        # start of the second row: back to the left edge, down one tile height
        x=0
        y+=2048
    base_image.paste(addPic, (x,y) )
    x+=1327
# Shrink to 80% for preview and display it.
base_image=base_image.resize((int(5308*0.8),int(4096*0.8)),Image.ANTIALIAS)
base_image.show()
input(' ') | [
"woo328968014@gmail.com"
] | woo328968014@gmail.com |
7ee6ce359ed6e1d813749604b80b9ad58f8c50d4 | dadc87ea470cc3879aff2d2f2c7c43dcf93952d9 | /ex7_2.py | 21e121a5bffcd1ecc34b51bc6be9f998eaf141cf | [] | no_license | geminiwenxu/machine_learning_1 | d084816248a6497a3e956c0202987e94af2e3613 | 186dc0af3a1b680b6ccd87d4c88065488d26e42b | refs/heads/master | 2023-04-26T00:10:00.713162 | 2021-05-17T18:01:13 | 2021-05-17T18:01:13 | 368,276,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,831 | py | import numpy as np
import scipy.stats as stats
from matplotlib import pyplot as plt
def label(theta):
    """Format the first two components of *theta* as "[a, b]" with 2 decimals."""
    return f"[{theta[0]:2.2f}, {theta[1]:2.2f}]"
def generate_data(size):
    """Draw *size* noisy samples of t = -0.3 + 0.5*x + 0.4*x**2.

    x is uniform on [-1, 1) and the additive noise is Gaussian with
    standard deviation 0.2. Returns the targets as a plain Python list.
    """
    x = np.random.uniform(low=-1.0, high=1.0, size=size)
    noise = np.random.normal(loc=0, scale=0.2, size=size)
    return [-0.3 + 0.5 * xi + 0.4 * xi ** 2 + ni for xi, ni in zip(x, noise)]
def model(x, mb):
    # Quadratic model y = -0.3 + mb[0]*x + mb[1]*x**2, evaluated at one point.
    # NOTE(review): the loop returns on its very first iteration, so only
    # x[0] is ever used (and None is returned for empty x). A vectorised
    # evaluation over all of x looks intended — confirm before changing,
    # since likelihood() feeds this result straight into stats.norm.pdf.
    for i in range(len(x)):
        return -0.3 + mb[0] * x[i] + mb[1] * x[i] ** 2
def likelihood(t, model, x, w):
    """Gaussian likelihood of targets *t* under *model* evaluated at *x*.

    Each observation contributes a N(model(x, w), 0.2) density; the joint
    likelihood is the product of the per-observation densities.
    """
    mean = model(x, w)
    densities = stats.norm.pdf(t, mean, 0.2)
    result = 1
    for density in densities:
        result *= density
    return result
def prior(MB):
    """Gaussian prior N(0, 0.5*I) evaluated on the flattened (w1, w2) grid.

    Uses the module-level meshgrid ``M`` only for the output shape.
    """
    covariance = np.array([[0.5, 0], [0.0, 0.5]])
    mean = np.array([[0], [0]])
    density = stats.multivariate_normal.pdf(MB, mean.ravel(), covariance)
    return density.reshape(M.shape)
def posterior(prior, likelihood):
    """Unnormalised posterior: element-wise product of *prior* and *likelihood*."""
    combined = np.multiply(prior, likelihood)
    return combined
if __name__ == '__main__':
    # --- Stage 0: generate noisy data from the true quadratic model ---
    # creating 10 data points (x, t)
    N = 10
    x = np.linspace(-1, 1, N)
    t = generate_data(N)
    # draw data
    # plt.plot(x, t, 'k.', markersize=20, label='data points', markeredgecolor='w')
    mb0 = [0.5, 0.4]
    y = -0.3 + 0.5 * x + 0.4 * x ** 2
    # plt.plot(x, y, label='true model')
    plt.xlabel('x')
    plt.ylabel('y')
    # plt.legend()
    # create array to cover parameter space
    res = 100
    M, B = np.meshgrid(np.linspace(-1, 1, res), np.linspace(-1, 1, res))
    MB = np.c_[M.ravel(), B.ravel()]
    # design three in one figure
    f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
    '''row 1'''
    # plot the original prior
    prior_1 = prior(MB)
    # ax2.contourf(M, B, prior_1)
    ax2.set_title('Prior Prob. Dist.')
    ax2.set_xlabel('w_1')
    ax2.set_ylabel('w_2')
    # Plot the 6 sample y(x,w)
    # Sample weight vectors from an isotropic Gaussian with precision alpha.
    mean = np.zeros(shape=3, dtype=int)
    alpha = 2
    cov = alpha ** (-1) * np.identity(3)
    w = np.random.multivariate_normal(mean, cov, size=100)
    x = np.linspace(-1, 1, 100)
    for i in range(6):
        y = w[i][0] + w[i][1] * x + w[i][2] * x ** 2
        plt.xlim(-1, 1)
        plt.ylim(-1, 1)
        # plt.plot(x, y)
    plt.title('data space')
    plt.xlabel('x')
    plt.ylabel('y')
    # First observation used for the first Bayesian update.
    observed_x_1 = [0.7666]
    observed_y_1 = w[1][0] + w[1][1] * x[0] + w[1][2] * x[0] ** 2
    '''row 2'''
    # calculate likelihood function
    L = np.array([likelihood(observed_y_1, model, observed_x_1, mb.reshape(2, 1)) for mb in MB]).reshape(M.shape)
    # draw likelihood function
    # ax1.contourf(M, B, L)
    ax1.set_title('Likelihood')
    ax1.set_xlabel('w_1')
    ax1.set_ylabel('w_2')
    # posterior
    Posterior_1 = posterior(prior_1, L)
    # ax2.contourf(M, B, Posterior_1)
    ax2.set_title('Posterior Prob. Dist.')
    ax2.set_xlabel('w_1')
    ax2.set_ylabel('w_2')
    # Plot the 6 sample y(x,w) with w0 and w1 drawn from the posterior
    w1 = np.linspace(-0.1, 0.7, 10)
    w2 = np.linspace(-0.1, 0.6, 10)
    x = np.linspace(-1, 1, 100)
    for i in range(6):
        y = -0.3 + w1[i] * x + w2[i] * x ** 2
        plt.xlim(-1, 1)
        plt.ylim(-1, 1)
        # plt.plot(x, y)
    plt.title('data space')
    plt.xlabel('x')
    plt.ylabel('y')
    # Two more observations for the second update.
    observed_x_2 = [0.3, 0.5]
    observed_y_2_0 = -0.3 + w1[1] * x[0] + 0.4 * x[0] ** 2
    observed_y_2_1 = -0.3 + w1[1] * x[1] + 0.4 * x[1] ** 2
    observed_y_2 = [observed_y_2_0, observed_y_2_1]
    '''row 3 '''
    # calculate likelihood function
    L = np.array([likelihood(observed_y_2, model, observed_x_2, mb.reshape(2, 1)) for mb in MB]).reshape(M.shape)
    # draw likelihood function
    # ax1.contourf(M, B, L)
    ax1.set_title('Likelihood')
    ax1.set_xlabel('w_1')
    ax1.set_ylabel('w_2')
    # posterior
    Posterior_2 = posterior(Posterior_1, L)
    # ax2.contourf(M, B, Posterior_2)
    ax2.set_title('Posterior Prob. Dist.')
    ax2.set_xlabel('w_1')
    ax2.set_ylabel('w_2')
    # Plot the 6 sample y(x,w) with w0 and w1 drawn from the posterior
    w1 = np.linspace(-0.5, -0.1, 200)
    w2 = np.linspace(0.2, 0.7, 200)
    x = np.linspace(-1, 1, 100)
    # y = -0.3 + w1[1] * x + w2[1] * x ** 2
    # plt.plot(x, y)
    # y = -0.3 + w1[20] * x + w2[20] * x ** 2
    # plt.plot(x, y)
    # y = -0.3 + w1[60] * x + w2[60] * x ** 2
    # plt.plot(x, y)
    # y = -0.3 + w1[100] * x + w2[100] * x ** 2
    # plt.plot(x, y)
    # y = -0.3 + w1[120] * x + w2[120] * x ** 2
    # plt.plot(x, y)
    # y = -0.3 + w1[180] * x + w2[80] * x ** 2
    # plt.plot(x, y)
    # plt.xlim(-1, 1)
    # plt.ylim(-1, 1)
    plt.title('data space')
    plt.xlabel('x')
    plt.ylabel('y')
    # 200 observations spread across [-1, 1] for the final update.
    observed_x_3 = np.linspace(-1, 1, 200)
    observed_y_3 = []
    for j in range(200):
        temp = -0.3 + w1[j] * observed_x_3[j] + w2[j] * observed_x_3[j] ** 2
        observed_y_3.append(temp)
    '''row 4'''
    # calculate likelihood function
    L = np.array([likelihood(observed_y_3, model, observed_x_3, mb.reshape(2, 1)) for mb in MB]).reshape(M.shape)
    # draw likelihood function
    ax1.contourf(M, B, L)
    ax1.set_title('Likelihood')
    ax1.set_xlabel('w_1')
    ax1.set_ylabel('w_2')
    # posterior
    Posterior_3 = posterior(Posterior_2, L)
    ax2.contourf(M, B, Posterior_3)
    ax2.set_title('Posterior Prob. Dist.')
    ax2.set_xlabel('w_1')
    ax2.set_ylabel('w_2')
    # Plot the 6 sample y(x,w) with w0 and w1 drawn from the posterior
    w0 = np.linspace(-0.31, -0.29, 10)
    w1 = np.linspace(0.51, 0.5, 10)
    x = np.linspace(-1, 1, 100)
    for i in range(6):
        y = -0.3 + w1[i] * x + w2[i] * x ** 2
        plt.xlim(-1, 1)
        plt.ylim(-1, 1)
        plt.plot(x, y)
    plt.title('data space')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.show()
| [
"41744366+geminiwenxu@users.noreply.github.com"
] | 41744366+geminiwenxu@users.noreply.github.com |
21ffc6d27d9a03f77715918767b026d41c14815c | 47243c719bc929eef1475f0f70752667b9455675 | /bungeni.main/branches/larry_pitcher_deliverance/bungeni/models/tests.py | 7c17f7d64ffa4c2ab9651d6e6789e18dabdccf85 | [] | no_license | malangalanga/bungeni-portal | bbf72ce6d69415b11287a8796b81d4eb6520f03a | 5cf0ba31dfbff8d2c1b4aa8ab6f69c7a0ae9870d | refs/heads/master | 2021-01-19T15:31:42.943315 | 2014-11-18T09:03:00 | 2014-11-18T09:03:00 | 32,453,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,015 | py | """
$Id$
"""
from zope import interface
from zope import component
import unittest
from zope.testing import doctest, doctestunit
from zope.app.testing import placelesssetup, ztapi
from zope.configuration import xmlconfig
from bungeni.models import schema, interfaces
from interfaces import IAssignmentFactory, IContentAssignments, IContextAssignments
from bungeni.core.workflows import adapters
from bungeni.ui import descriptor
# ZCML loaded during test setUp: pulls in the alchemist meta directives and
# binds a local "bungeni-test" PostgreSQL engine to the application and
# security metadata.
zcml_slug = """
<configure xmlns="http://namespaces.zope.org/zope"
xmlns:db="http://namespaces.objectrealms.net/rdb"
>
<include package="bungeni.alchemist" file="meta.zcml"/>
<include package="alchemist.catalyst" file="meta.zcml"/>
<!-- Setup Database Connection -->
<db:engine name="bungeni-db" url="postgres://localhost/bungeni-test" />
<db:bind engine="bungeni-db" metadata="bungeni.models.schema.metadata" />
<db:bind engine="bungeni-db" metadata="bungeni.alchemist.security.metadata" />
</configure>
"""
def setUp(test):
    # Zope placeless setup, load the test ZCML, then create all tables.
    placelesssetup.setUp()
    xmlconfig.string(zcml_slug)
    schema.metadata.create_all(checkfirst=True)
def tearDown(test):
    # Tear down the Zope component registry and drop the test tables.
    placelesssetup.tearDown()
    schema.metadata.drop_all(checkfirst=True)
def assignment_tests():
    """Build the doctest suite for assignment.txt, registering the
    group-assignment adapters in the component registry first."""
    import assignment
    def _setUp(test):
        # Base setup (ZCML + tables), then register the assignment adapters.
        setUp(test)
        ztapi.provideAdapter((interfaces.IBungeniContent, interfaces.IBungeniGroup),
                             IAssignmentFactory,
                             assignment.GroupAssignmentFactory)
        ztapi.provideAdapter(interfaces.IBungeniContent,
                             IContentAssignments,
                             assignment.ContentAssignments)
        ztapi.provideAdapter(interfaces.IBungeniGroup,
                             IContextAssignments,
                             assignment.GroupContextAssignments)
    return doctestunit.DocFileSuite('assignment.txt',
                                    setUp = _setUp,
                                    tearDown = tearDown,
                                    optionflags=doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS
                                    )
def test_suite():
    """Assemble every doctest suite for the models package into one suite."""
    doctests = ('readme.txt',
                'settings.txt',
                #!+BookedResources 'resourcebooking.txt',
                'venue.txt'
                )
    globs = dict(interface=interface, component=component)
    suites = [
        doctestunit.DocFileSuite(filename,
                                 setUp=setUp,
                                 tearDown=tearDown,
                                 globs=globs,
                                 optionflags=doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS)
        for filename in doctests
    ]
    suites.append(assignment_tests())
    return unittest.TestSuite(suites)
return unittest.TestSuite(test_suites)
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| [
"larry.pitcher@gmail.com@fc5d704a-7d24-0410-8c4a-57ddeba10ffc"
] | larry.pitcher@gmail.com@fc5d704a-7d24-0410-8c4a-57ddeba10ffc |
2739ca371bf7aae0357f49ca9d5c508308fc5c31 | 561bbdf5e7f3c0793dafff0adc818300a6d4331c | /fibonacci.py | 572a45560ed0fa5f19a790f726a546b58be17ff1 | [] | no_license | lakivisi-zz/lolo_bootcamp | b88c3826826485fe250e686eb96854ef13228f9d | 7240def3d6ade683742ec2426037ef5d155bfeab | refs/heads/master | 2021-06-01T05:58:20.789641 | 2016-06-20T09:01:58 | 2016-06-20T09:01:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | def fibonacci(x):
fib_list = []
f = 1
i=0
while i<x:
if i<=1:
fib_list.append(f)
else:
f = fib_list[-1] + fib_list[-2] #says that f = the sum of the last two f's in the series
fib_list.append(f)
i+=1
else:
return fib_list
print fibonacci(8) | [
"loiceandia@gmail.com"
] | loiceandia@gmail.com |
08a048fd6fab92b806d56314736965328a16bd91 | 49d383fbda1dace5307303a0985e9331e42f310e | /ciTool/Potluck_cloudera/Testcases/solution/Platform/verify_java_version.py | b5b8c5faff23d0b53f4b2041fa0e236993f94984 | [] | no_license | tarun-batra-guavus/qa | 44248e6068b545354fc27ab754344b53fee65b3e | 62c48b10e977c20b33e6f95e4aacf90d3dacd5e2 | refs/heads/master | 2021-01-01T15:23:35.112534 | 2017-07-18T15:01:11 | 2017-07-18T15:01:11 | 97,610,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,184 | py | """
Purpose
=======
Check that Java version on its respective Nodes are as User Specified
Test Steps
==========
1. Goto to shell
2. Execute "rpm -qa | grep -i "componentName" | grep "version"" and check that Java version on all Ansible machines are as User Specified
"""
from potluck.nodes import connect, get_nodes_by_type
from potluck.logging import logger
from potluck.reporting import report
from potluck.parsing import parser
obj_parser = parser()
versiondict ={}
### Creating A dictionary with Structure [Component]:[Version] ###
versiondict = obj_parser.create_dict_version("userinput/version_info_old.txt")
version=versiondict["JAVA"]
javanodes = get_nodes_by_type("JAVA")
if (not javanodes):
report.fail("No Java nodes in the testbed ")
for node_alias in javanodes:
logger.info("Checking that Java version on all Java machines are as User Specified")
node = connect(node_alias)
flag = node.grepVersionbyCommand("Java", "java -version",version)
if flag ==1:
logger.info("Java version on %s Node nodes are as User Specified"%node_alias)
else:
report.fail("Java version on %s Node are not as User Specified"%node_alias)
| [
"tarun.batra@tarun-batra.local"
] | tarun.batra@tarun-batra.local |
9c4fb664ee8464c5663b6903fbc9af27727f27dd | 56c3b5e54e7bda2ac2e5434c369b4b2e873dcd51 | /tests/test_serializer.py | 9ec1f3b6729e8fd21669db603a83b7c14ab0bfc8 | [
"MIT"
] | permissive | SimonCqk/cannondb | 6b3b484c2510642a32d483ece88a2f7fb2d0f7e0 | 6623a09ea90e45f062d5b784b262b0b49d57bee8 | refs/heads/master | 2021-04-25T19:06:14.353718 | 2018-12-17T06:04:11 | 2018-12-17T06:04:11 | 108,551,097 | 15 | 0 | MIT | 2018-04-24T07:53:29 | 2017-10-27T13:41:58 | Python | UTF-8 | Python | false | false | 2,605 | py | from uuid import UUID
from cannondb.serializer import IntSerializer, FloatSerializer, DictSerializer, ListSerializer, StrSerializer, \
UUIDSerializer
def test_int_serializer():
    """Round-trip small and large, positive and negative ints through
    IntSerializer and check the value survives unchanged."""
    for value in (1, -1, 9999999, -9999999):
        assert IntSerializer.deserialize(IntSerializer.serialize(value)) == value
def test_float_serializer():
    """Round-trip floats through FloatSerializer.

    Converting float -> str and str -> float has unavoidable precision
    loss, so compare within a small tolerance instead of exactly.
    """
    s = FloatSerializer.serialize(0.01)
    assert abs(FloatSerializer.deserialize(s) - 0.01) < 0.00001
    s = FloatSerializer.serialize(-0.01)
    assert abs(FloatSerializer.deserialize(s) + 0.01) < 0.00001
    s = FloatSerializer.serialize(1234.56789)
    assert abs(FloatSerializer.deserialize(s) - 1234.56789) < 0.0001


def test_str_serializer():
    """Round-trip plain strings through StrSerializer."""
    s = StrSerializer.serialize('cannondb')
    assert StrSerializer.deserialize(s) == 'cannondb'
    s = StrSerializer.serialize('php is the best language in the world')
    assert StrSerializer.deserialize(s) == 'php is the best language in the world'


def test_dict_serializer():
    """Round-trip dicts, including nested lists and dicts as values."""
    s = DictSerializer.serialize({'a': 1, 'b': 2, 'c': 3})
    assert DictSerializer.deserialize(s) == {'a': 1, 'b': 2, 'c': 3}
    s = DictSerializer.serialize({'a': [1, 2, 3], 'b': ['a', 'b', 'c']})
    assert DictSerializer.deserialize(s) == {'a': [1, 2, 3], 'b': ['a', 'b', 'c']}
    s = DictSerializer.serialize({'a': 0.1, 'b': 'test', 'c': [1, 2, 3], 'd': {'a': 1, 'b': 2, 'c': 3}})
    assert DictSerializer.deserialize(s) == {'a': 0.1, 'b': 'test', 'c': [1, 2, 3], 'd': {'a': 1, 'b': 2, 'c': 3}}


def test_list_serializer():
    """Round-trip lists of mixed types; tuples deserialize as lists."""
    s = ListSerializer.serialize([1, 2, 3, 4])
    assert ListSerializer.deserialize(s) == [1, 2, 3, 4]
    s = ListSerializer.serialize(['1', '2', '3', '4'])
    assert ListSerializer.deserialize(s) == ['1', '2', '3', '4']
    s = ListSerializer.serialize(['test', 1, 'now', 2])
    assert ListSerializer.deserialize(s) == ['test', 1, 'now', 2]
    # Tuples are accepted on the way in but come back as lists.
    s = ListSerializer.serialize((1, 2, 3, 4, 5))
    assert ListSerializer.deserialize(s) == [1, 2, 3, 4, 5]


def test_uuid_serializer():
    """Round-trip UUIDs (brace and URN forms) comparing the 128-bit value."""
    u = UUID('{12345678-1234-5678-1234-567812345678}')
    s = UUIDSerializer.serialize(u)
    assert u.int == UUIDSerializer.deserialize(s).int
    u = UUID('urn:uuid:12345678-1234-5678-1234-567812345678')
    s = UUIDSerializer.serialize(u)
    assert u.int == UUIDSerializer.deserialize(s).int
| [
"cqk0100@gmail.com"
] | cqk0100@gmail.com |
f3d3656ff52a22f3564e6b11a4f4d47ead489ea1 | bce26e2d316ba7bdb1ca03cf7ade28744b12a14a | /dice.py | 24dc39fdcff5a5eab478fcfc4b1fdb5afbbd67a7 | [] | no_license | bacevedo1/gabe2o2.github.io | 3a587bbb9910e8a7c70a78709b16ccf16f599a3f | 5586d341171c231ba76baf8614810563ccf04780 | refs/heads/master | 2020-04-01T13:56:21.290021 | 2015-05-27T17:33:34 | 2015-05-27T17:33:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | from random import randint
# Estimate the distribution of the sum of two six-sided dice by simulating
# 500 rolls, then print the relative frequency of each total (2..12),
# one value per line, in ascending order of total.
number = {total: 0 for total in range(2, 13)}
for _ in range(500):
    number[randint(1, 6) + randint(1, 6)] += 1
for total in number:
    print(number[total] / 500)
| [
"ggomez@kippnyccp.org"
] | ggomez@kippnyccp.org |
50927c688affde7e346ff92204989892dfd8072b | 3e447787927f33dba9048ed9c9c870c43c356b9a | /eval/miscc/config.py | 1443b2e9dde71be412fcc94345e51757769168fb | [
"MIT"
] | permissive | yliu-code/ATTNGANwithBERT | d41e84188ab053c1d5480397f59e2a861b41d307 | f11a186a7f938ba3bf548326095640ca70ef34ec | refs/heads/master | 2020-08-05T08:25:03.401140 | 2019-10-03T01:03:43 | 2019-10-03T01:03:43 | 212,464,049 | 11 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,293 | py | from __future__ import division
from __future__ import print_function
import os.path as osp
import numpy as np
from easydict import EasyDict as edict
# Global experiment configuration, exposed as the attribute-style EasyDict
# `cfg`.  The values below are defaults; presumably they are overridden by a
# YAML config at startup -- confirm against the training entry point.
__C = edict()
cfg = __C

# Dataset name: flowers, birds
__C.DATASET_NAME = 'google'
__C.CONFIG_NAME = 'attn2'
__C.DATA_DIR = ''
__C.GPU_ID = 0
__C.CUDA = False
__C.WORKERS = 1            # data-loader worker processes
__C.RNN_TYPE = 'LSTM'  # 'GRU'
__C.B_VALIDATION = False

# Multi-stage generator tree: number of branches and base image size.
__C.TREE = edict()
__C.TREE.BRANCH_NUM = 3
__C.TREE.BASE_SIZE = 64

# Training options
__C.TRAIN = edict()
__C.TRAIN.BATCH_SIZE = 64
__C.TRAIN.MAX_EPOCH = 600
__C.TRAIN.SNAPSHOT_INTERVAL = 2000
__C.TRAIN.DISCRIMINATOR_LR = 2e-4
__C.TRAIN.GENERATOR_LR = 2e-4
__C.TRAIN.ENCODER_LR = 2e-4
__C.TRAIN.RNN_GRAD_CLIP = 0.25
__C.TRAIN.FLAG = False
# Pretrained text-encoder / generator checkpoints used at eval time.
__C.TRAIN.NET_E = 'data/text_encoder100.pth'
__C.TRAIN.NET_G = 'data/coco_AttnGAN2.pth'
__C.TRAIN.B_NET_D = False
# Loss-smoothing hyperparameters (GAMMA1/2/3, LAMBDA) -- semantics defined
# by the AttnGAN loss code, not visible here.
__C.TRAIN.SMOOTH = edict()
__C.TRAIN.SMOOTH.GAMMA1 = 5.0
__C.TRAIN.SMOOTH.GAMMA3 = 10.0
__C.TRAIN.SMOOTH.GAMMA2 = 5.0
__C.TRAIN.SMOOTH.LAMBDA = 1.0

# Modal options
__C.GAN = edict()
__C.GAN.DF_DIM = 64        # discriminator feature dim
__C.GAN.GF_DIM = 32        # generator feature dim
__C.GAN.Z_DIM = 100        # noise vector dim
__C.GAN.CONDITION_DIM = 100
__C.GAN.R_NUM = 2
__C.GAN.B_ATTENTION = True
__C.GAN.B_DCGAN = False

# Text-encoder options.
__C.TEXT = edict()
__C.TEXT.CAPTIONS_PER_IMAGE = 10
__C.TEXT.EMBEDDING_DIM = 256
__C.TEXT.WORDS_NUM = 25
| [
"noreply@github.com"
] | noreply@github.com |
60ed7a594f04db99923f3bfc26025c81208239d2 | 25b80ea321e5f336f4ffeb1ff3ca17a38ae7af3b | /Tourniquet 2019/Project Tourniquet.py | 287d0a854ce675891ae4346b8e383cdf6980a6b5 | [] | no_license | m4m6o/tourniquet2019 | eb203bb6410841068f5bc7e1f9de0b6b2344eced | 17628923d9ef0591ba83690abf27f026f96a9192 | refs/heads/master | 2020-08-27T11:06:17.575402 | 2019-10-24T19:05:46 | 2019-10-24T19:05:46 | 217,344,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,152 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# подключаем нужные библиотеки
import win32com.client as com_client # для записи в excel файл
import xlrd # для чтения из excel файла
import shutil # для создания excel файла с опоздавшими
import datetime # для работы с текущей датой
#создаем нужные переменные
################
a = []
headline1 = ""
headline2 = ""
headline3 = []
toname = {1 : 'янв.', 2 : 'фев.', 3 : 'март', 4 : 'апр.', 5 : 'май.', 6 : 'июнь', 7 : 'июль', 8 : 'авг.', 9 : 'сен.', 10 : 'окт.', 11 : 'ноя.', 12 : 'дек.'}
names = ['Омаров', 'Зуев', 'Свинолупов', 'Елюбаев', 'Салаватов', 'Нурмухамбетов', 'Иманмәлік']
###################
#создание класса для каждого ученика
############
class id:
    """Record of one student's turnstile entry.

    NOTE(review): the class name shadows the builtin ``id``; kept because
    callers throughout the script construct ``id(...)``.
    """

    def __init__(self, ind, name, clas, hour, minute):
        self.ind = ind
        self.name = name
        self.clas = clas
        self.hour = hour
        self.minute = minute
        # Entry time expressed in minutes since midnight.
        self.mt = hour * 60 + minute
        # Numeric grade parsed from labels like "5B" or "10A"; only 2- and
        # 3-character class labels occur in the export.
        if len(clas) == 2:
            self.clasnum = int(clas[:1])
        elif len(clas) == 3:
            self.clasnum = int(clas[:2])
##########
#чтение данных из excel файла
#################################
def ReadData(Path):
    """Read the day's turnstile export workbook and collect late arrivals.

    Side effects: sets the module-level headers `headline1/2/3` and appends
    one `id` record to the module-level list `a` per late student.  "Late"
    means entering strictly after 07:45; rows at or after 09:00 stop the
    scan (the export is presumably sorted by entry time -- confirm).
    Blank names and staff members listed in `names` are skipped.
    """
    global headline1, headline2, headline3
    wb = xlrd.open_workbook(r'C:\Users\ilyas\OneDrive\Desktop\Tourniquet 2019' + Path, on_demand=True)
    ##wb = xlrd.open_workbook(r'C:\Users\FizMat\Desktop\projecy' + Path, on_demand = True)
    ws = wb.sheet_by_name("Лист1")
    # Header cells reused later by WriteToCurrentSheet().
    headline1 = ws.cell(1, 1).value
    headline2 = ws.cell(2, 1).value
    for i in range(8):
        headline3.append(ws.cell(3, i).value)
    stnum = 5  # data rows start at row index 5 in the export
    while (True):
        ind = ws.cell(stnum, 0).value
        name = ws.cell(stnum, 2).value
        clas = ws.cell(stnum, 5).value
        b = True  # becomes False for staff members from `names`
        DateEnter = xlrd.xldate_as_tuple(ws.cell(stnum, 4).value, wb.datemode)
        # DateEnter is (year, month, day, hour, minute, second).
        if (DateEnter[3] >= 9):
            break
        if (DateEnter[3] > 7 or (DateEnter[3] == 7 and DateEnter[4] > 45)):
            if len(name) > 0:
                for j in names:
                    if j in name:
                        b = False
                if b:
                    a.append(id(ind, name, clas, DateEnter[3], DateEnter[4]))
        stnum += 1
#############################
#конвертирование времени в строковый формат по текущим часу и минутой
#############################
def convert(x, y):
    """Format an hour `x` and minute `y` as a clock string, e.g. (7, 5) -> "7:05".

    The hour is not padded; the minute is zero-padded to two digits.
    Accepts ints (as the callers pass) or numeric strings.
    """
    return "{}:{}".format(x, str(y).zfill(2))
##################
#запись опоздавших в отдельный excel файл
##############################
def WriteToCurrentSheet(sheetname, a, wb):
    """Write the late students in `a` to worksheet `sheetname` of COM
    workbook `wb`, and dump a per-class summary to "Late List.txt".

    Uses the module-level `headline1/2/3` captured by ReadData().  Also
    blanks any leftover rows from a previous, longer export.
    """
    global headline1, headline2, headline3
    ws = wb.Worksheets(sheetname)
    ws.Cells(1, 2).Value = headline1
    ws.Cells(2, 2).Value = headline2
    for i in range(8):
        ws.Cells(4, i + 1).Value = headline3[i]
    # Group students by class for the text summary.
    a.sort(key=lambda x: x.clas)
    b = a[:]
    # `with` guarantees the file is closed even if a COM call fails later.
    with open("Late List.txt", "w", encoding="utf-8") as f:
        ind = 0  # start index of the current class group within b
        for i in range(len(b)):
            if i + 1 == len(b) or b[i].clas != b[i + 1].clas:
                f.write(str(b[i].clas) + ' - ')
                first = True
                for j in range(ind, i + 1):
                    # Skip blanks and consecutive duplicates within the group.
                    # (Bug fix: the original indexed b[i] inside this loop,
                    # writing one name repeatedly instead of each student.)
                    if b[j].name == '' or (j > ind and b[j].name == b[j - 1].name):
                        continue
                    if not first:
                        f.write(', ')
                    f.write(b[j].name)
                    first = False
                ind = i + 1
                f.write('\n')
    # One spreadsheet row per student, starting at row 5.
    for i in range(len(a)):
        ws.Cells(i + 5, 1).Value = a[i].ind
        ws.Cells(i + 5, 2).Value = a[i].name
        ws.Cells(i + 5, 3).Value = a[i].clas
        ws.Cells(i + 5, 5).Value = convert(a[i].hour, a[i].minute)
    # Blank out stale rows left over from a previous, longer run.
    num = len(a) + 5
    while (ws.Cells(num, 1).Value):
        for i in range(1, 9):
            ws.Cells(num, i).Value = ""
        num += 1
######################
#для каждого класса запись в отдельный лист
##############################
def WriteData(LateName):
    """Open the freshly created `LateName` workbook via Excel COM and write
    the collected late students (module-level list `a`) into sheet "123"."""
    global a
    excel = com_client.Dispatch('Excel.Application')
    excel.visible = False  # run Excel headless
    print('Writing data...')
    wb = excel.Workbooks.Open(r'C:\Users\ilyas\OneDrive\Desktop\Tourniquet 2019' + LateName)
    ##wb = excel.Workbooks.Open(r'C:\Users\FizMat\Desktop\projecy' + LateName)
    WriteToCurrentSheet("123", a, wb)
    wb.Save()
    excel.Application.Quit()
    print("Done!")
    # Credits.  NOTE(review): "programm maded by" is user-visible output with
    # typos; left byte-identical because a doc-only pass must not change
    # runtime strings.
    print("programm maded by:")
    for i in names:
        print(" " + i)
######################################
#создание файла с опоздавшими по заданному образцу
######################
def Create(Latename):
    """Create today's "late students" workbook by copying the blank template
    (`Образец.xlsx`) directly to the target file name.

    The original implementation copied into a `tmp` subfolder and then
    moved the file, which failed whenever `tmp` did not exist; a single
    direct copy produces the same end state without that requirement.
    """
    shutil.copy(
        r'C:\Users\ilyas\OneDrive\Desktop\Tourniquet 2019\Образец.xlsx',
        r'C:\Users\ilyas\OneDrive\Desktop\Tourniquet 2019' + Latename)
####################
#определение имени исходного файла в зависимости от текущей даты
########################
def MakePath():
    """Build today's source-workbook file name (leading backslash included),
    e.g. "\\Время прихода - время ухода за 4 авг. 2019.xlsx"."""
    today = datetime.date.today()
    return '\Время прихода - время ухода за {} {} {}.xlsx'.format(
        today.day, toname[today.month], today.year)
#########################
#определение имени файла с опоздавшими в зависимости от текущей даты
############################
def MakeLateName():
    """Build today's output-workbook file name (leading backslash included),
    e.g. "\\Опоздавшие за 4 авг. 2019.xlsx"."""
    today = datetime.date.today()
    return '\Опоздавшие за {} {} {}.xlsx'.format(
        today.day, toname[today.month], today.year)
########################
# главная функция которая вызвает другие
######################
def main(Path, LateName):
    """Run the full daily pipeline: create the output workbook from the
    template, read today's turnstile export, write the late-student report."""
    Create(LateName)     # create the "late students" workbook from the template
    ReadData(Path)       # read the export into the module-level list `a`
    WriteData(LateName)  # write the late students into the new workbook
#####################
main(MakePath(), MakeLateName()) # вызов главной функции | [
"noreply@github.com"
] | noreply@github.com |
f496808570d534acea82cfe877a130b206da08d4 | a973f336765a31550cc9661be57e0384c317fc38 | /ejemplo3/proyectoUno/administrativo/urls.py | 8ef4aee1be71fedb011dd6c3682a4c4b57228cee | [] | no_license | PlataformasWeb-P-AA2021/clase03-2bim-ricardoifc | 0a40d61f351525ab87cb2ce1f0982804cb50df37 | 35c42f8e5c3420bfa66103dcb45a75c5b27d5a5a | refs/heads/main | 2023-06-19T17:46:12.663825 | 2021-07-16T17:47:59 | 2021-07-16T17:47:59 | 377,869,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | """
Manejo de urls para la aplicación
administrativo
"""
from django.urls import path
# se importa las vistas de la aplicación
from . import views
# URL routes for the `administrativo` app.
urlpatterns = [
    # Landing page.
    path('', views.index, name='index'),
    # Detail view for a single student, looked up by integer id.
    path('estudiante/<int:id>', views.obtener_estudiante,
         name='obtener_estudiante'),
]
| [
"66690702+github-classroom[bot]@users.noreply.github.com"
] | 66690702+github-classroom[bot]@users.noreply.github.com |
2ef3099dcf36cfaa0932749b69643b1c445f167e | ab2c96d266cd605c5502ef0d91fa60fd5bd1e7c7 | /websocket_main.py | 167e5961c1b629e2f2074a7cd311525b3de82b6d | [] | no_license | harsh8088/py_ocpp16 | 89341e0c042c78af13ea250327ccb0d494c6c5a5 | 6bfa4a91e0e8c5e79f3fc502a9d00aff57cad484 | refs/heads/master | 2023-03-22T18:28:50.551685 | 2021-03-16T07:41:51 | 2021-03-16T07:41:51 | 348,254,902 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,067 | py | import asyncio
import websockets
from datetime import datetime
from ocpp.routing import on
from ocpp.v16 import ChargePoint as cp
from ocpp.v16.enums import Action, RegistrationStatus
from ocpp.v16 import call_result
class MyChargePoint(cp):
    """OCPP 1.6 charge-point handler for the central-system side."""

    # NOTE(review): "notitication" is a typo, but the handler is registered
    # via the @on(Action.BootNotification) decorator, so the method name
    # itself is not significant to the protocol.
    @on(Action.BootNotification)
    def on_boot_notitication(self, charge_point_vendor, charge_point_model, **kwargs):
        # Accept every BootNotification and ask the charger to report in
        # every 10 seconds.
        return call_result.BootNotificationPayload(
            current_time=datetime.utcnow().isoformat(),
            interval=10,
            status=RegistrationStatus.accepted
        )


async def on_connect(websocket, path):
    """ For every new charge point that connects, create a ChargePoint instance
    and start listening for messages.
    """
    # The charge point id is the request path, e.g. "/CP_1" -> "CP_1".
    charge_point_id = path.strip('/')
    # NOTE(review): local `cp` shadows the imported ChargePoint alias `cp`.
    cp = MyChargePoint(charge_point_id, websocket)
    await cp.start()


async def main():
    """Serve the OCPP websocket endpoint on all interfaces, port 9000."""
    server = await websockets.serve(
        on_connect,
        '0.0.0.0',
        9000,
        subprotocols=['ocpp1.6']
    )
    await server.wait_closed()


if __name__ == '__main__':
    asyncio.run(main())
| [
"52234750+ev-harsh@users.noreply.github.com"
] | 52234750+ev-harsh@users.noreply.github.com |
8a4b3bf9b7ba9364d6b9e042be5f9f11431cbad1 | 801afed7debbb04a0c6514719a8de02f6239893e | /lamb/ebnf/transformer/ExpansionTransformer.py | 1768f80cc78722fd8eaf3fb06eb4b3a7746b68d2 | [
"MIT"
] | permissive | lay-it-out/lamb | 05006f271c3eaa5d94c2f163e749e3065aea06fe | 3eff8bde37b8f912766ae3c29619b0ceee283009 | refs/heads/main | 2023-08-12T17:54:25.864682 | 2023-07-27T05:26:18 | 2023-07-27T05:26:18 | 662,709,022 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 878 | py | from typing import List
from lamb.ebnf.ast import *
def expand_rules(rules: List[RuleNode]) -> List[RuleNode]:
    """Split every rule whose top-level expression is an OR into two rules,
    one per alternative; all other rules pass through unchanged."""
    expanded: List[RuleNode] = []
    for rule in rules:
        expr = rule.expression
        is_alternation = (isinstance(expr, BinaryExpressionNode)
                          and expr.op == BinaryExpressionNode.BinaryOp.OrOp)
        if not is_alternation:
            expanded.append(rule)
            continue
        # One new rule per branch, preserving the branch's source text/id.
        for branch in (expr.expr1, expr.expr2):
            expanded.append(RuleNode(
                rule.variable,
                branch,
                original_text=f'{rule.variable} ::= {branch.original_text}',
                tree_id=branch.tree_id
            ))
    return expanded
| [
"me@panda2134.site"
] | me@panda2134.site |
83015a912a969bacc8ea47d07520f4cefab2b708 | f80bcbb2db66f8e62c5f1a97d5109e9bd1f5c545 | /ChangeSeries.py | dd142a3143ab349f1ea207a7b2ce67e83b20904d | [] | no_license | jeffcarter-github/PygorPro | 79992796a094201ad8222f3ee8a81987a1de1bd8 | 91b3cd5d8faf57ab2743d7eb8db74316578bb95c | refs/heads/master | 2021-01-10T09:02:11.764300 | 2016-02-16T16:57:06 | 2016-02-16T16:57:06 | 51,124,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py | from GUI_Change_Series_Type import GUI_Change_Series_Type
class ChangeSeries(GUI_Change_Series_Type):
    """Controller for the "change series type" dialog.

    The widget layout and event wiring come from GUI_Change_Series_Type;
    most handlers below are still unimplemented stubs.
    """

    def __init__(self, lst_of_dataFrames):
        GUI_Change_Series_Type.__init__(self, lst_of_dataFrames)
        # Series picked by the user; intended to be filled by the handlers.
        self.series_names = []

    def on_dataFrame_choice(self, event):
        # TODO: not yet implemented.
        pass

    def on_series_selection(self, event):
        # TODO: not yet implemented.
        pass

    def add_selections_to_gui(self, event):
        # TODO: not yet implemented.
        pass

    def on_cancel(self, event):
        # Close the dialog without applying anything.
        self.Destroy()
def on_ok(self, event):
self.Destroy() | [
"email.jeff.carter@gmail.com"
] | email.jeff.carter@gmail.com |
eefe20163388b5773ce3e6f6ef0646037a137ef0 | b84d00866f9e2c607eb57f0be5701121ab873e2f | /venv/Scripts/easy_install-3.8-script.py | d388c40f7a007e537476f9dbfb44930c992ef232 | [] | no_license | hugodoser/CitilinkParser | b585c60091c26d1c6aa328c6139050166a433995 | a165098563a26afe281dc9dc371718974bf504ce | refs/heads/master | 2022-11-07T07:29:50.750951 | 2020-06-29T12:40:04 | 2020-06-29T12:40:04 | 275,809,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | #!C:\Users\hugod\PycharmProjects\PriceParser\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.8'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.8')()
)
| [
"elfimov@kg.ru"
] | elfimov@kg.ru |
b7eca7ba03157b9c372ca880c6db2b32cc1b5fc8 | b915cceeae1823a6a52a06acc1f33c6dfb88f509 | /tworaven_apps/ta2_interfaces/ta2_score_solution_helper.py | 0b8754c34ef0e8e4df69086bc4599c41076f97a9 | [
"Apache-2.0"
] | permissive | ginfo-cflex/TwoRavens | 0fdf71eafbfbc881b8659c4ad553764088a3199c | 3c84367f1eea297b1d9ed2aecaefee52ca66d203 | refs/heads/master | 2022-06-30T11:37:11.156781 | 2020-03-23T17:49:52 | 2020-03-23T17:49:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,086 | py | """
Used to assist with TA2 calls, specifically:
(1) ScoreSolution
(2) GetScoreSolutionResults (contains fitted_solution_id)
"""
import logging
from django.conf import settings
from tworaven_apps.ta2_interfaces.websocket_message import WebsocketMessage
from tworaven_apps.raven_auth.models import User
from tworaven_apps.utils.basic_err_check import BasicErrCheck
from tworaven_apps.utils.basic_response import (ok_resp, err_resp)
from tworaven_apps.utils.json_helper import \
(json_loads, json_dumps, get_dict_value)
from tworaven_apps.utils.proto_util import message_to_json
from tworaven_apps.ta2_interfaces.ta2_connection import TA2Connection
from tworaven_apps.ta2_interfaces.stored_data_util import StoredRequestUtil
from tworaven_apps.ta2_interfaces.req_search_solutions import score_solution
from tworaven_apps.ta2_interfaces.models import \
(StoredRequest, StoredResponse)
from tworaven_apps.ta2_interfaces import static_vals as ta2_static
from tworaven_apps.behavioral_logs.log_entry_maker import LogEntryMaker
from tworaven_apps.behavioral_logs import static_vals as bl_static
import core_pb2
import grpc
from google.protobuf.json_format import \
(Parse, ParseError)
from tworavensproject.celery import celery_app
LOGGER = logging.getLogger(__name__)
class ScoreSolutionHelper(BasicErrCheck):
"""Helper class to run TA2 call sequence"""
    def __init__(self, pipeline_id, websocket_id, user_id, score_params, **kwargs):
        """Store the call parameters, then immediately resolve the user and
        validate `score_params`.

        Errors are recorded on the instance via the BasicErrCheck base
        class rather than raised.  Recognized kwargs: `search_id`,
        `session_key`.
        """
        self.pipeline_id = pipeline_id
        self.websocket_id = websocket_id
        self.user_id = user_id
        self.user_object = None  # set by get_user()
        self.score_params = score_params
        self.search_id = kwargs.get('search_id')
        self.session_key = kwargs.get('session_key', '')
        self.get_user()
        self.check_score_params()
    def get_user(self):
        """Resolve self.user_id to a User object in self.user_object.

        On failure, records an error message on the instance instead of
        raising; no-op if an earlier step already failed.
        """
        if self.has_error():
            return
        try:
            self.user_object = User.objects.get(pk=self.user_id)
        except User.DoesNotExist:
            self.add_err_msg('No user found for id: %s' % self.user_id)
def check_score_params(self):
"""Check that "score_params" has all of the required sections
Except for 'solutionId', params set at:
file: app.js
function: getScoreSolutionDefaultParameters
"""
if self.has_error():
return False
if not isinstance(self.score_params, dict):
self.add_err_msg('fit params must be a python dict')
return False
# Iterate through the expectd keys
#
expected_keys = [ta2_static.KEY_SOLUTION_ID, 'inputs',
'performanceMetrics',
'users', 'configuration']
for key in expected_keys:
if not key in self.score_params:
user_msg = ('score_params is missing key: %s') % \
(self.pipeline_id, key)
self.send_websocket_err_msg(ta2_static.SCORE_SOLUTION, user_msg)
return False
return True
    @staticmethod
    @celery_app.task(ignore_result=True)
    def make_score_solutions_call(pipeline_id, websocket_id, user_id, score_params, **kwargs):
        """Celery task entry point: construct a ScoreSolutionHelper and run
        the full ScoreSolution / GetScoreSolutionResults sequence.

        Construction errors are reported to the caller's websocket rather
        than raised (the task result is ignored).
        """
        print('make_score_solutions_call 1')
        assert pipeline_id, "pipeline_id must be set"
        assert websocket_id, "websocket_id must be set"
        assert user_id, "user_id must be set"
        assert score_params, "score_params must be set"

        score_helper = ScoreSolutionHelper(pipeline_id, websocket_id,
                                           user_id, score_params, **kwargs)

        if score_helper.has_error():
            user_msg = ('ScoreSolution failure for pipeline (%s): %s') % \
                       (pipeline_id, score_helper.get_error_message())

            ws_msg = WebsocketMessage.get_fail_message(
                ta2_static.SCORE_SOLUTION, user_msg)
            ws_msg.send_message(websocket_id)
            LOGGER.error(user_msg)
            return

        score_helper.run_process()
    def run_process(self):
        """(1) Run ScoreSolution: serialize the params, persist the request,
        log it, call the TA2, and on success hand the returned requestId to
        run_get_score_solution_responses()."""
        if self.has_error():
            return

        # ----------------------------------
        # Create the JSON input
        # ----------------------------------
        LOGGER.info('ScoreSolutionHelper.run_process 2')
        json_str_info = json_dumps(self.score_params)
        if not json_str_info.success:
            self.add_err_msg(json_str_info.err_msg)
            return

        json_str_input = json_str_info.result_obj

        # ----------------------------------
        # (2) Save the request to the db
        # ----------------------------------
        stored_request = StoredRequest(
            user=self.user_object,
            search_id=self.search_id,
            pipeline_id=self.pipeline_id,
            workspace='(not specified)',
            request_type=ta2_static.SCORE_SOLUTION,
            is_finished=False,
            request=self.score_params)
        stored_request.save()

        # --------------------------------
        # (2a) Behavioral logging
        # --------------------------------
        log_data = dict(session_key=self.session_key,
                        feature_id=ta2_static.SCORE_SOLUTION,
                        activity_l1=bl_static.L1_MODEL_SELECTION,
                        activity_l2=bl_static.L2_MODEL_SUMMARIZATION,
                        other=self.score_params)
        LogEntryMaker.create_ta2ta3_entry(self.user_object, log_data)

        # ----------------------------------
        # Run ScoreSolution (gRPC call to the TA2)
        # ----------------------------------
        LOGGER.info('run ScoreSolution: %s', json_str_input)
        fit_info = score_solution(json_str_input)

        if not fit_info.success:
            print('ScoreSolution err_msg: ', fit_info.err_msg)
            StoredResponse.add_err_response(stored_request,
                                            fit_info.err_msg)
            self.send_websocket_err_msg(ta2_static.SCORE_SOLUTION,
                                        fit_info.err_msg)
            return

        # ----------------------------------
        # Parse the ScoreSolutionResponse
        # ----------------------------------
        response_info = json_loads(fit_info.result_obj)
        if not response_info.success:
            print('ScoreSolution grpc err_msg: ', response_info.err_msg)
            StoredResponse.add_err_response(stored_request,
                                            response_info.err_msg)
            self.send_websocket_err_msg(ta2_static.SCORE_SOLUTION, response_info.err_msg)
            return

        result_json = response_info.result_obj

        # ----------------------------------
        # Get the requestId used to stream the results
        # ----------------------------------
        if not ta2_static.KEY_REQUEST_ID in result_json:
            user_msg = (' "%s" not found in response to JSON: %s') % \
                       (ta2_static.KEY_REQUEST_ID, result_json)
            StoredResponse.add_err_response(stored_request, user_msg)
            self.send_websocket_err_msg(ta2_static.SCORE_SOLUTION, user_msg)
            return

        StoredResponse.add_success_response(stored_request,
                                            result_json)

        self.run_get_score_solution_responses(result_json[ta2_static.KEY_REQUEST_ID])
    def send_websocket_err_msg(self, grpc_call, user_msg=''):
        """Report an error for `grpc_call`: send it over the websocket, log
        it, and record it on this helper (marking the run as failed)."""
        assert grpc_call, 'grpc_call is required'

        # Prefix the message with the call name and pipeline id.
        user_msg = '%s error; pipeline %s: %s' % \
                   (grpc_call,
                    self.pipeline_id,
                    user_msg)

        # ----------------------------------
        # Send Websocket message
        # ----------------------------------
        ws_msg = WebsocketMessage.get_fail_message(grpc_call, user_msg)
        ws_msg.send_message(self.websocket_id)

        # ----------------------------------
        # Log it
        # ----------------------------------
        LOGGER.info('ScoreSolutionHelper: %s', user_msg)

        # ----------------------------------
        # Add error message to this instance (BasicErrCheck)
        # ----------------------------------
        self.add_err_msg(user_msg)
def run_get_score_solution_responses(self, request_id):
"""(2) Run GetScoreSolutionResults"""
if self.has_error():
return
if not request_id:
self.send_websocket_err_msg(ta2_static.GET_SCORE_SOLUTION_RESULTS,
'request_id must be set')
return
# -----------------------------------
# (1) make GRPC request object
# -----------------------------------
params_dict = {ta2_static.KEY_REQUEST_ID: request_id}
params_info = json_dumps(params_dict)
try:
grpc_req = Parse(params_info.result_obj,
core_pb2.GetScoreSolutionResultsRequest())
except ParseError as err_obj:
err_msg = ('Failed to convert JSON to gRPC: %s') % (err_obj)
self.send_websocket_err_msg(ta2_static.GET_SCORE_SOLUTION_RESULTS,
err_msg)
return
# --------------------------------
# (2) Save the request to the db
# --------------------------------
stored_request = StoredRequest(\
user=self.user_object,
request_type=ta2_static.GET_SCORE_SOLUTION_RESULTS,
search_id=self.search_id,
pipeline_id=self.pipeline_id,
is_finished=False,
request=params_dict)
stored_request.save()
# --------------------------------
# (2a) Behavioral logging
# --------------------------------
log_data = dict(session_key=self.session_key,
feature_id=ta2_static.GET_SCORE_SOLUTION_RESULTS,
activity_l1=bl_static.L1_MODEL_SELECTION,
activity_l2=bl_static.L2_MODEL_SUMMARIZATION,
other=params_dict)
LogEntryMaker.create_ta2ta3_entry(self.user_object, log_data)
# --------------------------------
# (3) Make the gRPC request
# --------------------------------
core_stub, err_msg = TA2Connection.get_grpc_stub()
if err_msg:
return err_resp(err_msg)
msg_cnt = 0
try:
# -----------------------------------------
# Iterate through the streaming responses
# Note: The StoredResponse.id becomes the pipeline id
# -----------------------------------------
for reply in core_stub.GetScoreSolutionResults(\
grpc_req, timeout=settings.TA2_GRPC_LONG_TIMEOUT):
msg_cnt += 1
stored_response = None # to hold a StoredResponse object
# -----------------------------------------------
# Parse the response into JSON + store response
# -----------------------------------------------
msg_json_str = message_to_json(reply)
msg_json_info = json_loads(msg_json_str)
if not msg_json_info.success:
err_msg = ('Failed to convert JSON to gRPC: %s') % \
(err_obj,)
StoredResponse.add_stream_err_response(stored_request,
user_msg)
self.send_websocket_err_msg(\
ta2_static.GET_SCORE_SOLUTION_RESULTS,
err_msg)
# Wait for next response....
continue
result_json = msg_json_info.result_obj
# -----------------------------------------
# Looks good, save the response
# -----------------------------------------
stored_resp_info = StoredResponse.add_stream_success_response(\
stored_request, result_json)
# -----------------------------------------
# Make sure the response was saved (probably won't happen)
# -----------------------------------------
if not stored_resp_info.success:
# Not good but probably won't happen
# send a message to the user...
#
self.send_websocket_err_msg(\
ta2_static.GET_SCORE_SOLUTION_RESULTS,
stored_resp_info.err_msg)
#
StoredResponse.add_stream_err_response(\
stored_request, stored_resp_info.err_msg)
#
continue
# ---------------------------------------------
# Looks good! Get the StoredResponse
# - send responses back to WebSocket
# ---------------------------------------------
stored_response = stored_resp_info.result_obj
stored_response.set_pipeline_id(self.pipeline_id)
# ---------------------------------------------
# If progress is complete,
# send response back to WebSocket
# ---------------------------------------------
progress_val = get_dict_value(\
result_json,
[ta2_static.KEY_PROGRESS,
ta2_static.KEY_PROGRESS_STATE])
if (not progress_val.success) or \
(progress_val.result_obj != ta2_static.KEY_PROGRESS_COMPLETED):
user_msg = 'GetScoreSolutionResultsResponse is not yet complete'
LOGGER.info(user_msg)
# wait for next message...
continue
ws_msg = WebsocketMessage.get_success_message(\
ta2_static.GET_SCORE_SOLUTION_RESULTS,
'it worked',
msg_cnt=msg_cnt,
data=stored_response.as_dict())
LOGGER.info('ws_msg: %s' % ws_msg)
#print('ws_msg', ws_msg.as_dict())
ws_msg.send_message(self.websocket_id)
# stored_response.mark_as_sent_to_user()
except grpc.RpcError as err_obj:
stored_request.set_error_status(str(err_obj))
return
except Exception as err_obj:
stored_request.set_error_status(str(err_obj))
return
StoredRequestUtil.set_finished_ok_status(stored_request.id)
| [
"noreply@github.com"
] | noreply@github.com |
9a91cb2a6678a29e4113661c2a771527004beed2 | 3c097ba5122e40551c98cf89747e710eb453a91b | /pwd.py | 75e03071ad194e730f09519700a40496b6df8f65 | [] | no_license | ann056/password | 63b447188e2680ad0f99c6e2ba863faf27a15727 | 0b0381e8d9800ca8296c4cb1b16ab95110172086 | refs/heads/main | 2023-06-03T08:11:19.393329 | 2021-06-19T03:52:16 | 2021-06-19T03:52:16 | 377,924,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | password = 'a12345'
x = 3
while True:
pwd = input('請輸入密碼:')
if pwd == password:
print('登入成功')
else:
x = x - 1
if x == 0:
print('帳號鎖住')
break
else:
print('登入錯誤! 還有', x, '次機會')
| [
"ann9975656@gmail.com"
] | ann9975656@gmail.com |
317189b0c669303bb76ab30ecad1df6240ef6bb7 | f1f313c6c904d87d626dc8564ee5b80132dc5983 | /codebase/model/modules.py | 35d227fc04379827c51973d73508061b4a82f7c4 | [] | no_license | counterfactuals/AmortizedCausalDiscovery | 98875fcca1dbdbac1f9f2bdf9b0d8b893d9533c7 | 23a1cfb7dd2634ebb9e2f8f0bd5f0cc20ca0b131 | refs/heads/master | 2022-11-10T06:08:19.379156 | 2020-06-22T14:48:25 | 2020-06-22T14:48:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,421 | py | import torch.nn as nn
import torch.nn.functional as F
import math
import torch
from model import utils
class MLP(nn.Module):
    """Two-layer fully-connected ELU network with optional batch norm.

    Operates on tensors shaped [num_sims, num_things, num_features];
    batch norm is applied by flattening the first two axes.
    """

    def __init__(self, n_in, n_hid, n_out, do_prob=0.0, use_batch_norm=True,
                 final_linear=False):
        super().__init__()
        # Module registration order is kept stable: it determines both the
        # state_dict keys and the RNG consumption order in init_weights().
        self.fc1 = nn.Linear(n_in, n_hid)
        self.fc2 = nn.Linear(n_hid, n_out)
        self.bn = nn.BatchNorm1d(n_out)
        self.dropout_prob = do_prob
        self.use_batch_norm = use_batch_norm
        self.final_linear = final_linear
        if self.final_linear:
            self.fc_final = nn.Linear(n_out, n_out)

        self.init_weights()

    def init_weights(self):
        """Xavier-normal init for linears, He-style normal for 1-D convs,
        unit-gain/zero-shift for batch norms."""
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_normal_(module.weight.data)
                module.bias.data.fill_(0.1)
            elif isinstance(module, nn.Conv1d):
                fan = module.kernel_size[0] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2.0 / fan))
                module.bias.data.fill_(0.1)
            elif isinstance(module, nn.BatchNorm1d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def batch_norm(self, inputs):
        """Apply self.bn over the flattened [sims * things, features] view."""
        num_sims, num_things = inputs.size(0), inputs.size(1)
        flat = inputs.view(num_sims * num_things, -1)
        return self.bn(flat).view(num_sims, num_things, -1)

    def forward(self, inputs):
        # Input shape: [num_sims, num_things, num_features]
        hidden = F.elu(self.fc1(inputs))
        hidden = F.dropout(hidden, self.dropout_prob, training=self.training)
        out = F.elu(self.fc2(hidden))
        if self.final_linear:
            out = self.fc_final(out)
        return self.batch_norm(out) if self.use_batch_norm else out
class CNN(nn.Module):
    """1-D CNN encoder with an attention-weighted pooling head.

    Input shape: [num_sims * num_edges, num_dims, num_timesteps]; the
    output is one `n_out`-dim vector per row, the time-mean of
    attention-weighted per-timestep predictions.
    """

    def __init__(self, n_in, n_hid, n_out, do_prob=0.0):
        super(CNN, self).__init__()
        # Halves the time dimension between the two conv layers.
        self.pool = nn.MaxPool1d(
            kernel_size=2,
            stride=None,
            padding=0,
            dilation=1,
            return_indices=False,
            ceil_mode=False,
        )

        self.conv1 = nn.Conv1d(n_in, n_hid, kernel_size=5, stride=1, padding=0)
        self.bn1 = nn.BatchNorm1d(n_hid)
        self.conv2 = nn.Conv1d(n_hid, n_hid, kernel_size=5, stride=1, padding=0)
        self.bn2 = nn.BatchNorm1d(n_hid)
        # 1x1 convs producing per-timestep predictions and attention scores.
        self.conv_predict = nn.Conv1d(n_hid, n_out, kernel_size=1)
        self.conv_attention = nn.Conv1d(n_hid, 1, kernel_size=1)
        self.dropout_prob = do_prob

        self.init_weights()

    def init_weights(self):
        """He-style normal init for convs; unit-gain/zero-shift batch norms."""
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                n = m.kernel_size[0] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
                m.bias.data.fill_(0.1)
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, inputs):
        # Input shape: [num_sims * num_edges, num_dims, num_timesteps]
        x = F.relu(self.conv1(inputs))
        x = self.bn1(x)
        x = F.dropout(x, self.dropout_prob, training=self.training)
        x = self.pool(x)
        x = F.relu(self.conv2(x))
        x = self.bn2(x)
        pred = self.conv_predict(x)
        # Normalize attention scores over the time axis (axis=2); softmax
        # semantics come from the project's utils.my_softmax.
        attention = utils.my_softmax(self.conv_attention(x), axis=2)

        # Mean over time of the attention-weighted predictions.
        edge_prob = (pred * attention).mean(dim=2)
        return edge_prob
return edge_prob
| [
"idnix@web.de"
] | idnix@web.de |
75a2fb525799df8a0e2d223697efd86e076313db | 067973ace96c1d194eda3b6941a855ade08600f1 | /robosuite/environments/sawyer_lift_vj.py | 007c87919858ec0ca5d72e2de50f7e71c0a9316e | [
"MIT"
] | permissive | v1viswan/dexnet_rrt_planner_surreal_robosuite | b66f90d0c7bdb6d0ea1494fc97ba271967fc88f2 | d66753963776ee4dd95b0b2b58a8618644e346f4 | refs/heads/master | 2020-11-24T09:13:53.541504 | 2019-12-14T18:40:05 | 2019-12-14T18:45:15 | 228,070,965 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,672 | py | from collections import OrderedDict
import numpy as np
from robosuite.utils.transform_utils import convert_quat
from robosuite.environments.sawyer import SawyerEnv
from robosuite.models.arenas import TableArena
from robosuite.models.objects import BoxObject, BallObject
from robosuite.models.robots import Sawyer
from robosuite.models.tasks import TableTopTask, UniformRandomSampler, FixedPositionSampler
import pickle
class SawyerLift_vj(SawyerEnv):
    """
    This class corresponds to the lifting task for the Sawyer robot arm.

    Variant of the standard lifting task: the objects are always spawned at
    four fixed table offsets (see ``pos_list`` in ``__init__``) instead of a
    uniformly random layout.
    """
    def __init__(
        self,
        gripper_type="TwoFingerGripper",
        table_full_size=(0.8, 0.8, 0.8),
        table_friction=(1., 5e-3, 1e-4),
        use_camera_obs=True,
        use_object_obs=True,
        reward_shaping=False,
        placement_initializer=None,
        gripper_visualization=False,
        use_indicator_object=False,
        has_renderer=False,
        has_offscreen_renderer=True,
        render_collision_mesh=False,
        render_visual_mesh=True,
        control_freq=10,
        horizon=1000,
        ignore_done=True,
        camera_name="frontview",
        camera_height=256,
        camera_width=256,
        camera_depth=False,
    ):
        """
        Args:
            gripper_type (str): type of gripper, used to instantiate
                gripper models from gripper factory.
            table_full_size (3-tuple): x, y, and z dimensions of the table.
            table_friction (3-tuple): the three mujoco friction parameters for
                the table.
            use_camera_obs (bool): if True, every observation includes a
                rendered image.
            use_object_obs (bool): if True, include object (cube) information in
                the observation.
            reward_shaping (bool): if True, use dense rewards.
            placement_initializer (ObjectPositionSampler instance): accepted for
                interface compatibility but currently ignored -- a
                FixedPositionSampler with four fixed positions is always used.
            gripper_visualization (bool): True if using gripper visualization.
                Useful for teleoperation.
            use_indicator_object (bool): if True, sets up an indicator object
                that is useful for debugging.
            has_renderer (bool): If true, render the simulation state in
                a viewer instead of headless mode.
            has_offscreen_renderer (bool): True if using off-screen rendering.
            render_collision_mesh (bool): True if rendering collision meshes
                in camera. False otherwise.
            render_visual_mesh (bool): True if rendering visual meshes
                in camera. False otherwise.
            control_freq (float): how many control signals to receive
                in every second. This sets the amount of simulation time
                that passes between every action input.
            horizon (int): Every episode lasts for exactly @horizon timesteps.
            ignore_done (bool): True if never terminating the environment
                (ignore @horizon).
            camera_name (str): name of camera to be rendered. Must be
                set if @use_camera_obs is True.
            camera_height (int): height of camera frame.
            camera_width (int): width of camera frame.
            camera_depth (bool): True if rendering RGB-D, and RGB otherwise.
        """
        # settings for table top
        self.table_full_size = table_full_size
        self.table_friction = table_friction
        # whether to use ground-truth object states
        self.use_object_obs = use_object_obs
        # reward configuration
        self.reward_shaping = reward_shaping
        # object placement initializer
        # if placement_initializer:
        #     self.placement_initializer = placement_initializer
        # else:
        #     self.placement_initializer = UniformRandomSampler(
        #         x_range=[-0.03, 0.03],
        #         y_range=[-0.03, 0.03],
        #         ensure_object_boundary_in_range=False,
        #         z_rotation=True,
        #     )
        # NOTE(review): the placement_initializer argument is ignored here;
        # objects are always placed at these four fixed offsets (presumably
        # metres in the table frame -- TODO confirm units).
        pos_list = []
        pos_list.append([0,0,0])
        pos_list.append([0.1,0,0])
        pos_list.append([0,-0.10,0])
        pos_list.append([0,0.10,0])
        self.placement_initializer = FixedPositionSampler(pos_list)
        super().__init__(
            gripper_type=gripper_type,
            gripper_visualization=gripper_visualization,
            use_indicator_object=use_indicator_object,
            has_renderer=has_renderer,
            has_offscreen_renderer=has_offscreen_renderer,
            render_collision_mesh=render_collision_mesh,
            render_visual_mesh=render_visual_mesh,
            control_freq=control_freq,
            horizon=horizon,
            ignore_done=ignore_done,
            use_camera_obs=use_camera_obs,
            camera_name=camera_name,
            camera_height=camera_height,
            camera_width=camera_width,
            camera_depth=camera_depth,
        )
    def _load_model(self):
        """
        Loads an xml model, puts it in self.model
        """
        super()._load_model()
        self.mujoco_robot.set_base_xpos([0, 0, 0])
        # load model for table top workspace
        self.mujoco_arena = TableArena(
            table_full_size=self.table_full_size, table_friction=self.table_friction
        )
        if self.use_indicator_object:
            self.mujoco_arena.add_pos_indicator()
        # The sawyer robot has a pedestal, we want to align it with the table
        self.mujoco_arena.set_origin([0.16 + self.table_full_size[0] / 2, 0, 0])
        # initialize objects of interest: two small red cubes and two thin
        # green box "walls" (size_min == size_max, so sizes are fixed).
        cube = BoxObject(
            size_min=[0.020, 0.020, 0.020],  # [0.015, 0.015, 0.015],
            size_max=[0.020, 0.020, 0.020],  # [0.018, 0.018, 0.018])
            rgba=[1, 0, 0, 1],
        )
        cube_ = BoxObject(
            size_min=[0.020, 0.020, 0.020],  # [0.015, 0.015, 0.015],
            size_max=[0.020, 0.020, 0.020],  # [0.018, 0.018, 0.018])
            rgba=[1, 0, 0, 1],
        )
        cube1 = BoxObject(
            size_min=[0.120, 0.010, 0.080],  # [0.015, 0.015, 0.015],
            size_max=[0.120, 0.010, 0.080],  # [0.018, 0.018, 0.018])
            rgba=[0, 1, 0, 1],
        )
        cube2 = BoxObject(
            size_min=[0.120, 0.010, 0.080],  # [0.015, 0.015, 0.015],
            size_max=[0.120, 0.010, 0.080],  # [0.018, 0.018, 0.018])
            rgba=[0, 1, 0, 1],
        )
        self.mujoco_objects = OrderedDict([("cube", cube), ("cube_", cube_),("cube1", cube1), ("cube2", cube2)])
        self.n_objects = len(self.mujoco_objects)
        # task includes arena, robot, and objects of interest
        self.model = TableTopTask(
            self.mujoco_arena,
            self.mujoco_robot,
            self.mujoco_objects,
            initializer=self.placement_initializer,
        )
        self.model.place_objects()
    def _get_reference(self):
        """
        Sets up references to important components. A reference is typically an
        index or a list of indices that point to the corresponding elements
        in a flatten array, which is how MuJoCo stores physical simulation data.
        """
        super()._get_reference()
        self.cube_body_id = self.sim.model.body_name2id("cube")
        self.l_finger_geom_ids = [
            self.sim.model.geom_name2id(x) for x in self.gripper.left_finger_geoms
        ]
        self.r_finger_geom_ids = [
            self.sim.model.geom_name2id(x) for x in self.gripper.right_finger_geoms
        ]
        self.cube_geom_id = self.sim.model.geom_name2id("cube")
    def _reset_internal(self):
        """
        Resets simulation internal configurations.
        """
        super()._reset_internal()
        # reset positions of objects
        self.model.place_objects()
        # reset joint positions: a fixed ready pose (7 Sawyer joint angles,
        # presumably radians -- TODO confirm) plus small Gaussian jitter.
        init_pos = np.array([-0.5538, -0.8208, 0.4155, 1.8409, -0.4955, 0.6482, 1.9628])
        init_pos += np.random.randn(init_pos.shape[0]) * 0.02
        self.sim.data.qpos[self._ref_joint_pos_indexes] = np.array(init_pos)
    def reward(self, action=None):
        """
        Reward function for the task.
        The dense reward has three components.
        Reaching: in [0, 1], to encourage the arm to reach the cube
        Grasping: in {0, 0.25}, non-zero if arm is grasping the cube
        Lifting: in {0, 1}, non-zero if arm has lifted the cube
        The sparse reward only consists of the lifting component.
        Args:
            action (np array): unused for this task
        Returns:
            reward (float): the reward
        """
        reward = 0.
        # sparse completion reward
        if self._check_success():
            reward = 1.0
        # use a shaping reward
        if self.reward_shaping:
            # reaching reward: 1 at the cube, decaying with gripper distance
            cube_pos = self.sim.data.body_xpos[self.cube_body_id]
            gripper_site_pos = self.sim.data.site_xpos[self.eef_site_id]
            dist = np.linalg.norm(gripper_site_pos - cube_pos)
            reaching_reward = 1 - np.tanh(10.0 * dist)
            reward += reaching_reward
            # grasping reward: both fingers must touch the cube
            touch_left_finger = False
            touch_right_finger = False
            for i in range(self.sim.data.ncon):
                c = self.sim.data.contact[i]
                if c.geom1 in self.l_finger_geom_ids and c.geom2 == self.cube_geom_id:
                    touch_left_finger = True
                if c.geom1 == self.cube_geom_id and c.geom2 in self.l_finger_geom_ids:
                    touch_left_finger = True
                if c.geom1 in self.r_finger_geom_ids and c.geom2 == self.cube_geom_id:
                    touch_right_finger = True
                if c.geom1 == self.cube_geom_id and c.geom2 in self.r_finger_geom_ids:
                    touch_right_finger = True
            if touch_left_finger and touch_right_finger:
                reward += 0.25
        return reward
    def _get_observation(self):
        """
        Returns an OrderedDict containing observations [(name_string, np.array), ...].
        Important keys:
            robot-state: contains robot-centric information.
            object-state: requires @self.use_object_obs to be True.
                contains object-centric information.
            image: requires @self.use_camera_obs to be True.
                contains a rendered frame from the simulation.
            depth: requires @self.use_camera_obs and @self.camera_depth to be True.
                contains a rendered depth map from the simulation
        """
        di = super()._get_observation()
        # camera observations
        if self.use_camera_obs:
            camera_obs = self.sim.render(
                camera_name=self.camera_name,
                width=self.camera_width,
                height=self.camera_height,
                depth=self.camera_depth,
            )
            if self.camera_depth:
                di["image"], di["depth"] = camera_obs
            else:
                di["image"] = camera_obs
        # low-level object information
        if self.use_object_obs:
            # position and rotation of object
            cube_pos = np.array(self.sim.data.body_xpos[self.cube_body_id])
            cube_quat = convert_quat(
                np.array(self.sim.data.body_xquat[self.cube_body_id]), to="xyzw"
            )
            di["cube_pos"] = cube_pos
            di["cube_quat"] = cube_quat
            gripper_site_pos = np.array(self.sim.data.site_xpos[self.eef_site_id])
            di["gripper_to_cube"] = gripper_site_pos - cube_pos
            di["object-state"] = np.concatenate(
                [cube_pos, cube_quat, di["gripper_to_cube"]]
            )
        return di
    def _check_contact(self):
        """
        Returns True if gripper is in contact with an object.
        """
        collision = False
        for contact in self.sim.data.contact[: self.sim.data.ncon]:
            if (
                self.sim.model.geom_id2name(contact.geom1)
                in self.gripper.contact_geoms()
                or self.sim.model.geom_id2name(contact.geom2)
                in self.gripper.contact_geoms()
            ):
                collision = True
                break
        return collision
    def _check_success(self):
        """
        Returns True if task has been completed.
        """
        cube_height = self.sim.data.body_xpos[self.cube_body_id][2]
        table_height = self.table_full_size[2]
        # cube is higher than the table top above a margin (0.04, presumably
        # metres -- TODO confirm against robosuite conventions)
        return cube_height > table_height + 0.04
    def _gripper_visualization(self):
        """
        Do any needed visualization here. Overrides superclass implementations.
        """
        # color the gripper site appropriately based on distance to cube
        if self.gripper_visualization:
            # get (squared) distance to cube
            cube_site_id = self.sim.model.site_name2id("cube")
            dist = np.sum(
                np.square(
                    self.sim.data.site_xpos[cube_site_id]
                    - self.sim.data.get_site_xpos("grip_site")
                )
            )
            # set RGBA for the EEF site here: green when close, red when far
            max_dist = 0.1
            scaled = (1.0 - min(dist / max_dist, 1.)) ** 15
            rgba = np.zeros(4)
            rgba[0] = 1 - scaled
            rgba[1] = scaled
            rgba[3] = 0.5
            self.sim.model.site_rgba[self.eef_site_id] = rgba
| [
"v1viswan@eng.ucsd.edu"
] | v1viswan@eng.ucsd.edu |
153cfd01633ca97232addf13f8245fc820adc4df | 6aa7e37718c98af578a0b30ad471800b0604b5e6 | /splider/article_helper_test.py | 811ae3d805383ca0916895666d186d580cba067f | [
"MIT"
] | permissive | WangHongshuo/Acfun_Comment_Splider | 3a351aaef419f19b58f1be90bdef8f802587b735 | 64bf822e9ceb59e296e9add14fd3ec6564efc406 | refs/heads/master | 2020-08-06T00:25:49.231990 | 2020-01-05T15:36:42 | 2020-01-05T15:36:42 | 212,770,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | from article_helper import ArticleHelper
from url_config import ARTICLE_ZONE
def main():
    """Fetch the article list for the first configured zone and print each entry.

    Translated note from the original author: articles are meant to be stored
    in the `articles` table as aid (unique) and comment_count (used to decide
    whether the comments table needs refreshing).
    """
    article_helper = ArticleHelper()
    article_list = article_helper.get_article_list(ARTICLE_ZONE[0].get('list_id'), ARTICLE_ZONE[0].get('realmIds'))
    for article in article_list:
        print(article)
    # Removed dead assignments (`b = 2`, `a = 1`) that were never read.


if __name__ == '__main__':
    main()
| [
"iswanghongshuo@foxmail.com"
] | iswanghongshuo@foxmail.com |
b4576b0c26d7d39045f9bfb13fd5be9c1b1f2acc | e6b6a1b6b3552c26bedc6269683b02c4fe05691f | /tests/test_queue.py | 0de8ccd430f5bf6247aafe4649025c1c3e0919d3 | [
"BSD-3-Clause"
] | permissive | bwhmather/python-lalr | 7f76d78a890150348e092f0b40b317763c1d530c | 8a98db00c05ee7c13a5e5ec7b580d28e04cb43ca | refs/heads/master | 2022-02-07T01:17:59.589355 | 2022-01-30T00:11:52 | 2022-01-30T00:11:52 | 63,543,499 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,232 | py | import pytest
from lalr.utils import Queue
def test_iteration():
    """Iterating a fresh queue yields the tasks in insertion order."""
    expected = ["A", "B", "C", "D", "E", "F"]
    assert list(Queue(expected)) == expected
def test_insertion_during_iteration():
    """Tasks added while iterating are picked up by the same loop."""
    queue = Queue([1])
    for value in queue:
        if value >= 128:
            break
        queue.add(value * 2)
    assert list(queue.processed) == [2 ** exp for exp in range(8)]
def test_reinsertion_before_pop():
    """Re-adding a still-pending task neither duplicates nor reorders it."""
    items = [1, 2, 3, 4, 5]
    queue = Queue(items)
    seen = [queue.pop(), queue.pop()]
    queue.add(3)
    seen += [queue.pop(), queue.pop(), queue.pop()]
    assert seen == items
    assert not queue
    with pytest.raises(IndexError):
        queue.pop()
def test_reinsertion_after_pop():
    """Re-adding an already-processed task does not queue it again."""
    items = [1, 2, 3, 4, 5]
    queue = Queue(items)
    seen = [queue.pop(), queue.pop(), queue.pop()]
    queue.add(3)
    seen += [queue.pop(), queue.pop()]
    assert seen == items
    assert not queue
    with pytest.raises(IndexError):
        queue.pop()
def test_processed():
    """`processed` reflects exactly the tasks that have been popped."""
    queue = Queue([1, 2, 3, 4, 5])
    popped = {queue.pop(), queue.pop()}
    assert set(queue.processed) == popped
| [
"bwhmather@bwhmather.com"
] | bwhmather@bwhmather.com |
421acea2ffbf3cdace86642508a1e2820482a291 | 8bf7d1a0b1dec4b6807a02deeea1de7d1a8e1960 | /hog.py | cdadd7fda0d7728f2676c1a70520fb345e92455d | [
"MIT"
] | permissive | mcflugen/hog | ff3a1c7525d1a75efcc437105c247dfb84cad821 | 937d8b81a8fe1a8bf50e5346f281e25c72ec6816 | refs/heads/master | 2020-05-17T10:39:23.195225 | 2015-01-31T21:47:36 | 2015-01-31T21:47:36 | 30,127,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,441 | py | #! /usr/bin/env python
from __future__ import print_function
import os
import sys
import types
import pwd
import time
from collections import defaultdict
from datetime import datetime, timedelta
from stat import *
import pandas
def itertree(top):
    """Yield the absolute path of every regular (non-symlink) file under *top*."""
    for root, dirs, files in os.walk(top):
        for fname in files:
            path = os.path.join(root, fname)
            # Test the full path: the bare file name would be resolved
            # relative to the current working directory, so symlinks deep
            # in the tree were never actually detected.
            if not os.path.islink(path):
                yield os.path.abspath(path)
class Hogs(object):
    """Per-uid disk usage accumulator: total bytes, file count and the most
    recent modification time seen for each file owner."""
    def __init__(self):
        # All three maps are keyed by numeric uid.
        self._hogs = defaultdict(int)    # total size in bytes
        self._files = defaultdict(int)   # number of files
        self._last = defaultdict(float)  # newest st_mtime (epoch seconds)
    def add(self, path):
        """Fold one file's lstat info into the totals.  Unreadable or
        vanished paths are silently skipped (best-effort scan)."""
        try:
            stat = os.lstat(path)
            uid = stat.st_uid
        except OSError as err:
            pass
        else:
            self._hogs[uid] += stat.st_size
            self._files[uid] += 1
            self._last[uid] = max(self._last[uid], stat.st_mtime)
    @classmethod
    def from_path(clazz, paths):
        """Build a Hogs instance from one path or an iterable of paths.
        Uses ``types.StringTypes`` -- this module targets Python 2."""
        hogs = clazz()
        if isinstance(paths, types.StringTypes):
            paths = [paths]
        for path in paths:
            for fname in itertree(path):
                hogs.add(fname)
        return hogs
    @staticmethod
    def _atime2age(atimes):
        """Map {uid: epoch seconds} to {uid: age in whole days from now}.
        NOTE(review): named 'atime' but to_dataframe feeds it st_mtime values."""
        age = {}
        now = datetime.now()
        for user in atimes:
            age[user] = (now - datetime.fromtimestamp(atimes[user])).days
        return age
    @staticmethod
    def _uid2name(uids):
        """Map each uid to its login name (falls back to the uid as text)."""
        return dict([(uid, uid_to_name(uid)) for uid in uids])
    def to_dataframe(self, columns=None, ascending=True):
        """Return the totals as a DataFrame indexed by user name, sorted by
        *columns*.  NOTE(review): uses the legacy ``DataFrame.sort`` API
        removed in modern pandas -- confirm the pinned pandas version."""
        df = pandas.DataFrame.from_dict({'user': Hogs._uid2name(self._hogs),
                                         'bytes': self._hogs,
                                         'files': self._files,
                                         'last': Hogs._atime2age(self._last)})
        df.set_index('user', inplace=True)
        df.sort(columns=columns, ascending=ascending, inplace=True)
        return df
def uid_to_name(uid):
    """Resolve a numeric uid to a login name, or its decimal string if unknown."""
    try:
        return pwd.getpwuid(uid).pw_name
    except KeyError:
        return str(uid)
unit_prefix = ['', 'K', 'M', 'G', 'T', 'P']
def bytes_to_string(bytes):
    """Format a byte count as '<value> <binary prefix>', e.g. 1536 -> '1.5 K'."""
    import math
    try:
        log_1024 = int(math.log(bytes)/math.log(1024))
    except ValueError:
        # math.log raises ValueError for counts <= 0; show them unscaled.
        log_1024 = 0
    # Clamp so counts beyond the petabyte range cannot index past the
    # prefix table (the original raised IndexError for >= 1024**6 bytes).
    log_1024 = min(log_1024, len(unit_prefix) - 1)
    return '%.1f %s' % (bytes / (1024. ** log_1024), unit_prefix[log_1024])
def main():
    """Command-line entry point: scan the given paths and report per-user
    disk usage, optionally pickling the resulting DataFrame."""
    import argparse
    parser = argparse.ArgumentParser(description='Find disk hogs.')
    # Fixed user-facing typo in the help text ('Were' -> 'Where').
    parser.add_argument('dirs', metavar='path', nargs='*',
                        help='Where to look for hogs', default=['.'])
    # NOTE(review): 'uid' is not a column of the DataFrame (user name is the
    # index; columns are bytes/files/last), so --sort-by uid likely fails in
    # Hogs.to_dataframe -- confirm.
    parser.add_argument('--sort-by', choices=['uid', 'bytes', 'files', 'last'],
                        help='Sort hogs', default='bytes')
    parser.add_argument('--reverse', action='store_true',
                        help='Biggest hogs first')
    parser.add_argument('--silent', action='store_true', default=False,
                        help='No output to screen')
    parser.add_argument('--pickle', type=str, default=None,
                        help='Save as pickle file')
    args = parser.parse_args()
    hogs = Hogs.from_path(args.dirs)
    df = hogs.to_dataframe(columns=args.sort_by, ascending=not args.reverse)
    if args.pickle:
        df.to_pickle(args.pickle)
    if not args.silent:
        # Render the 'bytes' column human-readable (e.g. '1.5 K').
        print(df.to_string(formatters={'bytes': bytes_to_string}))
if __name__ == '__main__':
    main()
| [
"huttone@colorado.edu"
] | huttone@colorado.edu |
90ae9d9dd84f0557e4e92628820240e1bd04d388 | 9460d90253e94197233ac121456f19fde194a969 | /tests/tron/test_account.py | 6e4523a2bf8aa13410d5ee5425b2f2c04e440104 | [] | no_license | max-block/mb-tron | c64219cfb643946044a08eb87153d2a4a7811be7 | 560fb52716951d865bf991f33f9ff23bd02cebfb | refs/heads/master | 2023-06-06T09:58:08.687966 | 2021-06-30T08:17:47 | 2021-06-30T08:17:47 | 375,305,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | from mb_tron.tron import account
def test_generate_account():
    """Two freshly generated accounts are distinct, valid Tron addresses."""
    first = account.generate_account()
    second = account.generate_account()
    for acc in (first, second):
        assert acc.address.startswith("T")
    assert first.address != second.address
    assert account.is_valid_address(first.address)
    assert account.is_valid_address(second.address)
def test_is_valid_address():
    """Only the full-length, correctly checksummed address validates."""
    good = "TXcoPj81sYLef7TrEjKsgqcX7M9z1TCkZL"
    assert account.is_valid_address(good)
    # Same address with a lower-case final character (checksum mismatch).
    assert not account.is_valid_address(good[:-1] + "l")
    # Truncated address.
    assert not account.is_valid_address(good[:-1])
def test_get_address_from_private_key():
    """A known private key maps to its documented base58 address."""
    key = "aaf2a3148255e466c7c924b1f4ba41b101f486622bf4bafd0b69a077d8cc83d3"
    expected = "TXcoPj81sYLef7TrEjKsgqcX7M9z1TCkZL"
    assert account.get_address_from_private_key(key) == expected
| [
"maxblock@pm.me"
] | maxblock@pm.me |
a89cab30f1281c4b2bf90fe374743b37ded0ff69 | d33f66545aec29582f353eee62b78739cf82fa74 | /src/ingestCora.py | eec084cd69591076b5a67ede161a65147ca2ef5b | [] | no_license | calnick1/nodes2021-link-prediction | ad193d58e20e4ea8e562cc9313e0a2ba61a7629d | 120c0b4bc1ef10473a6576531ddf88dd3e5b51ff | refs/heads/master | 2023-06-04T14:25:38.895755 | 2021-06-17T15:17:51 | 2021-06-17T15:17:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,539 | py | from neo4j import GraphDatabase
# Paths to the raw CORA dataset files, relative to the working directory
# the script is launched from.
cora_content = "../data/cora.content"
cora_cites = "../data/cora.cites"
# Paper subject label -> small integer class id.
subject_to_id = {
    "Neural_Networks": 0,
    "Rule_Learning": 1,
    "Reinforcement_Learning": 2,
    "Probabilistic_Methods": 3,
    "Theory": 4,
    "Genetic_Algorithms": 5,
    "Case_Based": 6
}
# Inverse lookup: class id -> subject label.
id_to_subject = {v: k for k, v in subject_to_id.items()}
def readlines(path):
    """Return the whitespace-stripped lines of *path* as a list.

    Streams the file line by line instead of materialising it twice
    (the original built an intermediate list via f.readlines() + map).
    """
    with open(path, 'r') as f:
        return [line.strip() for line in f]
def loadNodes(session):
    """Create one :Paper node per row of the CORA content file."""
    create_query = "CREATE (n:Paper {extId: $extId, subject: $subject, features: $features})"
    for row in readlines(cora_content):
        fields = row.split(',')
        params = {
            "extId": int(fields[0]),
            "subject": subject_to_id[fields[1]],
            "features": list(map(int, fields[2:])),
        }
        session.run(create_query, params)
def loadRelationships(session):
    """Create a CITES edge for every citation pair in the CORA cites file."""
    merge_query = ("MATCH (n:Paper), (m:Paper) WHERE n.extId = $source_ext_id "
                   "AND m.extId = $target_ext_id MERGE (n)-[:CITES]->(m)")
    for row in readlines(cora_cites):
        fields = row.split(',')
        source_id = int(fields[0])
        target_id = int(fields[1])
        session.run(merge_query, {"source_ext_id": source_id, "target_ext_id": target_id})
# Connect to the local Neo4j instance and ingest nodes first, then the
# citation edges (edges MATCH on extId, so nodes must already exist).
with GraphDatabase.driver("neo4j://localhost", auth=("neo4j", "cora")) as driver:
    with driver.session() as session:
        loadNodes(session)
        loadRelationships(session)
| [
"breakanalysis@gmail.com"
] | breakanalysis@gmail.com |
2e9288d48b1eb01dd9f89c442ac250c7f7eb7c21 | c2ba09216f52b0e76576364355cc5e3c428d0710 | /ctrip_scrapy/ctrip_scrapy/ctrip_redis_push/detail_parser.py | d56f86cf8ad58b73a8bf467f4ae943e6d6a77086 | [] | no_license | hail-linda/Scientific-computing-2-hours-in-Python | cfe2dc41a941a46cf8eeb9878d59bef0933fa0d6 | 86f763bcb474ac588f449c73745c09cc9ccdfdc1 | refs/heads/master | 2023-04-21T10:02:07.547346 | 2021-05-17T18:53:39 | 2021-05-17T18:53:39 | 331,648,727 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,119 | py | from parser import Parser
from pymysql.err import InterfaceError
import json
class DetailParser(Parser):
    """Parser for unprocessed house-detail crawl responses.

    Work in progress: rows are fetched and their JSON payload decoded, but
    the actual field extraction (and the batch status update, commented out
    at the bottom) is still a TODO.
    """
    def run(self):
        # Select every response that has not been parsed yet.
        sql = "SELECT id, response FROM house_detail_response WHERE status=0 AND parse_status=0"
        self.cursor.execute(sql)
        rows = self.cursor.fetchall()
        data = []  # NOTE(review): never populated yet -- reserved for the TODO below.
        ids = []  # NOTE(review): same; meant for the commented-out status update.
        for r in rows:
            response = json.loads(r['response'])
            _result = response['result']
            # 'result' is itself a JSON document that needs a second decode.
            a = json.loads(_result)
            b = a  # placeholder assignment; real parsing not written yet
            # TODO parse procedure
            try:
                self.db.commit()
            except InterfaceError as e:
                # presumably a dropped/timed-out MySQL connection -- the base
                # class handler retries; confirm handle_db_timeout semantics.
                self.handle_db_timeout(sql, data, e)
            except Exception as e:
                self.logger.exception(e)
                self.db.rollback()
            else:
                pass
        # format_strings = ','.join(['%s'] * len(ids))
        # update_sql = "UPDATE house_detail_response SET parse_status=1 WHERE id IN (%s)" % format_strings
        # self.cursor.execute(update_sql, tuple(ids))
        # self.db.commit()
if __name__ == "__main__":
parser = DetailParser()
parser.run() | [
"Lixinda128"
] | Lixinda128 |
768dc9a5cdbd9c14c1d015c30ad64f4d57ad601e | e3df0e321e8bcf6e7d70644dccf5ea4f109580e8 | /byteprint/bp/core/installation/__init__.py | 0c5eea7661b22396d08471f9c25d3c7e0e5e4a21 | [
"MIT"
] | permissive | colingourlay/byteprint | 65af7b9e01299c1e62f4cb03c5641e12fcf39860 | 184ddb5eac48a48507e20553f82b2f16c1a29fda | refs/heads/master | 2021-01-13T02:25:56.834846 | 2010-05-20T14:47:53 | 2010-05-20T14:47:53 | 677,022 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | from django.contrib.auth.models import User
def is_installed():
try:
superusers = User.objects.filter(is_superuser=True)
if len(superusers) > 0:
return True
return False
except:
return False | [
"colin.j.gourlay@gmail.com"
] | colin.j.gourlay@gmail.com |
39c5695cb627b2cbc775e29d5e8df89250bac8eb | dcb1fed6d771ccdc61200bc1be0bb6ab8713cbea | /build/offboard/catkin_generated/pkg.develspace.context.pc.py | 1eae1684e503a10bdc1b59c200f2eef0a20b2e3e | [] | no_license | elisabetta42/Thesis | c006704626dc3aba2b487fdbf80ae64f543b2c82 | 2f0031ab3d439c7b2868b1f894018471032e7732 | refs/heads/master | 2021-05-14T02:12:11.095805 | 2018-06-17T23:40:33 | 2018-06-17T23:40:33 | 116,586,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "offboard"
PROJECT_SPACE_DIR = "/home/sdu/catkin_ws/devel"
PROJECT_VERSION = "0.0.0"
| [
"eseggioli@gmail.com"
] | eseggioli@gmail.com |
a17ec0f94663044d8ad121c70920cb880c8289a6 | 9eaa705b0fe4d477cc41a2b849f1689fa2a664f2 | /amisapp1/migrations/0050_lab_profile.py | 5f7e4da49bff7d203a37a6cf03aba847b4ff1a56 | [] | no_license | amit631308/check | c6d3b34b2a0075f376274c9929cfc9cda725b88a | e0c68f9386783d4d9837282f1df0c090b4173f77 | refs/heads/master | 2022-12-01T16:47:37.521328 | 2020-08-16T17:45:22 | 2020-08-16T17:45:22 | 287,515,465 | 1 | 0 | null | 2020-08-14T11:17:38 | 2020-08-14T11:17:38 | null | UTF-8 | Python | false | false | 1,136 | py | # Generated by Django 2.2.12 on 2020-08-16 15:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('amisapp1', '0049_auto_20200816_1206'),
]
operations = [
migrations.CreateModel(
name='lab_profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('phone', models.CharField(max_length=20)),
('address', models.CharField(max_length=200)),
('city', models.CharField(max_length=20)),
('state', models.CharField(max_length=20)),
('pin', models.CharField(max_length=20)),
('country', models.CharField(max_length=20)),
('dob', models.CharField(max_length=20)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"amit631308@gmail.com"
] | amit631308@gmail.com |
69a1e8c4b1c4414aa0fd5bb630f5ed30027ae42c | cf5149e28346bef5d77d827ddbb3303fc4a89ff4 | /bert4torch/lightmodel.py | dd1352373c5664ff83467f845f615b9d4013d713 | [
"MIT"
] | permissive | yotofu/t5-pegasus-chinese | 1e98794253f1e3253606868053a77f29aced679b | ef8de37ff2e2911ae308f5de937547a71945cdeb | refs/heads/main | 2023-09-03T13:23:43.968272 | 2021-11-15T17:14:32 | 2021-11-15T17:14:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,183 | py | import torch
import pytorch_lightning as pl
from pytorch_lightning.callbacks import Callback
import datetime
from pytorch_lightning.core.decorators import auto_move_data
class LightModel(pl.LightningModule):
def __init__(self, net, optimizer, loss_fn=None):
super().__init__()
self.net = net
self.save_hyperparameters("net")
self.optimizer = optimizer
self.loss_fn = loss_fn
def configure_optimizers(self):
return self.optimizer
@auto_move_data
def forward(self, batch):
with torch.no_grad():
return self.net(batch)
def training_step(self, batch, batch_idx):
logits = self.net(batch)
loss = self.net.compute_loss(logits, batch)
return loss
def print_bar(self):
nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.print("\n" + "=" * 80 + "%s" % nowtime)
def predict(self, data):
self.net.eval()
ret = []
for batch in data:
res = self(batch)
if self.on_gpu:
res = res.cpu()
res = res.numpy().tolist()
ret.extend(res)
return ret
| [
"806416908@qq.com"
] | 806416908@qq.com |
6ca76d92bdd9bb5935e9736f4f2df6192bfd5e25 | 2a269e46075037eba48051f08ef2aa3df11c291b | /person/migrations/0001_initial.py | 136a900ceaceae9b06f29f08673f2d2fad483cc9 | [] | no_license | praneshsaminathan/Django-Multi-Tenant | 755a9361c0c8d80cfcb2a39a1062641b130dae20 | acb02b60c475c7abc1f80e8c032ea68245db7449 | refs/heads/master | 2023-02-02T04:43:51.705540 | 2020-12-13T12:14:35 | 2020-12-13T12:14:35 | 320,832,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | # Generated by Django 3.1.4 on 2020-12-12 11:54
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='MyUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
]
| [
"pranesh"
] | pranesh |
c28fcd27808d7a35e66beec85cb206831386a2a1 | c582e3c7830c56030ceeea7de5e0ff8ab696a786 | /DJANGO/Django Forms/1 Django Forms basics/views.py | 56e0c26e0d02d0f506bb98fb579fc57c8278a0c5 | [] | no_license | kubalisowski/Docs | e4e23d3eb94b8116c1045934d886489d582fca9e | 54ce7261747ece0123dc5e977eb3df62a8240f4b | refs/heads/master | 2022-12-11T19:37:17.515147 | 2019-11-01T21:14:42 | 2019-11-01T21:14:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | from django.shortcuts import render
from . import forms ### might be also from basicapp import forms
def index(request): ### just for index page
return render(request, 'basicapp/index.html')
def form_name_view(request):
form = forms.FormName() ### Importing previously created class of form from form.py; empty instance of class
if request.method == 'POST':
form = forms.FormName(request.POST) ### form object filled with data from sent form (POST request)
if form.is_valid(): ### boolean value
print('VALIDATION OK!')
### Printing data grabbed from data sent via form --> .cleaned_data[] method;
print(form.cleaned_data['name']) ## 'name' variable declared in forms.py
print(form.cleaned_data['email'])
print(form.cleaned_data['text'])
return render(request, 'basicapp/form_page.html', {'form' : form})
| [
"kubalisowski94@gmail.com"
] | kubalisowski94@gmail.com |
9bdf0b23533683e150f463573dbbc503fff15af3 | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/auto_rig_pro/auto_rig_datas_export.py | 0ad9f6059d3985af9e8018a74ef2027c38c661bb | [] | no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 992 | py | {'eyelid_top.l': ('Transformation', (0.0, 0.0, 0.0, 0.0, 0.0, 1.5)), 'eyelid_bot.l': ('Transformation', (0.0, 0.0, 0.0, 0.0, 0.0, 1.5)), 'eyelid_top.r': ('Transformation', (0.0, 0.0, 0.0, 0.0, 0.0, 1.5)), 'eyelid_bot.r': ('Transformation', (0.0, 0.0, 0.0, 0.0, 0.0, 1.5)), 'c_foot_bank_01.r': ('Transformation', (0.0, 0.0, 0.0, 0.0, -0.25, 0.25)), 'c_foot_bank_02.r': ('Transformation', (0.0, 0.0, 0.0, 0.0, -0.25, 0.25)), 'c_foot_heel.r': ('Transformation', (-0.5, 0.5, 0.0, 0.0, 0.0, 0.0)), 'c_toes_end.r': ('Transformation', (-0.5, 0.5, 0.0, 0.0, 0.0, 0.0)), 'c_foot_bank_01.l': ('Transformation', (0.0, 0.0, 0.0, 0.0, -0.25, 0.25)), 'c_foot_bank_02.l': ('Transformation', (0.0, 0.0, 0.0, 0.0, -0.25, 0.25)), 'c_foot_heel.l': ('Transformation', (-0.5, 0.5, 0.0, 0.0, 0.0, 0.0)), 'c_toes_end.l': ('Transformation', (-0.5, 0.5, 0.0, 0.0, 0.0, 0.0)), 'toes_end_ref.l': ('Transformation', (-0.5, 0.5, 0.0, 0.0, 0.0, 0.0)), 'toes_end_ref.r': ('Transformation', (-0.5, 0.5, 0.0, 0.0, 0.0, 0.0))} | [
"root@localhost.localdomain"
] | root@localhost.localdomain |
1b1b516e13b61afed8cb370bb7337ae07d892668 | 217726212a0b93095737c0015153314afd725871 | /podcasts/admin.py | 0e43953e30a8d0589b55a540c01ef89f8aef676b | [
"Apache-2.0"
] | permissive | gitter-badger/tapedrive | e605fba7474e18dbb4081364981fd1fd2d1060fe | 51ce4765b5e974ea19340ac2127a36597d8e7763 | refs/heads/master | 2020-03-16T19:17:14.556243 | 2018-05-10T13:52:59 | 2018-05-10T13:52:59 | 132,908,751 | 0 | 0 | null | 2018-05-10T14:04:32 | 2018-05-10T14:04:32 | null | UTF-8 | Python | false | false | 527 | py | from django.contrib import admin
from .models import Podcast, Episode, EpisodePlaybackState
# Register your models here.
@admin.register(Podcast)
class PodcastAdmin(admin.ModelAdmin):
# fields = ('title', )
pass
class PlaybackStateInline(admin.TabularInline):
model = EpisodePlaybackState
@admin.register(Episode)
class EpisodeAdmin(admin.ModelAdmin):
list_display = ('title', 'podcast', 'published')
readonly_fields = ('media_url', 'link', 'guid')
inlines = [
PlaybackStateInline,
]
| [
"mail@janwillhaus.de"
] | mail@janwillhaus.de |
5dab7e3bfdea2a2c594b3dad9518850e875f603f | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/B/buttub/basic_twitter_scraper_179.py | 43a79636f9de0b40da964a9dc909525b46726714 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,294 | py | ###################################################################################
# Twitter scraper - designed to be forked and used for more interesting things
###################################################################################
import scraperwiki
import simplejson
import urllib2
# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = 'from:BarackObama'
RESULTS_PER_PAGE = '100'
LANGUAGE = 'en'
NUM_PAGES = 10
for page in range(1, NUM_PAGES+1):
base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s' \
% (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)
try:
results_json = simplejson.loads(scraperwiki.scrape(base_url))
for result in results_json['results']:
data = {}
data['id'] = result['id']
data['text'] = result['text']
data['from_user'] = result['from_user']
print data['from_user'], data['text']
scraperwiki.sqlite.save(["id"], data)
except:
print 'Oh dear, failed to scrape %s' % base_url
###################################################################################
# Twitter scraper - designed to be forked and used for more interesting things
###################################################################################
import scraperwiki
import simplejson
import urllib2
# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = 'from:BarackObama'
RESULTS_PER_PAGE = '100'
LANGUAGE = 'en'
NUM_PAGES = 10
for page in range(1, NUM_PAGES+1):
base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s' \
% (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)
try:
results_json = simplejson.loads(scraperwiki.scrape(base_url))
for result in results_json['results']:
data = {}
data['id'] = result['id']
data['text'] = result['text']
data['from_user'] = result['from_user']
print data['from_user'], data['text']
scraperwiki.sqlite.save(["id"], data)
except:
print 'Oh dear, failed to scrape %s' % base_url
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
2ee9450d96743b4f874b933239d1ce2444821e2f | 32617ad3bee46a9d5896fb05b07edbdcc77431b0 | /RDFM_RAW/factorization_machine_online.py | 40b2bed842d7d47b3c36a168e1363ced3a9dfd21 | [] | no_license | andreblumenau/RDFM | 21bbfc79afd644e803594cd4efddaf21579f8287 | 5dbf1b38c3b3aea29b21d846ebb841d264552ac2 | refs/heads/master | 2020-03-28T09:15:43.488559 | 2019-08-06T01:56:09 | 2019-08-06T01:56:09 | 148,024,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,291 | py | import numpy
import cpu_learning
from cpu_learning import optimize
import gc
from pre_process import DataProcessing #Talvez desnecessário
from metrics import evaluate
from metrics import evaluate_rmse
class FactorizationMachine:
def get_random_weight_matrix(self,number_of_features,number_of_latent_vectors):
model = numpy.random.ranf((number_of_features, number_of_latent_vectors))
model = model / numpy.sqrt((model*model).sum())
return model
def __init__(self,iterations,learning_rate,latent_vectors,regularization,slice_size,batch_size,
slice_patience,iteration_patience,slice_patience_threshold,iteration_patience_threshold):
if slice_size < batch_size:
raise ValueError('"slice_size" parameter cannot be smaller than "batch_size" parameter.')
if iteration_patience >= iterations:
raise ValueError('"iteration_patience" parameter cannot be smaller than "iterations" parameter.')
#"Private" properties
self.model = None
#Parameterized properties
self.iterations = iterations
self.learning_rate = learning_rate
self.latent_vectors = latent_vectors
self.regularization = regularization
self.slice_size = slice_size
self.batch_size = batch_size
self.slice_patience = slice_patience
self.iteration_patience = iteration_patience
self.slice_patience_threshold = slice_patience_threshold
self.iteration_patience_threshold = iteration_patience_threshold
def learn(self,trainX,trainY):
skip = 0
end = 0
patience_counter = 0
iteration_error = 0
last_iteration_error = 0
slice_count = numpy.floor(trainX.shape[0]/self.slice_size).astype(numpy.int32)
if self.slice_patience >= slice_count:
raise ValueError('"slice_size" parameter cannot be smaller than "batch_size" parameter.')
if self.model is None:
self.model = self.get_random_weight_matrix(trainX.shape[1],self.latent_vectors)
for j in range(1):#(slice_count):
skip = j*self.slice_size
end = ((j+1)*self.slice_size)
self.model,iteration_error,error_iter_array = optimize(
trainX[skip:end],
trainY[skip:end],
iterations = self.iterations,
alpha = self.learning_rate,
regularization = self.regularization,
weight_matrix = self.model,
batch_size = self.batch_size,
iteration_patience = self.iteration_patience,
iteration_patience_threshold = self.iteration_patience_threshold)
if numpy.abs(numpy.abs(iteration_error)-last_iteration_error) < self.slice_patience_threshold:
patience_counter = patience_counter+1
else:
patience_counter = 0
if patience_counter == self.slice_patience:
break;
last_iteration_error = numpy.abs(iteration_error)
gc.collect()
def predict(self,validationX,validationY,error_buffer=5):
rmse,error_by_index = evaluate(validationX,validationY,self.model)
if error_buffer > error_by_index.shape[0]:
error_buffer = error_by_index.shape[0]
self.smallest_error = error_by_index[0:error_buffer,1].astype(numpy.int32)
self.greatest_error = error_by_index[(len(error_by_index)-error_buffer):len(error_by_index),1].astype(numpy.int32)
self.error_buffer = error_buffer
#print("self.smallest_error",self.smallest_error)
#print("self.smallest_error.shape",self.smallest_error.shape)
return rmse
def tardigrade(self,data_handler,neighbourhood_models):
indexes = numpy.hstack((self.smallest_error,self.greatest_error))
features,target = data_handler.features_and_target_from_indexes(indexes)
index_and_rmse = numpy.tile(1,(neighbourhood_models.shape[0],2))
for i in range(neighbourhood_models.shape[0]):
index_and_rmse[i][1] = i
index_and_rmse[i][0] = evaluate_rmse(features,target,neighbourhood_models[i])
index_and_rmse = index_and_rmse[index_and_rmse[:,0].argsort()]
tensor = numpy.tile(0,(1,self.model.shape[0],self.model.shape[1]))
tensor[0] = self.model
neighbourhood_models = neighbourhood_models[index_and_rmse[0:max(index_and_rmse.shape[0],self.error_buffer),1]]
self.model = numpy.vstack((neighbourhood_models,tensor)).mean(axis=0)
return
| [
"blumenau.andre@gmail.com"
] | blumenau.andre@gmail.com |
602ecc8bdddf40e9c5c4b9d685bd1df5dec327b2 | f89c7186b619d2196c53de268e616c324e3036a0 | /telecomdata_userchurn_logregwithpca/src/models/model_evaluation.py | b8206f1de647b6fd5092d61eb6cc57f4a009052b | [] | no_license | naveenkumarbs/myPythonWorkspace | f03e6f981725e72bb1b5d6e4199c59296a1247c6 | 88e9f395b92c02951e8850cfad0ffb9bc14a182d | refs/heads/main | 2023-08-22T07:10:56.802952 | 2021-10-31T13:59:47 | 2021-10-31T13:59:47 | 423,146,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | from sklearn import metrics
def generateAccuracyScore(actual, predicted):
accuracyScore = "{:2.2f}".format(metrics.accuracy_score(actual, predicted))
return accuracyScore
def generateROC_AUC_Score(actual, predicted):
roc_auc_score = "{:2.2f}".format(metrics.roc_auc_score(actual, predicted))
return roc_auc_score | [
"naveenkumar.6027@gmail.com"
] | naveenkumar.6027@gmail.com |
11eaad49e2f332332ac43910e59112ef2d27a95d | c0340c511cff5b40b4681c4d3238d807624c0323 | /models/revision/branching_entropy/branching_direction_entropy.py | 88c5d3c8f2bdf70871641d209a2d1963a11af595 | [] | no_license | m-hahn/grammar-optim | 5fa7ade47d2ad91f517c887ee2c65af24059069d | 07a1a80692a504bcafc8120a21c4dc9066b495ee | refs/heads/master | 2022-08-30T06:54:42.749264 | 2022-08-05T12:09:28 | 2022-08-05T12:09:28 | 156,456,167 | 13 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,065 | py | #/u/nlp/bin/stake.py -g 11.5g -s run-stats-pretrain2.json "python readDataDistEnglishGPUFree.py"
import random
import sys
from math import log, exp
from random import random, shuffle
from corpusIterator_FuncHead import CorpusIteratorFuncHead
languages = ["Hindi", "Swedish", "German", "Urdu", "English", "Spanish", "Chinese", "Slovenian", "Estonian", "Norwegian", "Serbian", "Croatian", "Finnish", "Portuguese", "Catalan", "Russian", "Arabic", "Czech", "Japanese", "French", "Latvian", "Basque", "Danish", "Dutch", "Ukrainian", "Gothic", "Hebrew", "Hungarian", "Latin", "Persian", "Bulgarian", "Romanian", "Indonesian", "Greek", "Turkish", "Slovak", "Belarusian", "Galician", "Italian", "Lithuanian", "Polish", "Vietnamese", "Korean", "Tamil", "Irish", "Marathi", "Afrikaans", "Telugu", "Coptic", "Ancient_Greek", "Old_Church_Slavonic"]
with open("branching_entropy.tsv", "w") as outFile:
print >> outFile, "Language\tBranchingEntropy"
for language in languages:
posUni = set() #[ "ADJ", "ADP", "ADV", "AUX", "CONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"]
posFine = set() #[ "``", ",", ":", ".", "''", "$", "ADD", "AFX", "CC", "CD", "DT", "EX", "FW", "GW", "HYPH", "IN", "JJ", "JJR", "JJS", "-LRB-", "LS", "MD", "NFP", "NN", "NNP", "NNPS", "NNS", "PDT", "POS", "PRP", "PRP$", "RB", "RBR", "RBS", "RP", "-RRB-", "SYM", "TO", "UH", "VB", "VBD", "VBG", "VBN", "VBP", "VBZ", "WDT", "WP", "WP$", "WRB", "XX" ]
deps = ["acl", "acl:relcl", "advcl", "advmod", "amod", "appos", "aux", "auxpass", "case", "cc", "ccomp", "compound", "compound:prt", "conj", "conj:preconj", "cop", "csubj", "csubjpass", "dep", "det", "det:predet", "discourse", "dobj", "expl", "foreign", "goeswith", "iobj", "list", "mark", "mwe", "neg", "nmod", "nmod:npmod", "nmod:poss", "nmod:tmod", "nsubj", "nsubjpass", "nummod", "parataxis", "punct", "remnant", "reparandum", "root", "vocative", "xcomp"]
#deps = ["acl", " advcl", " advmod", " amod", " appos", " aux", " case cc", " ccompclf", " compound", " conj", " cop", " csubjdep", " det", " discourse", " dislocated", " expl", " fixed", " flat", " goeswith", " iobj", " list", " mark", " nmod", " nsubj", " nummod", " obj", " obl", " orphan", " parataxis", " punct", " reparandum", " root", " vocative", " xcomp"]
header = ["index", "word", "lemma", "posUni", "posFine", "morph", "head", "dep", "_", "_"]
originalDistanceWeights = {}
orderTable = {}
keys = set()
vocab = {}
distanceSum = {}
distanceCounts = {}
depsVocab = set()
totalCount = 0
for partition in ["train", "dev"]:
for sentence in CorpusIterator(language,partition, storeMorph=True).iterator():
for line in sentence:
vocab[line["word"]] = vocab.get(line["word"], 0) + 1
depsVocab.add(line["dep"])
posFine.add(line["posFine"])
posUni.add(line["posUni"])
if line["dep"] == "root":
continue
posHere = line["posUni"]
posHead = sentence[line["head"]-1]["posUni"]
dep = line["dep"]
direction = "HD" if line["head"] < line["index"] else "DH"
key = (posHead, dep, posHere)
keyWithDir = (posHead, dep, posHere, direction)
orderTable[keyWithDir] = orderTable.get(keyWithDir, 0) + 1
keys.add(key)
distanceCounts[key] = distanceCounts.get(key,0.0) + 1.0
distanceSum[key] = distanceSum.get(key,0.0) + abs(line["index"] - line["head"])
totalCount += 1
#print orderTable
entropyTotal = 0
dhLogits = {}
for key in keys:
hd = orderTable.get((key[0], key[1], key[2], "HD"), 0) + 0.00000001
dh = orderTable.get((key[0], key[1], key[2], "DH"), 0) + 0.00000001
p_hd = hd/(hd+dh)
entropyHere = p_hd * log(p_hd) + (1-p_hd) * log(1-p_hd)
entropyTotal -= (hd+dh)/totalCount * entropyHere
print >> outFile, ("\t".join(map(str,[language, entropyTotal])))
| [
"mhahn29@gmail.com"
] | mhahn29@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.