index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
20,800 | 48b40efa8f4ecba9652a739bea39aa038bb3e1a8 | import os.path
import numpy as np
from sklearn_pipeline_play.pipeline import PipelineWrapper
from sklearn_pipeline_play.pipeline import DataIngest
from sklearn_pipeline_play.pipeline import MainWrapper
from sklearn_pipeline_play.pipeline import NoiseAdder
from pytest import approx
def test_DataIngest():
    """DataIngest loads the sample CSV into a 9-row frame with columns a, b, c."""
    csv_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                            "data", "simple_numbers.csv")
    df = DataIngest(csv_path).get()
    assert len(df) == 9                          # 9 samples
    assert list(df.columns) == ["a", "b", "c"]   # 3 feature dimensions
def test_PipelineWrapper():
    """End-to-end regression test of PipelineWrapper on the sample data.

    All numeric expectations are pinned to random_state=0; the call order
    matters, so treat this as an order-dependent regression test.
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))
    df = DataIngest(os.path.join(this_dir, "data", "simple_numbers.csv")).get()
    # Empty config: pipeline defaults apply (8 clusters, per the shape below)
    pw = PipelineWrapper({})
    cluster_distances = pw.fit_transform(df)
    # 9 samples. For each sample, 8 numbers representing the distance from each cluster centre
    assert np.shape(cluster_distances) == (9, 8)
    # Explicit config: 3 clusters, fixed seeds
    yaml_dict = {"cluster": {"n_clusters": 3,
                             "random_state": 0},
                 "dim_reduce": {"random_state": 0}}
    pw = PipelineWrapper(yaml_dict)
    # The nested YAML dict flattens to sklearn's "step__param" convention
    assert pw._get_params_from_yaml_dict(yaml_dict) == {
        "cluster__n_clusters": 3,
        "cluster__random_state": 0,
        "dim_reduce__random_state": 0,
    }
    cluster_distances = pw.fit_transform(df)
    assert np.shape(cluster_distances) == (9, 3)
    assert cluster_distances[0] == approx([1.73773119, 0.82441212, 2.26648292])
    assert cluster_distances[-1] == approx([2.98023224e-08, 2.39860214e+00, 3.32752544e+00])
    # Adding the optional noise step changes the fitted distances
    yaml_dict = {"cluster": {"n_clusters": 3,
                             "random_state": 0},
                 "dim_reduce": {"random_state": 0},
                 "add_noise": {"random_state": 0,
                               "loc": 10.0,
                               "scale": 0.1}
                 }
    pw = PipelineWrapper(yaml_dict)
    assert pw._get_params_from_yaml_dict(yaml_dict) == {
        "cluster__n_clusters": 3,
        "cluster__random_state": 0,
        "dim_reduce__random_state": 0,
        "add_noise__loc": 10.0,
        "add_noise__random_state": 0,
        "add_noise__scale": 0.1
    }
    cluster_distances = pw.fit_transform(df)
    assert np.shape(cluster_distances) == (9, 3)
    assert cluster_distances[0] == approx([1.89990178, 2.2093906 , 0.94026417])
    assert cluster_distances[-1] == approx([2.98023224e-08, 3.53635370e+00, 2.63086021e+00])
def test_MainWrapper():
    """MainWrapper drives the full pipeline from a YAML config file."""
    yaml_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             "data", "example.yaml")
    distances = MainWrapper(["--yaml", str(yaml_path)]).run()
    # 9 samples x 3 clusters; values pinned for the seeds in example.yaml
    assert np.shape(distances) == (9, 3)
    assert distances[0] == approx([1.73773119, 0.82441212, 2.26648292])
    assert distances[-1] == approx([2.98023224e-08, 2.39860214e+00, 3.32752544e+00])
def test_NoiseAdder():
    """NoiseAdder behavior: default is a no-op, seeded noise is reproducible."""
    # Default construction: output equals input (no noise added)
    n = NoiseAdder()
    X = n.transform(np.array([[0, 0, 0], [0, 0, 0]]))
    assert X.shape == (2, 3)
    assert X[0] == approx([0.0, 0.0, 0.0])
    assert X[1] == approx([0.0, 0.0, 0.0])
    # Seeded Gaussian noise: values pinned for random_state=0, loc=1, scale=0.1
    n = NoiseAdder(random_state=0, loc=1, scale=0.1)
    X = n.transform(np.array([[10, 20, 30], [40, 50, 60]]))
    assert X.shape == (2, 3)
    assert X[0] == approx([11.17640523, 21.04001572, 31.0978738])
    assert X[1] == approx([41.22408932, 51.1867558, 60.90227221])
    n = n.fit([])  # should be trivial return
    # Identical values again — so presumably transform() re-seeds on each call
    X = n.transform(np.array([[10, 20, 30], [40, 50, 60]]))
    assert X.shape == (2, 3)
    assert X[0] == approx([11.17640523, 21.04001572, 31.0978738])
    assert X[1] == approx([41.22408932, 51.1867558, 60.90227221])
|
20,801 | 9f17f18bddd38c8bbdd769e861402b8b18174a22 | from typing import Callable, Generator, List, TextIO, Tuple, Optional, Union
from bs4 import BeautifulSoup
class SoupFromFile(object):
    """Context manager that opens *htmlfp* and parses it into a BeautifulSoup.

    Stub-style declaration: bodies are ``pass`` (interface skeleton).
    """
    # NOTE(review): no __exit__ is declared here, so the context-manager
    # protocol looks incomplete — confirm against the implementation.
    file_obj: TextIO   # handle opened on the HTML file
    parser: str        # parser backend passed to BeautifulSoup (default 'lxml')
    def __init__(self, htmlfp: str, parser: str = 'lxml'): pass
    def __enter__(self) -> BeautifulSoup: pass
class SoupSaver(object):
    """Context manager that parses *htmlfp* and saves the (possibly modified)
    soup back out — to *htmlsp* when given, presumably in-place otherwise.

    Stub-style declaration: bodies are ``pass`` (interface skeleton).
    """
    # NOTE(review): as with SoupFromFile, no __exit__ is declared here.
    htmlfp: str                                # input HTML path
    htmlsp: Optional[str]                      # output path (None -> TODO confirm: overwrite input?)
    file_obj: TextIO
    parser: str                                # BeautifulSoup parser backend
    pretty: bool                               # pretty-print on save
    formatter: Optional[Union[str, Callable]]  # bs4 output formatter
    soup: BeautifulSoup
    def __init__(self,
                 htmlfp: str,
                 htmlsp: Optional[str] = None,
                 parser: str = 'lxml',
                 pretty: bool = False,
                 formatter: Optional[Union[str, Callable]] = None): pass
    def __enter__(self) -> BeautifulSoup: pass
def multi_soup_reader(
        htmlfiles: List[str],
        parser: str = 'lxml') -> Generator[Tuple[str, BeautifulSoup], None, None]:
    """Yield (filename, soup) pairs for each file in *htmlfiles* (stub).

    BUG FIX: ``Generator`` takes three type parameters (yield, send, return);
    the one-argument form raises TypeError at import time on Python < 3.13.
    """
    pass
def multi_soup_saver(
        htmlfiles: List[Union[str, Tuple[str, str]]],
        parser: str = 'lxml',
        pretty: bool = False,
        formatter: Optional[Union[str, Callable]] = None
        ) -> Generator[Tuple[str, BeautifulSoup], None, None]:
    """Yield (filename, soup) pairs and save each soup on completion (stub).

    BUG FIX: ``Generator`` takes three type parameters (yield, send, return);
    the one-argument form raises TypeError at import time on Python < 3.13.
    """
    pass
|
20,802 | d4036a3783575dff7d91d6ddd847e3d4b9a96e80 | from scale_client import networks
from scale_client.sensors.threaded_virtual_sensor import ThreadedVirtualSensor
from scale_client.core.sensed_event import SensedEvent
from scale_client.networks.util import coap_response_success, coap_code_to_name, DEFAULT_COAP_PORT
from scale_client.util import uri
# this is basically replaceable by the coapthon HelperClient, but this version has a bugfix (see below)
from scale_client.networks.coap_client import CoapClient
import logging
log = logging.getLogger(__name__)
class CoapSensor(ThreadedVirtualSensor):
    """
    This networked 'sensor' reads events from a remote CoAP server and publishes them internally.
    You can configure it to use the 'observe' feature (default) to receive and publish events
    asynchronously by specifying its 'subscriptions'. Alternatively, you can have it
    simply poll the server with a GET request every *sample_interval* seconds by specifying
    that parameter.
    """

    DEFAULT_PRIORITY = 5

    def __init__(self, broker,
                 topic=None,
                 event_type="coap_sensor",
                 hostname="127.0.0.1",
                 port=DEFAULT_COAP_PORT,
                 src_port=None,
                 username=None,
                 password=None,
                 timeout=300,
                 **kwargs):
        """
        Many of these parameters are used to connect to the remote CoAP server.
        :param broker:
        :param topic: path of remote resource this 'sensor' monitors
        :param event_type: used as the default event type for events we publish (if none already specified in retrieved event)
        :param hostname: hostname of remote server this 'sensor' monitors
        :param port: port of remote server
        :param src_port: source port of client (default=None=automatically assigned by OS net stack)
        :param username:
        :param password:
        :param timeout: timeout (in seconds) for a request (also used to periodically send observe requests
        to keep the request fresh and handle the case where it was NOT_FOUND initially)
        :param kwargs:
        """
        super(CoapSensor, self).__init__(broker, event_type=event_type, **kwargs)
        self._topic = topic
        self._client = None  # Type: coapthon.client.helperclient.HelperClient
        self._hostname = hostname
        self._port = port
        self._src_port = src_port
        if username is not None or password is not None:
            log.warning("SECURITY authentication using username & password not yet supported!")
        self._username = username
        self._password = password
        # Polling mode is implied by a configured sample interval
        # (``_sample_interval`` is presumably set by the base class — confirm).
        self.use_polling = self._sample_interval is not None
        self._timeout = timeout
        self._client_running = False
        self._is_connected = False
        # Used to properly cancel_observing on_stop()
        self._last_observe_response = None
        # We only want to use the threaded version of observe ONCE due to a bug in coapthon
        self._observe_started = False

    @property
    def remote_path(self):
        # Full URI of the monitored resource; 'coaps' scheme is selected when
        # credentials are present.  Default port is omitted from the URI.
        userinfo = None
        if self._username:
            userinfo = self._username
            if self._password:
                userinfo += ':' + self._password
        return uri.build_uri(scheme='coap' if not userinfo else 'coaps',
                             host=self._hostname, port=self._port if self._port != DEFAULT_COAP_PORT else None,
                             path=self._topic, userinfo=userinfo)

    def read_raw(self):
        """
        This method is used for polling the specified topics (remote CoAP resources).
        Hence, it will cycle through each of them in a round-robin fashion: one GET
        request per sensor read interval.
        :return: raw data
        :raises IOError: when the client is shutting down or the response is an error
        """
        resp = self._client.get(self._topic, timeout=self._timeout)
        # XXX: when client closes the last response is a NoneType
        if resp is None:
            raise IOError("client shutting down...")
        elif coap_response_success(resp):
            return resp.payload
        else:
            raise IOError("CoAP response bad: %s" % resp)

    def make_event_with_raw_data(self, raw_data, priority=None):
        """
        This implementation assumes that the raw_data is a JSON-encoded SensedEvent already.
        :param raw_data:
        :param priority:
        :return:
        :raises ValueError: when *raw_data* is not a decodable SensedEvent
        """
        # TODO: use priority? or log warning if someone tries to use it?
        try:
            ev = SensedEvent.from_json(raw_data)
            # mark the event as relayed through this remote resource
            networks.util.process_remote_event(ev, relay_uri=self.remote_path)
            return ev
        except ValueError as e:
            log.error("Failed to decode SensedEvent from: %s" % raw_data)
            raise e

    def observe_topic(self):
        """Issue observe GET request for the topic of interest."""
        # Bound closure keeps the private (name-mangled) callback reachable
        # from the coapthon client thread.
        def __bound_observe_callback(response):
            return self.__observe_callback(response)
        if self._client_running:
            log.debug("observing CoAP resource at topic %s" % self._topic)
            # WARNING: you cannot mix the blocking and callback-based method calls! We could probably fix the
            # blocking one too, but we've had to extend the coapthon HelperClient to fix some threading problems
            # that don't allow it to handle more than one callback-based call in a client's lifetime.
            self._client.observe(self._topic, __bound_observe_callback, self._timeout)
        else:
            log.debug("Skipping observe_topics as client isn't running... maybe we're quitting?")

    def __observe_callback(self, response):
        """
        Handles the response from an observe GET request.  If the response has an error,
        we will try observing it again at a later time (self.timeout) as we expect the
        server to be functional and the resource to eventually be present.
        :param response:
        :type response: coapthon.messages.response.Response
        :return: True on a successfully processed update, False on error, None on shutdown
        """
        # XXX: when client closes the last response is a NoneType
        if response is None:
            return
        elif coap_response_success(response):
            event = self.make_event_with_raw_data(response.payload)
            log.debug("received content update for observed resource: %s" % self.remote_path)
            if self.policy_check(event):
                self.publish(event)
            # remembered so on_stop() can cancel the observation cleanly
            self._last_observe_response = response
            return True
        else:
            # TODO: handle error codes and try to re-observe?
            # TODO: switch to polling if observe isn't supported by the server
            log.debug("unsuccessful observe request with code: %s. Retrying later..." % coap_code_to_name(response.code))
            # NOTE(review): this passes the UNBOUND method (self.__class__.observe_topic);
            # presumably timed_call supplies the instance — confirm against its API.
            self.timed_call(self._timeout, self.__class__.observe_topic)
            return False

    def on_start(self):
        """
        If using polling, this will start the periodic sensor loop. If not (default), this will
        use the CoAP 'observe' feature to asynchronously receive updates to the specified topic
        and internally publish them as SensedEvents.
        """
        self.run_in_background(self.__run_client)

    def __run_client(self):
        """This runs the CoAP client in a separate thread."""
        self._client = CoapClient(server_hostname=self._hostname, server_port=self._port, src_port=self._src_port)
        self._client_running = True
        if self.use_polling:
            # base-class on_start drives the periodic read_raw() loop
            super(CoapSensor, self).on_start()
        else:
            self.observe_topic()

    def on_stop(self):
        # Cancel any outstanding observation first, then tear the client down.
        if self._client and self._client_running:
            if self._last_observe_response is not None:
                self._client.cancel_observing(self._last_observe_response, True)
            self._client.close()
            self._client_running = False
        super(CoapSensor, self).on_stop()
|
20,803 | d547b673f5549e552297fff17491747ac64f823d | # pylint: disable=W0212, C0111
""" The best way to start creating a simulation is by copying the start.py
file and other files from 'abce/template'.
To see how to create a simulation read :doc:`Walk_through`. In this module you
will find the explanation for the command.
This is a minimal template for a start.py::
from agent import Agent
from abce import *
simulation = Simulation(name='ABCE')
agents = simulation.build_agents(Agent, 'agent', 2)
for round in simulation.next_round():
agents.do('one')
agents.do('two')
agents.do('three')
simulation.graphs()
"""
import datetime
import time
import random
from .agent import Agent, Trade
from collections import defaultdict, OrderedDict
from abce.exceptions import NotEnoughGoods
class Simulation(object):
    """Minimal ABCE simulation driver: builds agent groups and advances rounds.

    :param name: simulation name, stored in ``sim_parameters``
    :param random_seed: seed for :mod:`random`; when ``None`` or ``0`` the
        wall-clock time is used so repeated runs differ
    :param processes: number of worker processes the message queues are sized for
    """

    def __init__(self, name='abce', random_seed=None, processes=1):
        self._messages = {}
        self._resource_command_group = {}
        self.num_agents = 0
        self._build_first_run = True
        self.resource_endowment = defaultdict(list)
        self._start_round = 0
        self.round = int(self._start_round)
        # BUG FIX: ``processes`` was accepted but never stored, so the
        # ``messagess`` line below raised AttributeError on every instantiation.
        self.processes = processes
        # one message list per process plus two extra (scheduler slots)
        self.messagess = [list() for _ in range(self.processes + 2)]
        self.clock = time.time()
        # BUG FIX: read by finalize() but previously never initialized, which
        # made __del__ raise AttributeError at garbage collection.
        self._db_started = False
        if random_seed is None or random_seed == 0:
            random_seed = self.clock
        random.seed(random_seed)
        self.sim_parameters = OrderedDict(
            {'name': name, 'random_seed': random_seed})

    def advance_round(self, time):
        """Print the round banner and step every agent in every group.

        NOTE: the ``time`` parameter (the round label) shadows the module-level
        ``time`` module; name kept for backward compatibility with callers.
        """
        print("\rRound" + str(time))
        # NOTE(review): ``self.groups`` is never assigned in this class —
        # presumably populated by a subclass or caller; confirm before use.
        for g in self.groups.values():
            for a in g:
                a.step()

    def __del__(self):
        self.finalize()

    def finalize(self):
        """Print timing once; safe to call repeatedly (idempotent)."""
        # getattr guard keeps __del__ safe even if __init__ aborted early
        if getattr(self, '_db_started', False):
            self._db_started = False
            print()
            print(str("time only simulation %6.2f" %
                      (time.time() - self.clock)))

    def build_agents(self, AgentClass, number=1, group_name=None,
                     parameters=None, agent_parameters=None):
        """Instantiate *number* agents of *AgentClass* and return them as a list.

        ``group_name`` defaults to the class name.  ``parameters`` is merged
        into ``sim_parameters`` and forwarded to each agent's ``init``.
        """
        # Avoid the shared-mutable-default pitfall of the original
        # ``parameters={}`` signature (behaviorally equivalent for callers).
        if parameters is None:
            parameters = {}
        self.number = number
        if group_name is None:
            group_name = AgentClass.__name__
        self.sim_parameters.update(parameters)
        agents = []
        for i in range(self.number):
            # each agent gets its id, group name and a private random draw
            a = AgentClass(i, group_name, random.random())
            a.init(parameters, agent_parameters)
            agents.append(a)
        return agents
|
20,804 | 317179c0368233d5d7c436c789d7cc01a71f0ca4 | # coding: utf-8
#
# Purely electronic model of the Reaction Center
#
# Calculations of absorption spectra with a realistic lineshape theory
# and with effective Gaussian lineshapes
#
#
#
#
# In[1]:
import os
import numpy
import quantarhei as qr
print(qr.Manager().version)
import matplotlib.pyplot as plt
plt.switch_backend('agg')
# In[2]:
pre_in = "in"
pre_out = "out"
# check if pre_out exists and is a directory
if not os.path.isdir(pre_out):
try:
os.makedirs(pre_out, exist_ok=True)
except:
raise Exception("Output directory name '"
+pre_out+"' does not represent a valid directory")
#
# Model from Jordanides et al. Ref. 1 is adjusted and extended by two CT states
#
#
# Energy offsets and dipole scalings (offsets in 1/cm) applied on top of the
# Ref. 1 site energies.
jordanides = False
if jordanides:
    # Original Jordanides et al. parameters: no shifts, no rescaling
    offset = 0.0
    offset_P = 0.0 #485.0
    offset_P_M = offset_P + 0.0
    h_shift = 0.0
    sc_H = 1.0
    sc_P = 1.0
else:
    # Manually adjusted parameters for the CT-extended model
    offset = 275
    offset_P = 400 #485.0
    offset_P_M = offset_P + 100.0
    h_shift = 85.0
    sc_H = 0.79
    sc_P = 0.75

#
# Molecules
#
# Two-level molecules: [ground, excited] energies in wavenumbers
with qr.energy_units("1/cm"):
    PM = qr.Molecule([0.0, 11610.0+offset_P_M], name="PM")
    PL = qr.Molecule([0.0, 11610.0+offset_P], name="PL")
    BM = qr.Molecule([0.0, 12220.0+offset], name="BM")
    BL = qr.Molecule([0.0, 12370.0+offset], name="BL")
    HL = qr.Molecule([0.0, 13020.0+offset-h_shift], name="HL")
    HM = qr.Molecule([0.0, 13150.0+offset+h_shift], name="HM")
    # CT states are effectively represented as "new molecules" in the system
    PCT_M = qr.Molecule([0.0, 15200], name="PCT1")
    PCT_L = qr.Molecule([0.0, 13550], name="PCT2") # 13500

#
# Transition dipole moment from Ref. 1 are scaled
#
dPM = numpy.array([ 0.8546, 0.5051, 0.1206])*sc_P
dPL = numpy.array([-0.9649, -0.0250, 0.2613])*sc_P
dHM = numpy.array([ 0.2749, -0.3694, -0.8877])*sc_H
dHL = numpy.array([ 0.0452, -0.9672, -0.2498])*sc_H
PM.set_dipole(0,1, dPM)
PL.set_dipole(0,1, dPL)
BL.set_dipole(0,1, [ 0.7782, 0.5332, 0.3317])
BM.set_dipole(0,1, [-0.9681, 0.1107, 0.2249])
HL.set_dipole(0,1, dHL)
HM.set_dipole(0,1, dHM)

#
# CT states are dark
#
PCT_M.set_dipole(1, 0, [0.0, 0.0, 0.0])
PCT_L.set_dipole(1, 0, [0.0, 0.0, 0.0])

molecules = [PM, PL, BM, BL, HL, HM, PCT_M, PCT_L]

# saving molecules without environment
qr.save_parcel(molecules, os.path.join(pre_out,"molecules.qrp"))

#
# Here we build the RC as an aggregate of molecules
#
# Only the special pair and BM enter the small aggregate used below
mol3 = [PM, PL, BM]
agg = qr.Aggregate(molecules=mol3)

#
# Exciton interaction matrix
#
# values from Ref. 1
JP_77K_Jordanides = 575.0
JP_77K = JP_77K_Jordanides

#
# Fitted values of the model with CT states
# starting values of the manual search of best parameters are
# taken from Ref. 2
#
if jordanides:
    JP = 395 #JP_77K
    XCT_M = 0.0
    XCT_L = 0.0
    YCT = 0.0
else:
    JP = 690 #575
    XCT_M = 905 #1400
    XCT_L = 755
    YCT = 550 #350

# Factor of three is just to experiment with
PB_1 = -104.0
PB_2 = -94.0
LCT = 0
MCT = 0
# the interaction matrix is taken from
# (couplings in 1/cm; row/column ordering matches ``molecules`` above)
J_Matrix = numpy.array([
    [  0.0,    JP, -16.0,  PB_1,  19.9,  -4.8, XCT_M,   YCT],
    [   JP,   0.0,  PB_2,   2.8,  -6.8,  18.0,   YCT, XCT_L],
    [-16.0,  PB_2,   0.0,  19.3,  -7.5,  95.8,   MCT,   LCT],
    [ PB_1,   2.8,  19.3,   0.0, 123.1,  -7.9,   LCT,   MCT],
    [ 19.9,  -6.8,  -7.5, 123.1,   0.0,   3.9,   0.0,   0.0],
    [ -4.8,  18.0,  95.8,  -7.9,   3.9,   0.0,   0.0,   0.0],
    [XCT_M,   YCT,   MCT,   LCT,   0.0,   0.0,   0.0,   0.0],
    [  YCT, XCT_L,   LCT,   MCT,   0.0,   0.0,   0.0,   0.0]
])

with qr.energy_units("1/cm"):
    # only the 3x3 sub-block corresponding to the [PM, PL, BM] aggregate
    agg.set_resonance_coupling_matrix(J_Matrix[0:3,0:3])

#agg.save("RC_Model_40_4_adjusted_CT_no_environment_unbuilt.hdf5")
qr.save_parcel(agg, os.path.join(pre_out,
                                 "RC_Model_40_4_adjusted_CT_no_environment_unbuilt.qrp"))
# In[3]:

# check that units were set correctly
rc = agg.resonance_coupling[1,0]
with qr.energy_units("1/cm"):
    print(qr.convert(rc, "int"))
with qr.energy_units("1/cm"):
    print(agg.get_resonance_coupling(1,0))

# In[4]:

# Bath correlation function
# Time axis: 1000 steps of length 1.0 starting at t = 0
time = qr.TimeAxis(0.0, 1000, 1.0)
# Overdamped Brownian baths at 77 K: A = accessory BChls, H = pheophytins,
# P = special pair, CT = charge-transfer states (much larger reorganization)
cfA_params = dict(ftype="OverdampedBrownian",
                  reorg=190, cortime=80, T=77, matsubara=100)
cfH_params = dict(ftype="OverdampedBrownian",
                  reorg=200, cortime=100, T=77, matsubara=100)
cfP_params = dict(ftype="OverdampedBrownian",
                  reorg=700, cortime=120, T=77, matsubara=100)
cfCT_params = dict(ftype="OverdampedBrownian",
                   reorg=3600, cortime=20, T=77, matsubara=200)
with qr.energy_units("1/cm"):
    cfA = qr.CorrelationFunction(time, cfA_params)
    cfH = qr.CorrelationFunction(time, cfH_params)
    cfP = qr.CorrelationFunction(time, cfP_params)
    cfCT = qr.CorrelationFunction(time, cfCT_params)
PM.set_transition_environment((0,1), cfP)
PL.set_transition_environment((0,1), cfP)
BM.set_transition_environment((0,1), cfA)
BL.set_transition_environment((0,1), cfA)
HL.set_transition_environment((0,1), cfH)
HM.set_transition_environment((0,1), cfH)
PCT_M.set_transition_environment((0,1), cfCT)
PCT_L.set_transition_environment((0,1), cfCT)

# build with the two-exciton band included
agg.build(mult=2)

#agg.save("RC_Model_40_4_adjusted_CT_no_vibrations_built.hdf5")
qr.save_parcel(agg, os.path.join(pre_out,
                                 "RC_Model_40_4_adjusted_CT_no_vibrations_built.qrp"))

# In[5]:

#
# Refitted model of the Reaction Center using effective Gaussian lineshapes
#
#
#
#
# "Environment" modelled by dressing the states
#
molecules_eff = qr.load_parcel(os.path.join(pre_out,"molecules.qrp"))
agg_eff = qr.Aggregate(molecules=molecules_eff)
with qr.energy_units("1/cm"):
    agg_eff.set_resonance_coupling_matrix(J_Matrix)
PMe = molecules_eff[0]
PLe = molecules_eff[1]
BMe = molecules_eff[2]
BLe = molecules_eff[3]
# NOTE(review): ``molecules`` was saved in the order [..., HL, HM, ...], so
# index 4 is HL and index 5 is HM — the two assignments below appear swapped.
# The energy/width shifts are identical for both, but the dipoles set further
# down differ; confirm against the intended assignment.
HMe = molecules_eff[4]
HLe = molecules_eff[5]
PCT_Me = molecules_eff[6]
PCT_Le = molecules_eff[7]
# Shift site energies of the effective model (values in 1/cm)
with qr.energy_units("1/cm"):
    ee = PMe.get_energy(1)
    PMe.set_energy(1,ee-80.0)
    ee = PLe.get_energy(1)
    PLe.set_energy(1,ee-80.0)
    ee = BMe.get_energy(1)
    BMe.set_energy(1,ee-85.0)
    ee = BLe.get_energy(1)
    BLe.set_energy(1,ee-85.0)
    ee = HMe.get_energy(1)
    HMe.set_energy(1,ee-75.0)
    ee = HLe.get_energy(1)
    HLe.set_energy(1,ee-75.0)
    ee = PCT_Me.get_energy(1)
    PCT_Me.set_energy(1,ee+230)
    ee = PCT_Le.get_energy(1)
    PCT_Le.set_energy(1,ee+230)
# Effective Gaussian transition widths (1/cm converted to internal units)
PMe.set_transition_width((0,1), qr.convert(630,"1/cm", "int"))
PLe.set_transition_width((0,1), qr.convert(630,"1/cm", "int"))
BMe.set_transition_width((0,1), qr.convert(180,"1/cm", "int"))
BLe.set_transition_width((0,1), qr.convert(180,"1/cm", "int"))
HMe.set_transition_width((0,1), qr.convert(155,"1/cm", "int"))
HLe.set_transition_width((0,1), qr.convert(155,"1/cm", "int"))
PCT_Me.set_transition_width((0,1), qr.convert(800,"1/cm", "int"))
PCT_Le.set_transition_width((0,1), qr.convert(800,"1/cm", "int"))
# Rescaled dipoles for the effective model
dPMe = numpy.array([ 0.8546, 0.5051, 0.1206])*0.76
dPLe = numpy.array([-0.9649, -0.0250, 0.2613])*0.76
dHMe = numpy.array([ 0.2749, -0.3694, -0.8877])*0.68
dHLe = numpy.array([ 0.0452, -0.9672, -0.2498])*0.68
PMe.set_dipole(0,1,dPMe)
PLe.set_dipole(0,1,dPLe)
HMe.set_dipole(0,1,dHMe)
HLe.set_dipole(0,1,dHLe)

# we save the effective model
qr.save_parcel(agg_eff, os.path.join(pre_out,
                                     "RC_eff_Model_40_4_adjusted_CT_no_environment_unbuilt.qrp"))

# single-exciton band is sufficient for the effective absorption
agg_eff.build(mult=1)
# In[6]:

#RT = agg.get_RelaxationTensor(time, relaxation_theory="standard_Redfield", secular_relaxation=True)
# Redfield relaxation rates for the full model
rrm = agg.get_RedfieldRateMatrix()

# In[7]:

# inverse rates = relaxation times between exciton levels
print("Relaxation time (2 -> 1) :", 1.0/rrm.data[1,2])
print("Relaxation time (3 -> 2) :", 1.0/rrm.data[2,3])
print("Relaxation time (3 -> 1) :", 1.0/rrm.data[1,3])

# TEST (put here the energies and temperature)
E2 = 2.24165620051
E1 = 2.13494501445
kbT = 0.01008086552556262
# Detailed-balance check: up/down rate ratio should match exp(-dE/kT)
print("Relaxation time ratio :", rrm.data[2,1]/rrm.data[1,2])
print("... to be compared with :", numpy.exp(-(E2-E1)/kbT))

# In[8]:

# rotating-wave-approximation frequency suggested by the aggregate
rwa = agg.get_RWA_suggestion()
with qr.energy_units("1/cm"):
    print(qr.convert(rwa,"int"))

# In[9]:

# absorption from effective theory
from quantarhei import LabSetup
from quantarhei.utils.vectors import X, Y, Z

lab = LabSetup()
lab.set_polarizations(pulse_polarizations=[X,X,X], detection_polarization=X)

agg_eff.diagonalize()
print("\nEffetive model exciation energies:")
print("Energies in 1/cm:")
N1 = agg_eff.nmono
print([qr.convert(agg_eff.HH[i,i],"int","1/cm") for i in range(1, N1+1)])
print("")
# Mock calculator: Gaussian lineshapes built from Liouville pathways
mabsc = qr.MockAbsSpectrumCalculator(time, system=agg_eff)
rho0 = agg_eff.get_DensityMatrix(condition_type="thermal", temperature=0.0)
ham = agg_eff.get_Hamiltonian()
pthways = agg_eff.liouville_pathways_1(lab=lab, ham=ham, etol=1.0e-5,
                                       verbose=0)
mabsc.bootstrap(rwa=qr.convert(10000.0,"1/cm","int"),
                shape="Gaussian")
mabsc.set_pathways(pthways)
abs1 = mabsc.calculate(raw=False)
abs1.normalize2()
# Realistic-lineshape calculator for the full model (for comparison)
absc = qr.AbsSpectrumCalculator(time, system=agg)

# In[10]:

absc.bootstrap(rwa)

# In[11]:

abss = absc.calculate()
#absexp = qr.load("bas_77K.hdf5") #("DATA/bas_77K.hdf5")
# experimental 77 K absorption for comparison; baseline of 0.086 removed
absexp = qr.load_parcel(os.path.join(pre_in, "bas_77K.qrp"))
absexp.normalize()
absexp.subtract(0.086)
absexp.normalize()
abss.normalize2() #norm=0.53)

# In[29]:

#with qr.energy_units("nm"):
#    abss.plot(axis=[650, 1000, 0, 0.7], show=False)
#    absexp.plot()
plt.figure(0)
with qr.energy_units("1/cm"):
    #abss.plot(axis=[10500, 15000, 0, 1.1], show=False)
    abs1.plot(axis=[10500, 15000, 0, 1.1], show=False)
    absexp.plot(show=True)
    absexp.savefig(os.path.join(pre_out, "abs_full.png"))
# in a Notebook, it seems that the figure shows itself always when we leave the cell

# In[30]:

# number of single-exciton states of the full model
N1 = agg.Nbe[1]
print("Energies in 1/cm:")
print([qr.convert(agg.HH[i,i],"int","1/cm") for i in range(1, N1+1)])

# In[31]:

agg.diagonalize()

# In[32]:

# exciton report
agg.exciton_report(Nrep=8)

# In[33]:

agg.report_on_expansion(2)

# In[34]:

N1 = agg.Nbe[1]
print("Energies in 1/cm:")
print([qr.convert(agg.HH[i,i],"int","1/cm") for i in range(1, N1+1)])

# In[35]:

print("Transition dipoles square:")
print(agg.D2[1:N1+1,0])
#
# ## Fractional model
#
# Remove both H and BL

# In[36]:

#
# Get components of the fractional model
#
indices_of_components = []
names_of_components = ["PM", "PL", "BM"] # , "BL","PCT1", "PCT2"] # "HL", "HM", "BL", , "BCT"
components = []
for name in names_of_components:
    indx = agg.get_Molecule_index(name)
    mol = agg.get_Molecule_by_name(name)
    #if name == "BM":
    #    mol.elenergies[1] = mol.elenergies[1] + 0.1
    indices_of_components.append(indx)
    components.append(mol)
print("Indices of selected molecules: ", indices_of_components)

# In[37]:

#
# Coupling matrix
#
# Extract the coupling sub-matrix for the selected molecules
Ni = len(indices_of_components)
Jfm = numpy.zeros((Ni, Ni), dtype=qr.REAL)
k_1 = 0
for i_1 in indices_of_components:
    k_2 = 0
    for i_2 in indices_of_components:
        Jfm[k_1, k_2] = agg.resonance_coupling[i_1, i_2]
        k_2 += 1
    k_1 += 1

# In[38]:

#
# Fractional aggregate
#
frac = qr.Aggregate(components)
frac.set_resonance_coupling_matrix(Jfm)

# In[39]:

fix_dipole = False
if fix_dipole:
    # optional experiment: boost BM's dipole norm by sqrt(2)
    BM_fix_dipole = frac.get_Molecule_by_name("BM")
    dip = BM_fix_dipole.get_dipole(0, 1)
    nrm = qr.norm(dip)
    dip2 = qr.normalize2(dip, norm=numpy.sqrt(2.0)*nrm)
    BM_fix_dipole.set_dipole(0, 1, dip2)

# In[40]:

#frac.save("fraction_40_4_CT_unbuilt.hdf5")
qr.save_parcel(frac, os.path.join(pre_out,"fraction_40_4_CT_unbuilt.qrp"))

# In[41]:

frac.build()

# In[42]:

# absorption of the fractional model vs. experiment
absc2 = qr.AbsSpectrumCalculator(time, system=frac)
absc2.bootstrap(rwa)
abss2 = absc2.calculate()
#absexp2 = qr.load("bas_77K.hdf5")
absexp2 = qr.load_parcel(os.path.join(pre_in, "bas_77K.qrp"))
absexp2.normalize()
absexp2.subtract(0.086)
absexp2.normalize()
abss2.normalize2() #norm=0.53)
plt.figure(1)
with qr.energy_units("1/cm"):
    abss2.plot(axis=[10500, 15000, 0, 1.1], show=False)
    absexp2.plot(show=True)
    absexp2.savefig(os.path.join(pre_out, "abs_frac.png"))

# In[43]:

frac.diagonalize()

# In[44]:

frac.report_on_expansion(3)

# In[45]:

# exciton energies of the fractional model
HH = frac.get_Hamiltonian()
with qr.eigenbasis_of(HH):
    with qr.energy_units("1/cm"):
        print([HH.data[i,i] for i in range(1,frac.nmono)])

# In[46]:

#
# Get components of the fractional model
#
indices_of_components = []
names_of_components = ["PM", "PL", "BM", "BL","PCT1", "PCT2"] #["BM", "BL"] # "HL", "HM", "BL", , "BCT"
names_of_components3 = ["PM", "PL", "BL"]
components = []
# NOTE(review): this loop selects [PM, PL, BL] but the aggregate below reuses
# ``Jfm``, which was extracted above for [PM, PL, BM] — confirm the coupling
# matrix is the intended one for this fraction.
for name in names_of_components3:
    indx = agg_eff.get_Molecule_index(name)
    mol = agg_eff.get_Molecule_by_name(name)
    #if name == "BM":
    #    mol.elenergies[1] = mol.elenergies[1] + 0.1
    indices_of_components.append(indx)
    components.append(mol)
print("Indices of selected molecules: ", indices_of_components)

# In[47]:

#
# Fractional aggregate
#
frac_eff = qr.Aggregate(components)
frac_eff.set_resonance_coupling_matrix(Jfm)

# In[48]:

#frac_B.save("fraction_40_4_B_unbuilt.hdf5")
qr.save_parcel(frac_eff, os.path.join(pre_out,
                                      "fraction_eff_40_4_CT_unbuilt.qrp"))
frac_eff.build()
frac_eff.diagonalize()
# effective-lineshape absorption of the fractional model
mabsc2 = qr.MockAbsSpectrumCalculator(time, system=frac_eff)
rho0 = frac_eff.get_DensityMatrix(condition_type="thermal", temperature=0.0)
ham = frac_eff.get_Hamiltonian()
pthways = frac_eff.liouville_pathways_1(lab=lab, ham=ham, etol=1.0e-5,
                                        verbose=0)
mabsc2.bootstrap(rwa=qr.convert(10000.0,"1/cm","int"),
                 shape="Gaussian")
mabsc2.set_pathways(pthways)
abs2 = mabsc2.calculate(raw=False)
abs2.normalize2()
plt.figure(2)
with qr.energy_units("1/cm"):
    #abss2.plot(axis=[10500, 15000, 0, 1.1], show=False)
    abs2.plot(axis=[10500, 15000, 0, 1.1], show=False)
    absexp2.plot(show=False)
    absexp2.savefig(os.path.join(pre_out, "abs_frac_eff.png"))
|
20,805 | 911f4ca1f52fab6ba37c222c516ab97ed0ad2334 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""Convert tensorflow checkpoint to oneflow snapshot"""
import re
import argparse
import tensorflow as tf
import numpy as np
import os
parser = argparse.ArgumentParser()

## Required parameters
parser.add_argument("--tf_checkpoint_path",
                    default = None,
                    type = str,
                    required = True,
                    help = "Path the TensorFlow checkpoint path.")
parser.add_argument("--of_dump_path",
                    default = None,
                    type = str,
                    required = True,
                    help = "Path to the output OneFlow model.")

#args = parser.parse_args()
# parse_known_args lets callers append ad-hoc "<name>=<value>" pairs, which
# become extra constant-filled weight blobs (see _SaveWeightBlob2File).
args, unknown = parser.parse_known_args()
print(args)

# parse unknown arguments for extra weights: each must look like "name=1.0"
extra_weights = {}
for u in unknown:
    w = u.split("=")
    assert len(w) == 2
    if len(w) == 2:  # redundant after the assert; kept as-is
        extra_weights[w[0]] = float(w[1])
def _write_blob(folder, blob):
os.makedirs(folder, exist_ok=True)
filename = os.path.join(folder, "out")
f = open(filename, 'wb')
f.write(blob.tobytes())
f.close()
print(filename, blob.shape)
def _SaveWeightBlob2File(blob, folder):
    """Write *blob* to *folder*, plus one constant blob per requested extra weight."""
    _write_blob(folder, blob)
    # Each extra weight gets a blob of the same shape filled with its constant
    # default value, saved under "<folder><suffix>".
    for suffix, value in extra_weights.items():
        _write_blob(folder + suffix, np.full_like(blob, value))
def convert():
    """Convert every variable in the TF checkpoint into OneFlow blob files."""
    path = args.tf_checkpoint_path
    init_vars = tf.train.list_variables(path)
    for name, shape in init_vars:
        array = tf.train.load_variable(path, name)
        # split "scope/op/blob" into the op path and the final blob name
        sep = name.rfind('/')
        blob_name = name[sep + 1:]
        op_name = name[:sep].replace('/', '-')
        if blob_name == "kernel":
            blob_name = "weight"  # TF "kernel" corresponds to OneFlow "weight"
        elif blob_name in ['adam_m', 'adam_v']:
            # optimizer slots are kept under their original names
            print("find m, v weights")
        folder_name = 'student'+'-'+op_name+"-"+blob_name
        folder = os.path.join(args.of_dump_path, folder_name)
        #print("saved to:", folder)
        _SaveWeightBlob2File(array, folder)

if __name__ == "__main__":
    convert()
|
20,806 | 9a2d95e1967d98fb3375dbdd48151c0cddb212b0 | from QASMParser.QASMTypes import *
def set_lang():
    """Install the Python emitters on each QASM type (monkey-patches to_lang)."""
    bindings = (
        (ClassicalRegister, ClassicalRegister_to_Python),
        (QuantumRegister, QuantumRegister_to_Python),
        (Let, Let_to_Python),
        (Argument, Argument_to_Python),
        (CallGate, CallGate_to_Python),
        (Comment, Comment_to_Python),
        (Measure, Measure_to_Python),
        (IfBlock, IfBlock_to_Python),
        (Gate, CreateGate_to_Python),      # Gate and Opaque share one emitter
        (Opaque, CreateGate_to_Python),
        (PyBlock, PyBlock_to_Python),
        (Loop, Loop_to_Python),            # Loop and NestLoop share one emitter
        (NestLoop, Loop_to_Python),
        (Reset, Reset_to_Python),
        (Output, Output_to_Python),
        (InitEnv, init_env),
    )
    for qasm_type, emitter in bindings:
        qasm_type.to_lang = emitter
# Several details pertaining to the language in question
hoistFuncs = False # Move functions to front of program
hoistVars  = False # Move variables to front of program
bareCode   = False # Can code be bare or does it need to be in function
blockOpen  = ":"   # Block delimiters
blockClose = ""    # ""     ""
indent     = " "   # Standard indent depth
def Python_include(filename):
    """Return a star-import line for module *filename*."""
    # BUG FIX: the argument was ignored and a literal placeholder was returned.
    return f'from {filename} import *'

header = [Python_include("QuESTLibs")]
def init_env(self):
    """Emit the line that creates the global QuEST execution environment."""
    # f-prefix removed: the literal contains no placeholders
    return 'Env = createQuESTEnv()'
def Output_to_Python(self):
    """Emit a print of one classical bit: ``print(<creg>[<index>])``."""
    register, bit_index = self._cargs
    return f'print({register.name}[{bit_index}])'
def Reset_to_Python(self):
    """Emit a reset: collapse the addressed qubit to outcome 0."""
    target = self.resolve_arg(self._qargs)
    return f'collapseToOutcome(qreg, {target}, 0)'
def ClassicalRegister_to_Python(self):
    """Declare a classical register as a zero-filled Python list."""
    return "{} = [0]*{}".format(self.name, self.size)
def QuantumRegister_to_Python(self):
    """Declare a quantum register backed by a QuEST Qureg in the global Env."""
    return "{} = createQureg({}, Env)".format(self.name, self.size)
def Argument_to_Python(self):
    """Render a gate argument: classical args by name, quantum args as name + index."""
    if self.classical:
        return f'{self.name}'
    return f'{self.name}, {self.name}_index'
def Let_to_Python(self):
    """Emit an assignment for a Let binding.

    No value and no type -> ``x = None``; no value but a type -> default-
    construct the type; otherwise format the value (lists join on commas)
    and wrap it in a cast when one is requested.
    """
    var = self.const
    target = var.name
    if var.val is None:
        if var.var_type is None:
            return f"{target} = None"
        return f"{target} = {var.var_type}()"
    # exact type check kept from the original (list subclasses fall through)
    value = ",".join(var.val) if type(var.val) is list else f"{var.val}"
    if var.cast:
        value = f"{var.cast}({value})"
    return f"{target} = {value}"
def PyBlock_to_Python(self):
    """Python passthrough blocks emit nothing at this stage."""
    return ""
def CallGate_to_Python(self):
    """Emit a call to a previously defined gate.

    Quantum arguments are prefixed with the global ``qreg`` and resolved to
    their indices; classical arguments are appended verbatim.
    """
    arg_parts = []
    if self._qargs:
        arg_parts.append("qreg")
        arg_parts.extend(self.resolve_arg(qarg) for qarg in self._qargs)
    arg_parts.extend(self._cargs)
    # (dead "preString" scaffolding from the original removed — it looped
    # over an always-empty list)
    return f"{self.name}({', '.join(arg_parts)})"
def Comment_to_Python(self):
    """Prefix the stored comment text with the Python comment marker."""
    return "#{}".format(self.comment)
def Measure_to_Python(self):
    """Emit a measurement that stores the outcome into a classical bit."""
    register, bit_index = self._cargs
    target = self.resolve_arg(self._qargs)
    return f"{register.name}[{bit_index}] = measure(qreg, {target})"
def IfBlock_to_Python(self):
    """Emit the opening of a conditional block (colon added by the writer)."""
    return "if ({})".format(self._cond)
def CreateGate_to_Python(self):
    """Emit the ``def`` line for a gate definition.

    An empty gate body (a lone ';') is replaced with a single ``pass``
    statement so the generated Python stays syntactically valid.
    """
    if type(self._code[0]) is Verbatim and self._code[0].line == ";":
        self._code = [Verbatim("pass")]
    params = []
    if self._qargs:
        params.append("qreg")
        params.extend(f"{qarg}_index" for qarg in self._qargs)
    params.extend(self._cargs)
    return "def {}({})".format(self.name, ", ".join(params))
def Loop_to_Python(self):
    """Emit a ``for ... in range(start, end, step)`` loop header."""
    return "for {} in range({}, {}, {})".format(
        self.var, self.start, self.end, self.step)
|
20,807 | 883722ea941f1f7e888292d0f1e0f0da2389a0c1 | """
The pico views is supposed to mimic a pico server
"""
from django.http import HttpResponse
from django.shortcuts import render_to_response
from xml.sax import parse
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
import os
import sys
import time
def rooms(request):
    # Plain-text list of known rooms, rendered straight from a template.
    # NOTE(review): ``mimetype=`` is the pre-Django-1.7 spelling; newer
    # versions expect ``content_type`` — confirm the target Django version.
    return render_to_response('list_of_rooms.txt', mimetype='text/plain')
def info(request, room):
    # Room metadata as XML; the ``room`` URL argument is currently ignored —
    # every room returns the same canned template.
    return render_to_response('danceroom.xml', mimetype='text/xml')
def feed(request, room):
    # Streams the canned XML feed (a generator) back to the client; the
    # ``room`` URL argument is currently ignored.
    return HttpResponse(xml_feed(), mimetype="text/xml")
def xml_feed():
    """Yield the canned test feed line by line, sleeping one second after
    each closed <room> element to imitate a slowly-streaming pico server.

    Bug fix: the file handle was never closed; the with-block releases it
    once the generator is exhausted or garbage-collected.
    """
    path = os.path.join(os.path.dirname(__file__), 'templates/test_feed.xml')
    with open(path) as feed_file:
        for line in feed_file:
            yield line
            if "</room>" in line:
                time.sleep(1)
|
20,808 | c1921a8b44a1739d3a7d39a63d56fda200ca9f51 | import numpy as np
import pytest
from src.Curves.Curve import Curve
from src.Enums.Frequency import Frequency
from src.Options.Swaption import PayerReceiver
from src.Options.Swaption import Swaption
@pytest.fixture
def flat_curve():
    """Single-tenor curve with a flat 10% zero rate."""
    return Curve(tenors=np.array([1.0]), zero_rates=np.array([0.1]))
def test_swaption_irs(flat_curve):
    """Black price of the payer swaption at 70% vol matches the regression value."""
    swaption: Swaption = Swaption(1.0, 0.1, 1.0, 1.0, 2.0, Frequency.QUARTERLY, PayerReceiver.PAYER)
    assert np.allclose(0.023270738579718328, swaption.black_price(flat_curve, vol=0.7))
|
20,809 | 0a3fcc1489505fa8b8e07c7a9e2647b160b27f7a | #作业二:
#使用 Scrapy 框架和 XPath
# 抓取猫眼电影的前 10 个电影名称、电影类型和上映时间,
# 并以 UTF-8 字符集保存到 csv 格式的文件中。
#要求:必须使用 Scrapy 框架及其自带的 item pipeline、选择器功能,
# 不允许使用 bs4 进行页面内容的筛选。
|
20,810 | 070ba469390b2df40f8b7157ccb5593eb624c4b7 | """Tests for django.db.backends.utils"""
from decimal import Decimal, Rounded
from django.db import NotSupportedError, connection
from django.db.backends.utils import (
format_number,
split_identifier,
split_tzname_delta,
truncate_name,
)
from django.test import (
SimpleTestCase,
TransactionTestCase,
skipIfDBFeature,
skipUnlessDBFeature,
)
class TestUtils(SimpleTestCase):
    """Unit tests for helper functions in django.db.backends.utils."""
    def test_truncate_name(self):
        """Over-long names are shortened and suffixed with a hash fragment."""
        self.assertEqual(truncate_name("some_table", 10), "some_table")
        self.assertEqual(truncate_name("some_long_table", 10), "some_la38a")
        self.assertEqual(truncate_name("some_long_table", 10, 3), "some_loa38")
        self.assertEqual(truncate_name("some_long_table"), "some_long_table")
        # "user"."table" syntax: only the table part is truncated.
        self.assertEqual(
            truncate_name('username"."some_table', 10), 'username"."some_table'
        )
        self.assertEqual(
            truncate_name('username"."some_long_table', 10), 'username"."some_la38a'
        )
        self.assertEqual(
            truncate_name('username"."some_long_table', 10, 3), 'username"."some_loa38'
        )
    def test_split_identifier(self):
        """Identifiers split into (namespace, name) with quoting stripped."""
        self.assertEqual(split_identifier("some_table"), ("", "some_table"))
        self.assertEqual(split_identifier('"some_table"'), ("", "some_table"))
        self.assertEqual(
            split_identifier('namespace"."some_table'), ("namespace", "some_table")
        )
        self.assertEqual(
            split_identifier('"namespace"."some_table"'), ("namespace", "some_table")
        )
    def test_format_number(self):
        """Decimals are rounded/padded to the requested digits and places."""
        # Local helper: compare format_number output against the expectation.
        def equal(value, max_d, places, result):
            self.assertEqual(format_number(Decimal(value), max_d, places), result)
        equal("0", 12, 3, "0.000")
        equal("0", 12, 8, "0.00000000")
        equal("1", 12, 9, "1.000000000")
        equal("0.00000000", 12, 8, "0.00000000")
        equal("0.000000004", 12, 8, "0.00000000")
        equal("0.000000008", 12, 8, "0.00000001")
        equal("0.000000000000000000999", 10, 8, "0.00000000")
        equal("0.1234567890", 12, 10, "0.1234567890")
        equal("0.1234567890", 12, 9, "0.123456789")
        equal("0.1234567890", 12, 8, "0.12345679")
        equal("0.1234567890", 12, 5, "0.12346")
        equal("0.1234567890", 12, 3, "0.123")
        equal("0.1234567890", 12, 1, "0.1")
        equal("0.1234567890", 12, 0, "0")
        equal("0.1234567890", None, 0, "0")
        equal("1234567890.1234567890", None, 0, "1234567890")
        equal("1234567890.1234567890", None, 2, "1234567890.12")
        equal("0.1234", 5, None, "0.1234")
        equal("123.12", 5, None, "123.12")
        # With no decimal_places, exceeding max_digits raises Rounded.
        with self.assertRaises(Rounded):
            equal("0.1234567890", 5, None, "0.12346")
        with self.assertRaises(Rounded):
            equal("1234567890.1234", 5, None, "1234600000")
    def test_split_tzname_delta(self):
        """Timezone names split into (name, sign, HH:MM offset) triples."""
        tests = [
            ("Asia/Ust+Nera", ("Asia/Ust+Nera", None, None)),
            ("Asia/Ust-Nera", ("Asia/Ust-Nera", None, None)),
            ("Asia/Ust+Nera-02:00", ("Asia/Ust+Nera", "-", "02:00")),
            ("Asia/Ust-Nera+05:00", ("Asia/Ust-Nera", "+", "05:00")),
            ("America/Coral_Harbour-01:00", ("America/Coral_Harbour", "-", "01:00")),
            ("America/Coral_Harbour+02:30", ("America/Coral_Harbour", "+", "02:30")),
            ("UTC+15:00", ("UTC", "+", "15:00")),
            ("UTC-04:43", ("UTC", "-", "04:43")),
            ("UTC", ("UTC", None, None)),
            ("UTC+1", ("UTC+1", None, None)),
        ]
        for tzname, expected in tests:
            with self.subTest(tzname=tzname):
                self.assertEqual(split_tzname_delta(tzname), expected)
class CursorWrapperTests(TransactionTestCase):
    """Exercises cursor.callproc and backend stored-procedure support."""
    available_apps = []
    def _test_procedure(self, procedure_sql, params, param_types, kparams=None):
        """Create test_procedure, call it with params/kparams, then drop it."""
        with connection.cursor() as cursor:
            cursor.execute(procedure_sql)
        # Use a new cursor because in MySQL a procedure can't be used in the
        # same cursor in which it was created.
        with connection.cursor() as cursor:
            cursor.callproc("test_procedure", params, kparams)
        with connection.schema_editor() as editor:
            editor.remove_procedure("test_procedure", param_types)
    @skipUnlessDBFeature("create_test_procedure_without_params_sql")
    def test_callproc_without_params(self):
        """Procedures with no parameters can be called."""
        self._test_procedure(
            connection.features.create_test_procedure_without_params_sql, [], []
        )
    @skipUnlessDBFeature("create_test_procedure_with_int_param_sql")
    def test_callproc_with_int_params(self):
        """Positional integer parameters are passed through."""
        self._test_procedure(
            connection.features.create_test_procedure_with_int_param_sql,
            [1],
            ["INTEGER"],
        )
    @skipUnlessDBFeature(
        "create_test_procedure_with_int_param_sql", "supports_callproc_kwargs"
    )
    def test_callproc_kparams(self):
        """Keyword parameters work where the backend supports them."""
        self._test_procedure(
            connection.features.create_test_procedure_with_int_param_sql,
            [],
            ["INTEGER"],
            {"P_I": 1},
        )
    @skipIfDBFeature("supports_callproc_kwargs")
    def test_unsupported_callproc_kparams_raises_error(self):
        """Backends without kwargs support raise NotSupportedError."""
        msg = (
            "Keyword parameters for callproc are not supported on this database "
            "backend."
        )
        with self.assertRaisesMessage(NotSupportedError, msg):
            with connection.cursor() as cursor:
                cursor.callproc("test_procedure", [], {"P_I": 1})
|
20,811 | dcd60ad41fce47d28d6db6e007daf08325e4f268 | # Generated by Django 2.2.5 on 2021-01-19 06:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: City, State and Employee_info with their foreign keys.

    The City -> State FK is appended after the CreateModel operations
    because City is created before State in the operation list.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='City',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('City_id', models.CharField(blank=True, max_length=10, null=True, unique=True)),
                ('City_name', models.CharField(blank=True, max_length=10, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='State',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('State_id', models.CharField(blank=True, max_length=10, null=True, unique=True)),
                ('State_name', models.CharField(blank=True, max_length=10, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Employee_info',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): max_length=10 is tight for names/emails below -- confirm intended.
                ('Bank_name', models.CharField(blank=True, max_length=10, null=True)),
                ('Branch_name', models.CharField(blank=True, max_length=10, null=True)),
                ('Branch_code', models.CharField(blank=True, max_length=10, null=True)),
                ('Region_head', models.CharField(blank=True, max_length=10, null=True)),
                ('Emp_id', models.CharField(blank=True, max_length=10, null=True)),
                ('Emp_name', models.CharField(blank=True, max_length=10, null=True)),
                ('Phone_no', models.CharField(blank=True, max_length=10, null=True)),
                ('email', models.EmailField(blank=True, max_length=254, null=True)),
                ('designation', models.CharField(blank=True, max_length=10, null=True)),
                ('grade', models.CharField(blank=True, choices=[('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D')], max_length=10, null=True)),
                ('CTC', models.CharField(blank=True, max_length=10, null=True)),
                ('remarks', models.TextField(blank=True, null=True)),
                ('createdAt', models.DateTimeField(auto_now_add=True, null=True)),
                ('updatedAt', models.DateTimeField(auto_now=True, null=True)),
                ('City_name', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='larave_appl.City')),
                ('State', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='larave_appl.State')),
            ],
        ),
        migrations.AddField(
            model_name='city',
            name='City_State',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='larave_appl.State'),
        ),
    ]
|
20,812 | a5fa404b6f931333f328f3f945c5b177d796ea36 | from django.contrib import admin
# Register your models here.
from blogs.models import Blog
@admin.register(Blog)
class BlogRegister(admin.ModelAdmin):
    """Expose Blog in the Django admin with default ModelAdmin behaviour."""
    pass
20,813 | ea83d785cb88345a1faf7448df2ae8377ce8713c | import turtle
import string
# Row labels: 'A' for row 0, 'B' for row 1, ...
alphabet = string.ascii_uppercase
def make_table(turtle, rows, cols):
    """Draw a rows x cols grid of cell labels (A0, A1, ... then B0, ...).

    Labels are written left to right, 40 units apart; after a row is
    finished the pen jumps back to the left margin one row lower.

    Bug fix: the return-to-margin `goto` was inside the column loop, so
    every cell after the first in a row was written at the same position.
    """
    for row in range(rows):
        for col in range(cols):
            turtle.down()
            turtle.write("{}{}".format(alphabet[row], col), font = ("Arial", 20, "normal"))
            turtle.up()
            turtle.forward(40)
        # Move to the start of the next row only after the row is complete.
        turtle.goto(-300, 270 - 30 * row)
def main():
    """Create a turtle, draw the 5x5 label table and wait for a click."""
    leo = turtle.Turtle()
    canvas = turtle.Screen()
    leo.speed(1)
    leo.up()
    leo.goto(-300, 300)
    make_table(leo, 5, 5)
    canvas.exitonclick()
# Runs on import: there is no __main__ guard.
main()
|
20,814 | bf3389f00582ff561b2ccac6f64d2b4eff461f4c | import os, glob
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
# 분류대상 카테고리 선택
caltech_dir = "./image/101_ObjectCategories"
categories = ["chair", "camera", "butterfly", "elephant", "flamingo"]
nb_classes = len(categories)
# 이미지 크기 지정
image_width = 64
image_height = 64
pixels = image_width * image_height
# 이미지 읽기
X = []
Y = []
for idx, cat in enumerate(categories) :
# 레이블 지정
label = [0 for i in range(nb_classes)]
label[idx] = 1
# 이미지
image_dir = caltech_dir + "/" + cat
files = glob.glob(image_dir + "/*.jpg")
print(image_dir)
for i, f in enumerate(files) :
img = Image.open(f)
img = img.convert("RGB")
img = img.resize((image_width, image_height))
data = np.asarray(img)
X.append(data)
Y.append(label)
if i % 10 == 0 :
print(i, "\n", data)
X = np.array(X)
Y = np.array(Y)
# 학습 & 테스트 데이터 구분
X_train , X_test, y_train, y_test = \
train_test_split(X, Y)
xy = (X_train, X_test, y_train, y_test)
np.save("./image/5obj.npy", xy)
print("ok", len(Y)) |
20,815 | 9fa51a9b1b0b621c3c795d283e795c9f84af5625 | s=int(input())
print(s//(60*60),s//60) |
20,816 | d6cd4e6cb15026de9836198b5c352c3e44afb1a9 | from flask import Flask
from apiGatewayAPI import api_gateway_api
from flask_cors import CORS
# Application setup: allow cross-origin requests and mount the gateway blueprint.
app = Flask(__name__)
CORS(app)
app.register_blueprint(api_gateway_api)
@app.route('/')
def hello_world():
    """Health-check style root endpoint."""
    return 'Hello World!'
if __name__ == '__main__':
    app.run(debug=True,
            port=5010,
            )
|
20,817 | 9c46c361246d5180cf99c8862f927af837e57f93 | import asyncio
from decimal import Decimal as D
from django.conf import settings
from absortium.client import get_absortium_client
from core.utils.logging import getPrettyLogger
from poloniex.app import Application
from poloniexbot import constants
from poloniexbot.utils import update_storage, synchronize_orders, filter_orders, get_locked_balance, cut_off_orders, \
create_actions, convert
__author__ = "andrew.shvv@gmail.com"
logger = getPrettyLogger(__name__)
# Absortium REST client used for mirroring orders (the base URI is the
# docker-compose backend service address).
client = get_absortium_client(api_key=settings.ABSORTIUM_API_KEY,
                              api_secret=settings.ABSORTIUM_API_SECRET,
                              base_api_uri="http://docker.backend:3000")
# In-memory mirror of the Poloniex order book, kept current by push updates.
storage = {
    'orders': {
        'sell': [],
        'buy': []
    }
}
class PoloniexApp(Application):
    """Mirrors the Poloniex order book onto the Absortium exchange.

    Push updates keep `storage` current; the main loop periodically diffs
    the mirrored book against our open Absortium orders and issues
    create/update/cancel actions to close the gap.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    @staticmethod
    def updates_handler(**update):
        """Push-API callback: fold order removed/modified events into storage."""
        if update.get('type') in [constants.POLONIEX_ORDER_REMOVED, constants.POLONIEX_ORDER_MODIFIED]:
            order = update.get("data")
            order["pair"] = update.get("currency_pair")
            order = convert(order)
            update_storage(storage, order)
    async def main(self):
        """Cancel stale orders, subscribe to updates, then sync forever."""
        logger.debug("--" * 20 + "Cancel" + "--" * 20)
        # Start from a clean slate: cancel every order still open on Absortium.
        for order in [order for order in client.orders.list() if order['status'] not in ['canceled', 'completed']]:
            logger.debug(client.orders.cancel(pk=order['pk']))
        # 1. Turn on Poloniex orders update.
        self.push_api.subscribe(topic=constants.CURRENCY_PAIR, handler=PoloniexApp.updates_handler)
        # 2. Get Poloniex orders.
        orders = await self.public_api.returnOrderBook(currencyPair=constants.CURRENCY_PAIR, depth=constants.COUNT)
        # 3. Merge Poloniex orders.
        synchronize_orders(storage, orders)
        # Every 5 second:
        # NOTE(review): the sleep below is 0.5s, not 5s -- confirm intended period.
        while True:
            order_type = 'sell'
            from_currency = 'eth'
            to_currency = 'btc'
            # 1. Get Absortium orders.
            absortium_orders = client.orders.list(order_type=order_type)
            # 2. Leave only 'init' and 'pending' orders.
            absortium_orders = filter_orders(absortium_orders)
            # 3. Calculate how many amount we have to operate with. We should take into account money
            # restriction (Not all order from Poloniex will be synced, because we do not have such amount of money)
            account = client.accounts.retrieve(currency=from_currency)
            accounts_balance = D(account['amount'])
            amount = get_locked_balance(absortium_orders) + accounts_balance
            logger.debug("--" * 20 + "Account" + "--" * 20)
            logger.debug(account)
            # 4. Get Poloniex orders and cut off redundant.
            poloniex_orders = storage['orders'][order_type]
            logger.debug("--" * 20 + "Before cut" + "--" * 20)
            logger.debug(poloniex_orders)
            poloniex_orders = cut_off_orders(amount, poloniex_orders)
            logger.debug("--" * 20 + "After cut" + "--" * 20)
            logger.debug(poloniex_orders)
            # 5. What should have to do to sync the orders?
            actions = create_actions(absortium_orders, poloniex_orders)
            logger.debug("--" * 20 + "Actions" + "--" * 20)
            for order in actions['delete']:
                logger.debug({'delete': order})
                client.orders.cancel(**order)
            for order in actions['update']:
                del order['total']
                logger.debug({'update': order})
                client.orders.update(**order)
            for order in actions['create']:
                logger.debug({'create': order})
                client.orders.create(**order)
            await asyncio.sleep(0.5)
|
20,818 | 6918038a382e23b62677c5cdab7f392da6b6be4b | from flask import Flask, request
from flask import render_template, send_file
import io
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def hello_world():
    """Serve the main page (upload form)."""
    return render_template('main.html')
@app.route('/image_upload/', methods=['GET', 'POST'])
def image_upload():
    """Echo an uploaded image back to the client as a file download.

    NOTE(review): on GET this falls through and returns None, which Flask
    rejects -- confirm whether GET should render a form instead.
    """
    if request.method == 'POST':
        f = request.files['image']
        return send_file(filename_or_fp=io.BytesIO(f.read()), attachment_filename=f.filename)
if __name__ == '__main__':
    app.debug = True
    app.run()
    # app.run(host='0.0.0.0')
20,819 | 654056d8e4fd2fc22eaa882832568af16d38e065 | #!/usr/bin/env python
class Node:
    """A single linked-list cell: payload plus reference to the next cell."""
    def __init__(self, data, next=None):
        self.data = data
        self.next = next
class LinkedList:
    """Minimal singly linked list; print_list joins data, so payloads are strings."""
    def __init__(self, data, head=None):
        # NOTE(review): the freshly created node is only used as head when no
        # head is supplied -- confirm whether the head parameter is ever used.
        self.node = Node(data)
        self.head = head
        if not self.head:
            self.head = self.node
    def append(self, data):
        """Walk to the tail and attach a new node holding data."""
        node = Node(data)
        current_node = self.head
        while current_node.next != None:
            current_node = current_node.next
        current_node.next = node
        return
    def print_list(self):
        """Return an 'a -> b -> c' style rendering of the list."""
        entries = []
        current_node = self.head
        while current_node.next != None:
            entries.append(current_node.data)
            current_node = current_node.next
        entries.append(current_node.data)
        return " -> ".join(entries)
    def length_iterative(self):
        """Count nodes with a loop."""
        count = 0
        current_node = self.head
        while current_node.next != None:
            current_node = current_node.next
            count += 1
        return count + 1
    def length_recursive(self, node, count):
        """Count nodes recursively; callers start with count=None.

        NOTE(review): returns None when node is None (empty list) -- confirm.
        """
        if not count:
            count = 0
        if not node:
            return
        elif node.next is None:
            count += 1
            return count
        count += 1
        return self.length_recursive(node.next, count)
    def get_count(self):
        """Convenience wrapper: recursive length starting from the head."""
        return self.length_recursive(self.head, count=None)
# Demo (Python 2 print statements -- this module is Python 2 only).
ll = LinkedList('a')
ll.append('b')
ll.append('c')
ll.append('d')
print "list = {}".format(ll.print_list())
print "Length via iteration = {}".format(ll.length_iterative())
print "Length via recursion = {}".format(ll.get_count())
|
20,820 | abc7e562591e064895f9b053a4cf257e9cca99da | from clove.network.bitcoin import Bitcoin
class Neoscoin(Bitcoin):
    """
    Class with all the necessary Neoscoin (NEOS) network information based on
    https://github.com/neoscoin/neos-core/blob/master/src/chainparams.cpp
    (date of access: 02/16/2018)
    """
    name = 'neoscoin'
    symbols = ('NEOS', )
    seeds = ('nodes.neoscoin.com', )
    port = 29320
    # Network magic: the first bytes of every P2P message.
    message_start = b'\xd3\x1a\x3d\xe4'
    # Version bytes used when serializing addresses / private keys.
    base58_prefixes = {
        'PUBKEY_ADDR': 53,
        'SCRIPT_ADDR': 5,
        'SECRET_KEY': 177
    }
    # no testnet
|
20,821 | 54fec64813c0f861848a4a452b7d1e0e468e02e9 | from flask import Flask, jsonify, request
import json
app = Flask(__name__)
@app.route('/<int:id>')
def pessoa(id):
    """Return a fixed person payload echoing the requested id."""
    soma = 1 + id  # NOTE(review): unused local -- confirm whether it was meant to be returned
    return jsonify({'id': id, 'nome':'Arthur'})
# @app.route('/soma/<int:v1>/<int:v2>/')
# def soma(v1,v2):
#     return {'soma': v1 + v2}
@app.route('/soma', methods= ['POST', 'PUT', 'GET'] )
def soma():
    """POST: sum the JSON 'valores' list. GET: fixed demo value.

    NOTE(review): PUT is routed but leaves `total` unbound, which would
    raise -- confirm the intended method list.
    """
    if request.method == 'POST':
        dados = json.loads(request.data)
        total = sum(dados['valores'])
    elif request.method == 'GET':
        total = 10+10
    return {'soma': total}
if __name__ == '__main__':
    app.run(debug=True)
20,822 | 2ea9ee67e549674116fd24cbf746989c9a0f76ee | '''
Created on Sep 29, 2018
@author: Leo
'''
from configReader import ConfigReader
from sensorEmulator import SensorEmulator
from threading import Thread
from sensorData import SensorData
import sense_hat
class TempSensorAdaptor(Thread):
    """Thread that keeps `currentTemp` refreshed from either the SenseHAT or
    the temperature emulator, selected by the Device configuration."""
    # Class-level defaults; __init__ rebinds them per instance.
    deviceConfigReader=None
    tempSensorEmulator=None
    enableTempEmulator=False
    currentTemp=0
    currentSensorData=None
    sh=None
    def __init__(self,configDir):
        Thread.__init__(self)
        self.deviceConfigReader=ConfigReader(configDir,"Device")
        self.sh=sense_hat.SenseHat()
        self.currentSensorData=SensorData("Temperature")
        #SensorEmulator simulates temperature range from 25 degree to 15 degree
        #temperature is changed after a random period of time in range 10 sec to 1 sec
        self.tempSensorEmulator=SensorEmulator(30, 15, 10, 1)
        self.tempSensorEmulator.daemon=True
        self.tempSensorEmulator.start()
        if self.deviceConfigReader.getProperty("enableEmulator") =="True":
            self.enableTempEmulator=True
            self.tempSensorEmulator.enableTempEmulator=True
    def getCurrentTemp(self):
        """Latest raw temperature reading."""
        return self.currentTemp
    def getCurrentSensorData(self):
        """Push the latest reading into the SensorData record and return it."""
        self.currentSensorData.addNewValue(self.currentTemp) # refresh SensorData only when it is called.
        return self.currentSensorData
    def run(self):
        """Continuously pull the temperature from the selected source.

        NOTE(review): tight loop with no sleep -- will spin a CPU core; confirm.
        """
        while True:
            if self.enableTempEmulator: #If enableTempEmulator is false then using sense_hat to get current temperature!
                self.currentTemp=self.tempSensorEmulator.getCurrValue()
            else:
                self.currentTemp=self.sh.get_temperature()
    def setflagEnableTempEmulator(self,enableTempEmulator):
        """Toggle emulator mode on both this adaptor and the emulator thread."""
        self.enableTempEmulator=enableTempEmulator
        self.tempSensorEmulator.enableTempEmulator=enableTempEmulator
20,823 | 79784765438c980fc856b93981518afdc56b338c | # Importing the dataset
import pandas_datareader.data as web
import datetime
import requests_cache
expire_after = datetime.timedelta(days=3)
session = requests_cache.CachedSession(cache_name='cache', backend='sqlite', expire_after=expire_after)
start = datetime.datetime(2016, 12, 31)
end = datetime.datetime(2019, 12, 30)
f = web.DataReader("F", 'yahoo', start, end, session=session)
df = pd.DataFrame(f)
print('DataFrame:\n', df)
# default CSV
csv_data = df.to_csv('file4.csv')
print('\nCSV String:\n', csv_data)
df.to_csv('file1.csv')
|
20,824 | 9027002b952f801c6a5876f4be140a2e541c725f | import os
import math
import pickle
import numpy as np
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from LSTMLinear import LSTMModel
# Pick tensor constructors once so the models run on either GPU or CPU.
USE_CUDA = torch.cuda.is_available()
if USE_CUDA:
    longTensor = torch.cuda.LongTensor
    floatTensor = torch.cuda.FloatTensor
else:
    longTensor = torch.LongTensor
    floatTensor = torch.FloatTensor
class TTransEModel(nn.Module):
    """TTransE: TransE extended with a temporal embedding.

    The score of a (h, r, t, time) quadruple is the distance (L1 or
    squared L2, per config.L1_flag) of h + r + tem from t.
    """
    def __init__(self, config):
        super(TTransEModel, self).__init__()
        self.learning_rate = config.learning_rate
        self.early_stopping_round = config.early_stopping_round
        self.L1_flag = config.L1_flag
        self.filter = config.filter
        self.embedding_size = config.embedding_size
        self.entity_total = config.entity_total
        self.relation_total = config.relation_total
        self.tem_total = config.tem_total
        self.batch_size = config.batch_size
        ent_weight = floatTensor(self.entity_total, self.embedding_size)
        rel_weight = floatTensor(self.relation_total, self.embedding_size)
        tem_weight = floatTensor(self.tem_total, self.embedding_size)
        # Use xavier initialization method to initialize embeddings of entities and relations
        # NOTE(review): nn.init.xavier_uniform is deprecated in newer PyTorch
        # (xavier_uniform_) -- confirm the targeted torch version.
        nn.init.xavier_uniform(ent_weight)
        nn.init.xavier_uniform(rel_weight)
        nn.init.xavier_uniform(tem_weight)
        self.ent_embeddings = nn.Embedding(self.entity_total, self.embedding_size)
        self.rel_embeddings = nn.Embedding(self.relation_total, self.embedding_size)
        self.tem_embeddings = nn.Embedding(self.tem_total, self.embedding_size)
        self.ent_embeddings.weight = nn.Parameter(ent_weight)
        self.rel_embeddings.weight = nn.Parameter(rel_weight)
        self.tem_embeddings.weight = nn.Parameter(tem_weight)
        # L2-normalize each embedding table once after initialization.
        normalize_entity_emb = F.normalize(self.ent_embeddings.weight.data, p=2, dim=1)
        normalize_relation_emb = F.normalize(self.rel_embeddings.weight.data, p=2, dim=1)
        normalize_temporal_emb = F.normalize(self.tem_embeddings.weight.data, p=2, dim=1)
        self.ent_embeddings.weight.data = normalize_entity_emb
        self.rel_embeddings.weight.data = normalize_relation_emb
        self.tem_embeddings.weight.data = normalize_temporal_emb
    def forward(self, pos_h, pos_t, pos_r, pos_tem, neg_h, neg_t, neg_r, neg_tem):
        """Return (positive, negative) translation distances for a batch."""
        pos_h_e = self.ent_embeddings(pos_h)
        pos_t_e = self.ent_embeddings(pos_t)
        pos_r_e = self.rel_embeddings(pos_r)
        pos_tem_e = self.tem_embeddings(pos_tem)
        neg_h_e = self.ent_embeddings(neg_h)
        neg_t_e = self.ent_embeddings(neg_t)
        neg_r_e = self.rel_embeddings(neg_r)
        neg_tem_e = self.tem_embeddings(neg_tem)
        if self.L1_flag:
            pos = torch.sum(torch.abs(pos_h_e + pos_r_e + pos_tem_e - pos_t_e), 1)
            neg = torch.sum(torch.abs(neg_h_e + neg_r_e + neg_tem_e - neg_t_e), 1)
        else:
            pos = torch.sum((pos_h_e + pos_r_e + pos_tem_e - pos_t_e) ** 2, 1)
            neg = torch.sum((neg_h_e + neg_r_e + neg_tem_e - neg_t_e) ** 2, 1)
        return pos, neg
class TADistmultModel(nn.Module):
    """TA-DistMult: DistMult scoring with a time-aware relation encoder.

    The relation vector is produced by an LSTM run over the relation
    embedding followed by the temporal-token embeddings (see get_rseq).
    """
    def __init__(self, config):
        super(TADistmultModel, self).__init__()
        self.learning_rate = config.learning_rate
        self.early_stopping_round = config.early_stopping_round
        self.L1_flag = config.L1_flag
        self.filter = config.filter
        self.embedding_size = config.embedding_size
        self.entity_total = config.entity_total
        self.relation_total = config.relation_total
        self.tem_total = config.tem_total  # 32
        self.batch_size = config.batch_size
        self.criterion = nn.Softplus()
        # Fix: removed a stray `torch.nn.BCELoss()` expression that created a
        # loss object and immediately discarded it (dead code).
        self.dropout = nn.Dropout(config.dropout)
        self.lstm = LSTMModel(self.embedding_size, n_layer=1)
        ent_weight = floatTensor(self.entity_total, self.embedding_size)
        rel_weight = floatTensor(self.relation_total, self.embedding_size)
        tem_weight = floatTensor(self.tem_total, self.embedding_size)
        # Use xavier initialization method to initialize embeddings of entities and relations
        # NOTE(review): nn.init.xavier_uniform is deprecated in newer PyTorch
        # (xavier_uniform_) -- confirm the targeted torch version.
        nn.init.xavier_uniform(ent_weight)
        nn.init.xavier_uniform(rel_weight)
        nn.init.xavier_uniform(tem_weight)
        self.ent_embeddings = nn.Embedding(self.entity_total, self.embedding_size)
        self.rel_embeddings = nn.Embedding(self.relation_total, self.embedding_size)
        self.tem_embeddings = nn.Embedding(self.tem_total, self.embedding_size)
        self.ent_embeddings.weight = nn.Parameter(ent_weight)
        self.rel_embeddings.weight = nn.Parameter(rel_weight)
        self.tem_embeddings.weight = nn.Parameter(tem_weight)
        # L2-normalize each embedding table once after initialization.
        normalize_entity_emb = F.normalize(self.ent_embeddings.weight.data, p=2, dim=1)
        normalize_relation_emb = F.normalize(self.rel_embeddings.weight.data, p=2, dim=1)
        normalize_temporal_emb = F.normalize(self.tem_embeddings.weight.data, p=2, dim=1)
        self.ent_embeddings.weight.data = normalize_entity_emb
        self.rel_embeddings.weight.data = normalize_relation_emb
        self.tem_embeddings.weight.data = normalize_temporal_emb
    def scoring(self, h, t, r):
        """DistMult score: sum of the trilinear product h * t * r per row."""
        return torch.sum(h * t * r, 1, False)
    def forward(self, pos_h, pos_t, pos_r, pos_tem, neg_h, neg_t, neg_r, neg_tem):
        """Return (positive, negative) DistMult scores for a batch."""
        pos_h_e = self.ent_embeddings(pos_h)
        pos_t_e = self.ent_embeddings(pos_t)
        pos_rseq_e = self.get_rseq(pos_r, pos_tem)
        neg_h_e = self.ent_embeddings(neg_h)
        neg_t_e = self.ent_embeddings(neg_t)
        neg_rseq_e = self.get_rseq(neg_r, neg_tem)
        pos_h_e = self.dropout(pos_h_e)
        pos_t_e = self.dropout(pos_t_e)
        pos_rseq_e = self.dropout(pos_rseq_e)
        neg_h_e = self.dropout(neg_h_e)
        neg_t_e = self.dropout(neg_t_e)
        neg_rseq_e = self.dropout(neg_rseq_e)
        pos = self.scoring(pos_h_e, pos_t_e, pos_rseq_e)
        neg = self.scoring(neg_h_e, neg_t_e, neg_rseq_e)
        return pos, neg
    def get_rseq(self, r, tem):
        """Encode (relation, temporal tokens) into one vector via the LSTM."""
        r_e = self.rel_embeddings(r)
        r_e = r_e.unsqueeze(0).transpose(0, 1)
        bs = tem.shape[0]  # batch size
        tem_len = tem.shape[1]
        tem = tem.contiguous()
        tem = tem.view(bs * tem_len)
        token_e = self.tem_embeddings(tem)
        token_e = token_e.view(bs, tem_len, self.embedding_size)
        seq_e = torch.cat((r_e, token_e), 1)
        hidden_tem = self.lstm(seq_e)
        hidden_tem = hidden_tem[0, :, :]
        rseq_e = hidden_tem
        return rseq_e
class TATransEModel(nn.Module):
    """TA-TransE: TransE scoring with a time-aware relation encoder.

    The relation vector comes from an LSTM over the relation embedding
    followed by temporal-token embeddings (get_rseq, duplicated from
    TADistmultModel).
    """
    def __init__(self, config):
        super(TATransEModel, self).__init__()
        self.learning_rate = config.learning_rate
        self.early_stopping_round = config.early_stopping_round
        self.L1_flag = config.L1_flag
        self.filter = config.filter
        self.embedding_size = config.embedding_size
        self.entity_total = config.entity_total
        self.relation_total = config.relation_total
        # NOTE(review): hard-coded to 32 here, unlike the other models which
        # read config.tem_total -- confirm this is intentional.
        self.tem_total = 32
        self.batch_size = config.batch_size
        self.dropout = nn.Dropout(config.dropout)
        self.lstm = LSTMModel(self.embedding_size, n_layer=1)
        ent_weight = floatTensor(self.entity_total, self.embedding_size)
        rel_weight = floatTensor(self.relation_total, self.embedding_size)
        tem_weight = floatTensor(self.tem_total, self.embedding_size)
        # Use xavier initialization method to initialize embeddings of entities and relations
        nn.init.xavier_uniform(ent_weight)
        nn.init.xavier_uniform(rel_weight)
        nn.init.xavier_uniform(tem_weight)
        self.ent_embeddings = nn.Embedding(self.entity_total, self.embedding_size)
        self.rel_embeddings = nn.Embedding(self.relation_total, self.embedding_size)
        self.tem_embeddings = nn.Embedding(self.tem_total, self.embedding_size)
        self.ent_embeddings.weight = nn.Parameter(ent_weight)
        self.rel_embeddings.weight = nn.Parameter(rel_weight)
        self.tem_embeddings.weight = nn.Parameter(tem_weight)
        # L2-normalize each embedding table once after initialization.
        normalize_entity_emb = F.normalize(self.ent_embeddings.weight.data, p=2, dim=1)
        normalize_relation_emb = F.normalize(self.rel_embeddings.weight.data, p=2, dim=1)
        normalize_temporal_emb = F.normalize(self.tem_embeddings.weight.data, p=2, dim=1)
        self.ent_embeddings.weight.data = normalize_entity_emb
        self.rel_embeddings.weight.data = normalize_relation_emb
        self.tem_embeddings.weight.data = normalize_temporal_emb
    def forward(self, pos_h, pos_t, pos_r, pos_tem, neg_h, neg_t, neg_r, neg_tem):
        """Return (positive, negative) translation distances for a batch."""
        pos_h_e = self.ent_embeddings(pos_h)
        pos_t_e = self.ent_embeddings(pos_t)
        pos_rseq_e = self.get_rseq(pos_r, pos_tem)
        neg_h_e = self.ent_embeddings(neg_h)
        neg_t_e = self.ent_embeddings(neg_t)
        neg_rseq_e = self.get_rseq(neg_r, neg_tem)
        pos_h_e = self.dropout(pos_h_e)
        pos_t_e = self.dropout(pos_t_e)
        pos_rseq_e = self.dropout(pos_rseq_e)
        neg_h_e = self.dropout(neg_h_e)
        neg_t_e = self.dropout(neg_t_e)
        neg_rseq_e = self.dropout(neg_rseq_e)
        if self.L1_flag:
            pos = torch.sum(torch.abs(pos_h_e + pos_rseq_e - pos_t_e), 1)
            neg = torch.sum(torch.abs(neg_h_e + neg_rseq_e - neg_t_e), 1)
        else:
            pos = torch.sum((pos_h_e + pos_rseq_e - pos_t_e) ** 2, 1)
            neg = torch.sum((neg_h_e + neg_rseq_e - neg_t_e) ** 2, 1)
        return pos, neg
    def get_rseq(self, r, tem):
        """Encode (relation, temporal tokens) into one vector via the LSTM."""
        r_e = self.rel_embeddings(r)
        r_e = r_e.unsqueeze(0).transpose(0, 1)
        bs = tem.shape[0]  # batch size
        tem_len = tem.shape[1]
        tem = tem.contiguous()
        tem = tem.view(bs * tem_len)
        token_e = self.tem_embeddings(tem)
        token_e = token_e.view(bs, tem_len, self.embedding_size)
        seq_e = torch.cat((r_e, token_e), 1)
        hidden_tem = self.lstm(seq_e)
        hidden_tem = hidden_tem[0, :, :]
        rseq_e = hidden_tem
        return rseq_e
|
20,825 | 2d9bdf304779ee77ec49f0bef5a5f4b766632a9f | #!/usr/bin/env python3
# -*- coding: utf-8 -*
"""
项目名称: Hopetree/qinglong
Author: Hopetree
功能:叮咚买菜签到
0、抓包拿到叮咚买菜 cookie
1、添加环境变量 DDXQ_COOKIE,多个用&拼接,例如 DDXQ_COOKIE=cookie1&cookie2
2、每日签到并查询当前积分信息
Date: 2021/11/3
Update: 2021/11/4
cron: 30 8 * * *
new Env('叮咚买菜签到');
"""
import os
import requests
from common.common import send_msg, hidden_key
# '&'-separated list of account cookies, supplied via the environment.
COOKIE = os.environ.get('DDXQ_COOKIE')
TITLE = '叮咚买菜签到'
class DDXQ:
    """One Dingdong Maicai account: verify its cookie, run the daily
    sign-in and collect a per-account result message."""
    # Result messages accumulated across all accounts (class-level, shared).
    content_list = []
    def __init__(self, cookie):
        self.cookie = cookie
        self.hidden_ck = hidden_key(self.cookie)
        # Query parameters the maicai API expects on every request.
        self.params = {'api_version': '9.7.3',
                       'app_version': '1.0.0',
                       'app_client_id': '3',
                       'native_version': '9.30.0',
                       'city_number': '1103',
                       'latitude': '22.602782',
                       'longitude': '113.852799'}
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 xzone/9.30.0',
            'Cookie': self.cookie
        }
    def check_and_sign_in(self):
        """Fetch the user profile; if that succeeds, sign in, otherwise
        record why the cookie failed (9007/1111 mean it expired)."""
        url = 'https://maicai.api.ddxq.mobi/user/info'
        resp = requests.get(url, headers=self.headers, params=self.params).json()
        if resp['code'] == 0:
            user_name = resp['data']['name']
            msg = self.sign_in()
            content = '用户: {}'.format(user_name) + msg
        elif resp['code'] == 9007 or resp['code'] == 1111:
            content = '\ncookie:{}\n签到失败,cookie 失效'.format(self.hidden_ck)
        else:
            content = '\ncookie:{}\n签到失败,未知错误 {}'.format(self.hidden_ck, resp)
        self.content_list.append(content)
    def query_point(self):
        """Return (total points, cash value, expiring-points text)."""
        url = 'https://maicai.api.ddxq.mobi/point/home'
        resp = requests.get(url, headers=self.headers, params=self.params).json()
        point_num = resp['data']['point_num']
        point_money = resp['data']['point_money']
        expire_point_display = resp['data']['expire_point_display']
        return point_num, point_money, expire_point_display
    def sign_in(self):
        """Perform the daily sign-in and format the outcome message."""
        url = 'https://sunquan.api.ddxq.mobi/api/v2/user/signin/'
        resp = requests.post(url, headers=self.headers, json=self.params).json()
        if resp['code'] == 0:
            sign_series = resp['data']['sign_series']
            point_num, point_money, expire_point_display = self.query_point()
            msg = '\n签到成功,获得积分:{}\n总积分:{}\n积分价值:{}元\n积分过期:{}'.format(
                sign_series, point_num,
                point_money,
                expire_point_display)
        else:
            msg = '\ncookie:{}\n签到失败 {}'.format(self.hidden_ck, resp)
        return msg
def main():
    """Sign in every account in DDXQ_COOKIE ('&'-separated cookies) and
    send one combined notification."""
    if COOKIE:
        cookie_list = COOKIE.strip().split('&')
        for ck in cookie_list:
            xq = DDXQ(ck)
            xq.check_and_sign_in()
        send_msg(TITLE, '\n'.join(DDXQ.content_list))
    else:
        print('没有设置环境变量 DDXQ_COOKIE,无法签到')
if __name__ == '__main__':
    main()
|
20,826 | 6f2848bb7cbce8a12de8886f23f54a8361541193 | from generic_convertor.generic_convertor import *
class Msleep_Convertor(Generic_Convertor):
    """Convertor for Perl msleep calls: maps the first positional Perl
    parameter to the Python `time` keyword."""
    def construct_py_params(self, perl_params, perl2py_params, perl_obj):
        """Build the {py_params, comment} dict for an msleep call.

        NOTE(review): if perl_obj.params is not a list, py_params is never
        bound and the next line raises NameError -- confirm callers always
        pass a list.
        """
        comment = None
        if isinstance(perl_obj.params, list):
            py_params = {"time":perl_obj.params[0]}
        py_vars = {"py_params":py_params, "comment":comment}
        return py_vars
20,827 | 3857d2b5a64661f03acfc634d4f99f35f8969c80 | from django.http import HttpResponse
from django.template import Template, Context
from django.shortcuts import render
# Template-serving views: each endpoint simply renders its static page.
def clientes(request):
    return render(request,"cliente.html")
def inicios(request):
    return render(request,"inicio.html")
def ahorro(request):
    return render(request,"crearAhorro.html")
def ventanaproveedorbasico(request):
    return render(request,"servicoProveedor.html")
def proveedor(request):
    return render(request,"pagoPlanilla.html")
def proveedores(request):
    return render(request,"basicos.html")
def trans(request):
    return render(request,"transacciones.html")
def propias(request):
    return render(request,"propias.html")
def cuentaTercero(request):
    return render(request,"terceros.html")
def menuCheques(request):
    return render(request,"menucheque.html")
def emitirCheque(request):
    return render(request,"emitir.html")
def recibirCheque(request):
    return render(request,"recibir.html")
def estados(request):
    return render(request,"estado.html")
def prestamos(request):
    return render(request,"prestamo.html")
def iniciosAhorro(request):
    return render(request,"inicioAhorro.html")
def iniciosFijo(request):
    return render(request,"plazofijo.html")
def claves(request):
    return render(request,"transaccionesclave.html")
def print_staircase(num_of_stairs):
    """Print an ascending staircase: rows of '#' of width 1..num_of_stairs."""
    for width in range(1, num_of_stairs + 1):
        print('#' * width)
# Demo: draw a 20-step staircase, then a 10-step one.
print_staircase(20)
print_staircase(10)
20,829 | 22825328a5f12fc11ed4378647d70ac1c5b9be8b | """
给定一个整数数组和一个整数 k,你需要找到该数组中和为 k 的连续的子数组的个数。
示例 1 :
输入:nums = [1,1,1], k = 2
输出: 2 , [1,1] 与 [1,1] 为两种不同的情况。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/subarray-sum-equals-k
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
from collections import defaultdict
from typing import List
# 2021.04.01 我的做法,但超时了
# 2021.04.01 my brute-force attempt (originally timed out)
class Solution:
    """Brute force over all (start, end) windows with a running sum.

    The original re-summed nums[i:j+1] for every window, making the whole
    scan O(n^3); keeping a running sum per start index brings it down to
    O(n^2).  It also returned the bool False for empty input from a method
    annotated `-> int`; an empty array has 0 qualifying subarrays.
    """
    def subarraySum(self, nums: List[int], k: int) -> int:
        if not nums:
            return 0
        count = 0
        for start in range(len(nums)):
            running = 0
            for end in range(start, len(nums)):
                running += nums[end]  # sum of nums[start:end+1]
                if running == k:
                    count += 1
        return count
# 2021.04.01 前缀和dp,但是也超时了
# 2021.04.01 prefix-sum table version (also timed out on LeetCode)
class Solution2:
    """Precompute prefix sums, then test every (left, right) pair: O(n^2)."""
    def subarraySum(self, nums: List[int], k: int) -> int:
        if not nums: return False
        # prefix[m] holds the sum of the first m elements.
        prefix = [0]
        for value in nums:
            prefix.append(prefix[-1] + value)
        n = len(nums)
        total = 0
        for left in range(n):
            for right in range(left + 1, n + 1):
                # Sum of nums[left:right] via the prefix table.
                if prefix[right] - prefix[left] == k:
                    total += 1
        return total
# 2021.04.01 前缀和+哈希,我终于理解了这个方法了
# 2021.04.01 prefix sum + hash map — O(n)
class Solution3:
    """Count subarrays summing to k by hashing prefix-sum frequencies."""
    def subarraySum(self, nums: List[int], k: int) -> int:
        seen = {}          # prefix-sum value -> number of times seen so far
        prefix = 0
        total = 0
        for value in nums:
            prefix += value
            # Subarray starting at index 0.
            if prefix == k:
                total += 1
            # Any earlier prefix equal to (prefix - k) closes a window
            # summing exactly to k.
            total += seen.get(prefix - k, 0)
            seen[prefix] = seen.get(prefix, 0) + 1
        return total
# 2021.04.01 我自己的方法
# 2021.04.01 my own variant: seed the map with prefix 0
class Solution4:
    """Prefix-sum/hash count; counts[0] = 1 covers windows starting at 0."""
    def subarraySum(self, nums: List[int], k: int) -> int:
        counts = defaultdict(int)
        counts[0] = 1          # empty prefix
        prefix = total = 0
        for value in nums:
            prefix += value
            # Every earlier prefix equal to prefix - k ends a valid window here.
            total += counts[prefix - k]
            counts[prefix] += 1
        return total
# 2021.04.02 综合各种方法的优化版
# 2021.04.02 consolidated version built on defaultdict
class Solution5:
    """O(n) prefix-sum counting; the acc == k branch covers index-0 starts."""
    def subarraySum(self, nums: List[int], k: int) -> int:
        occurrences = defaultdict(int)   # prefix-sum -> frequency
        running = hits = 0
        for value in nums:
            running += value
            if running == k:
                hits += 1
            # Windows closed at this index by an earlier matching prefix.
            hits += occurrences[running - k]
            occurrences[running] += 1
        return hits
# 2021.04.16 温习一下
# 2021.04.16 refresher pass over the same technique
class Solution6:
    """Prefix-sum frequency map seeded with the empty prefix."""
    def subarraySum(self, nums: List[int], k: int) -> int:
        prefix_counts = defaultdict(int)
        prefix_counts[0] = 1   # empty prefix makes index-0 starts count
        running = result = 0
        for value in nums:
            running += value
            result += prefix_counts[running - k]
            prefix_counts[running] += 1
        return result
20,830 | 27b5b9e84b45c6f89460447381d5e277642e996b | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a program to load the data into DataFrames
"""
from IPython import get_ipython;
get_ipython().magic('reset -sf')
import pandas as pd
import os
import numpy as np
def leftpadZeros(x,n):
    """Return str(x) left-padded with '0' characters to width n.

    Values already n characters or longer are returned unchanged.
    Used to normalize USAF/WBAN station ids before joining them.
    str.rjust(n, '0') is the standard-library equivalent of the previous
    hand-rolled padding loop and behaves identically (it pads purely on
    the left, unlike str.zfill, which moves a leading sign).
    """
    return str(x).rjust(n, '0')
def removeStarChar(x):
    """Return str(x) with a single trailing '*' removed, if present.

    GSOD temperature fields flag estimated values with a trailing '*'.

    Bug fixes: the original did `x[-1] = ''`, which raises TypeError
    because Python strings are immutable, and it also raised IndexError
    on empty input; slicing off the last character handles both.
    """
    x = str(x)
    if x and x[-1] == '*':
        return x[:-1]
    return x
#Load US stations list
# Station master list (NOAA ISD history), reduced to US stations with a
# state, keyed by the composite "USAF-WBAN" station number.
stations_list_file = r'C:\Users\krish\DataScience\Project 1 - Climate Change\Data\GSOD\GSOD_AllData\isd-history.csv'
stations_list = pd.read_csv(stations_list_file)
# Composite key: USAF id + '-' + zero-padded 5-digit WBAN id.
stations_list['Station_Num'] = stations_list.USAF +'-'+stations_list.WBAN.apply(leftpadZeros,n=5)
stations_list = stations_list.set_index('Station_Num')
stations_list.drop(['USAF','WBAN','BEGIN','END','LAT','LON','ICAO','ELEV(M)','STATION NAME'],axis=1,inplace=True)
us_stations_list = stations_list[(stations_list.CTRY=='US') & (pd.notnull(stations_list.STATE))]
del us_stations_list['CTRY']
us_stations_list.columns = ['State']
# Load climate data for all US Stations
us_climate_data = pd.DataFrame()
for year in range(1975,2019):
    print('loading ' + str(year) + ' data')
    data_folder_loc = r'C:\Users\krish\DataScience\Project 1 - Climate Change\Data\GSOD\GSOD_AllData\gsod_' + str(year)
    us_climate_data_year = pd.DataFrame()
    # One gzipped file per station per year; the first 12 characters of
    # the file name are the "USAF-WBAN" station key.
    for root, dirs, files in os.walk(data_folder_loc):
        for filename in files:
            for station_id in us_stations_list.index.values:
                if filename[0:12] == station_id:
                    stn_climate_data = pd.read_csv(root+'\\'+filename,compression='gzip',delim_whitespace=True,skiprows=1,header=None,na_values=['9999.9','999.9'],parse_dates=[2])
                    us_climate_data_year = us_climate_data_year.append(stn_climate_data)
    # Name the fixed GSOD column layout; RemoveN are observation-count
    # columns that are dropped immediately below.
    us_climate_data_year.columns = ['Station', 'SubStn', 'Date', 'TempAVG', 'Remove1', 'DewPoint', 'Remove2', 'SeaLevelPressure', 'Remove3', 'StationPressure', 'Remove4', 'Visibility', 'Remove5', 'WindSpeed', 'Remove6', 'WindSpeedMax', 'WindGust', 'TempMAX', 'TempMIN', 'Precipitation', 'SnowDepth', 'FRSHTT']
    for i in range(1,7):
        del us_climate_data_year['Remove'+str(i)]
    us_climate_data_year.drop(['SeaLevelPressure','StationPressure','Visibility','WindSpeed','WindSpeedMax','WindGust','SnowDepth','Precipitation'],axis=1,inplace=True)
    us_climate_data_year['Station_Num'] = us_climate_data_year.Station.apply(leftpadZeros,n=6) +'-'+us_climate_data_year.SubStn.apply(leftpadZeros,n=5)
    us_climate_data_year.drop(['Station','SubStn'],axis=1,inplace=True)
    us_climate_data_year['FRSHTT'] = us_climate_data_year.FRSHTT.apply(leftpadZeros,n=6)
    # A trailing '*' marks estimated max/min temperatures; strip it before
    # converting to numbers.
    us_climate_data_year['TempMAX'] = pd.to_numeric(us_climate_data_year.TempMAX.str.strip('*'))
    us_climate_data_year['TempMIN'] = pd.to_numeric(us_climate_data_year.TempMIN.str.strip('*'))
    # Split the 6-character FRSHTT flag string into one column per event;
    # splitting on '' yields empty Blank columns at both ends.
    us_climate_data_year[['Blank1','Fog','Rain','Snow','Hail','Thunder','Tornado','Blank2']] = us_climate_data_year['FRSHTT'].str.split('',expand=True)
    us_climate_data_year.drop(['Blank1','Blank2','Fog','FRSHTT'],axis=1,inplace=True)
    us_climate_data_year['Rain'] = pd.to_numeric(us_climate_data_year['Rain'])
    us_climate_data_year['Snow'] = pd.to_numeric(us_climate_data_year['Snow'])
    us_climate_data_year['Hail'] = pd.to_numeric(us_climate_data_year['Hail'])
    us_climate_data_year['Thunder'] = pd.to_numeric(us_climate_data_year['Thunder'])
    us_climate_data_year['Tornado'] = pd.to_numeric(us_climate_data_year['Tornado'])
    # NOTE(review): dropna is not in-place here and the result is discarded;
    # likely meant us_climate_data_year = us_climate_data_year.dropna(how='any')
    us_climate_data_year.dropna(how='any')
    # Monthly aggregation: mean temperatures, summed event counts.
    dict_aggregation = {'TempAVG':np.mean,'TempMAX':np.mean,'TempMIN':np.mean,'Rain':np.sum,'Snow':np.sum,'Hail':np.sum,'Thunder':np.sum,'Tornado':np.sum}
    us_climate_data_year = us_climate_data_year.set_index('Date').groupby('Station_Num').resample(rule='M').agg(dict_aggregation)
    us_climate_data_year = us_climate_data_year.reset_index().set_index('Station_Num')
    # Attach each station's state (inner join drops unknown stations).
    us_climate_data_year = pd.merge(us_climate_data_year,us_stations_list,on='Station_Num',how='inner')
    # us_climate_data_year.to_csv(r'C:\Users\krish\DataScience\Project 1 - Climate Change\Data\GSOD\GSOD_Clean_Data\GSOD_Clean_'+str(year)+'.csv')
    # NOTE(review): set_index('Date','State') passes 'State' as the (truthy)
    # `drop` argument, not a second key — probably meant set_index(['Date','State']).
    us_climate_data_year = us_climate_data_year.reset_index().set_index('Date','State').drop(['Station_Num'],axis=1)
    us_climate_data_year = us_climate_data_year.groupby(['Date','State']).agg(dict_aggregation)
    # Fill temperature gaps by linear interpolation along the index.
    us_climate_data_year[['TempMAX','TempAVG','TempMIN']] = us_climate_data_year[['TempMAX','TempAVG','TempMIN']].interpolate()
    us_climate_data = us_climate_data.append(us_climate_data_year)
us_climate_data.to_csv(r'C:\Users\krish\DataScience\Project 1 - Climate Change\All_Clean_Data\GSOD_All_Data_no_outliers.csv')
|
20,831 | d1e650d18b146725eb83d3ecdc9bc467cf86947a | import numpy as np
from sklearn import preprocessing
# Single-column feature with values spanning a wide range.
feature = np.array([-500.5, -100.1, 0, 100.1, 900.9]).reshape(-1, 1)
# Standardize to zero mean and unit variance.
scaler = preprocessing.StandardScaler()
standardized = scaler.fit_transform(feature)
# Display the standardized feature.
print(standardized)
20,832 | 8eae631e2c36b1017b7241e9c678b485ca87fa8d | # encoding: utf8
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the Vote.poll foreign key (third migration of the poll app)."""

    dependencies = [
        ('poll', '0002_vote'),
    ]

    operations = [
        migrations.AddField(
            model_name='vote',
            name='poll',
            # preserve_default=False: default=None only backfills existing
            # rows during this migration; it is not kept on the field.
            field=models.ForeignKey(to='poll.Poll', default=None, to_field='id'),
            preserve_default=False,
        ),
    ]
|
20,833 | 92d9b9a4dea4b453b5ee9c2f48f1828383f693e3 | #Import required libraries
import socket
import threading
import time
import json
#Define a server socket
# TCP listener for the toy DNS service on localhost:5555.
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(('127.0.0.1', 5555))
server.listen(100)  # allow up to 100 pending connections
print("DNS Server is to ready to accept connections..")
# Data base: domain-name -> IP mapping persisted in data.json.
# NOTE(review): this read handle is never closed; developer() later reopens
# 'data.json' for writing — consider data.close() after json.load.
data = open('data.json', 'r')
dnsDataBase = json.load(data)
#A function to handle clients
def handleClient(cs,cadd):
    """Serve DNS lookups for one connected 'User' client.

    Repeatedly prompts for a domain name over the socket and replies with
    the IP from dnsDataBase; 'exit' (any case) ends the session and closes
    the socket.

    Bug fix: the original tested membership with `site` but looked the
    value up with `site.lower()`, so a hit on the raw key followed by a
    miss on the lowered key raised KeyError and killed the client thread.
    Both forms are now tried consistently, preferring the exact key.
    """
    BUFSIZE = 2048
    while True:
        cs.send("Enter Domain name (Site): ".encode())
        site = cs.recv(BUFSIZE).decode()
        if site.lower() == 'exit':
            break
        # Prefer the exact key; fall back to the lowercase form.
        lookup = site if site in dnsDataBase else site.lower()
        if lookup in dnsDataBase:
            ip = dnsDataBase[lookup]
            msg1 = f"The Ip for {site}: >>{ip}<<\n"
            msg2 = "Query succesfully replayed at: "+ str(time.asctime(time.localtime(time.time())))
            response = msg1+msg2
            cs.send(response.encode())
        else:
            response = "Query Failed>> website not exist."
            cs.send(response.encode())
    cs.close()
def developer(cs,cadd):
    """Serve one connected 'Developer' client.

    Repeatedly prompts for a new domain name and its IP, adds the pair to
    the in-memory dnsDataBase, and persists the whole database to
    data.json after every update; 'exit' ends the session.

    Bug fixes: the original reopened data.json with a bare open() on every
    loop iteration and never closed the handle (leaking file descriptors
    and risking unflushed writes); it also pointlessly reassigned BUFSIZE
    inside the loop.
    """
    BUFSIZE = 2048
    while True:
        cs.send("Enter New Domain name (Site): ".encode())
        site = cs.recv(BUFSIZE).decode()
        if site.lower() == 'exit':
            break
        cs.send("Enter The Correspanding IP: ".encode())
        ip = cs.recv(BUFSIZE).decode()
        dnsDataBase[site] = ip
        # Persist after each update; `with` guarantees flush and close.
        with open('data.json', 'w') as data:
            json.dump(dnsDataBase, data)
    cs.close()
#Accept clients continuously
# Each accepted connection is asked for its role and handed to a dedicated
# thread: 'U' -> lookup handler, 'D' -> database editor; anything else is
# disconnected immediately.
while True:
    cs, cadd = server.accept()
    print("New client is connected>> "+cadd[0]+" :: "+str(cadd[1]))
    msg1 = "Welcome to our DNS server.\n"
    msg2 = "Enter Your State ('D' for Developer or 'U' for User): "
    msg = msg1+msg2
    cs.send(msg.encode())
    res = cs.recv(2048).decode()
    if res == 'U':
        thread = threading.Thread(target=handleClient, args=(cs,cadd))
        thread.start()
    elif res == 'D':
        thread = threading.Thread(target=developer, args=(cs, cadd))
        thread.start()
    else:
        cs.close()
# Close the Connection
# NOTE(review): unreachable — the accept loop above never breaks.
server.close()
20,834 | bb277905c96667adf4be798b14ad76ef8189e48d |
def path():
    """Return the repo-relative path of the external module fixture."""
    module_location = 'ansible/lib/module_utils/externalpkg/extmod.py'
    return module_location
20,835 | e6766c6d2ed132cf99002142060101fa3240c3e2 | # Generated by Django 2.0.6 on 2018-06-13 11:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import userena.managers
class Migration(migrations.Migration):
    """Initial userena migration: create the UserenaSignup bookkeeping model.

    UserenaSignup tracks activation / email-confirmation / invitation state
    for exactly one auth user (OneToOne to AUTH_USER_MODEL).
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='UserenaSignup',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('last_active', models.DateTimeField(blank=True, help_text='The last date that the user was active.', null=True, verbose_name='last active')),
                ('activation_key', models.CharField(blank=True, max_length=40, verbose_name='activation key')),
                ('invitation_key', models.CharField(blank=True, max_length=40, verbose_name='invitation key')),
                ('activation_notification_send', models.BooleanField(default=False, help_text='Designates whether this user has already got a notification about activating their account.', verbose_name='notification send')),
                ('email_unconfirmed', models.EmailField(blank=True, help_text='Temporary email address when the user requests an email change.', max_length=254, verbose_name='unconfirmed email address')),
                ('email_confirmation_key', models.CharField(blank=True, max_length=40, verbose_name='unconfirmed email verification key')),
                ('email_confirmation_key_created', models.DateTimeField(blank=True, null=True, verbose_name='creation date of email confirmation key')),
                # Tracks where the invited user is in the onboarding flow.
                ('invitation_status', models.CharField(choices=[('INV', 'Invitation Mail was sent'), ('PSWRST', 'Password was reset by user'), ('PRFEDIT', 'Profile was edited by user')], default='INV', max_length=7)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='userena_signup', to=settings.AUTH_USER_MODEL, verbose_name='user')),
            ],
            options={
                'verbose_name': 'userena registration',
                'verbose_name_plural': 'userena registrations',
            },
            managers=[
                ('objects', userena.managers.UserenaManager()),
            ],
        ),
    ]
|
20,836 | f9f14b7bd22a8d733ae655ce6e7d5b93dd4f70e7 | print ("hello world !")
print ('hello zyb!')
# Single quotes let the string contain unescaped double quotes.
print ('I "said" do not touch it')
# Kept as a broken counter-example: the unescaped inner double quotes
# below would be a SyntaxError if uncommented.
#print ("I "said" do not touch it")
# print accepts several comma-separated values; int() truncates 30.0.
print ("hello", int(25+30/6))
print ("it is ture that 3+2>5-7?")
# Comparisons evaluate to True/False before printing.
print ("it is" , 3+2>5-7)
print ("Mary had l little stone.")
# Old-style %-formatting substitutes 'snow' for %s.
print ("its fleece was white as %s." % 'snow')
# String repetition: ten dots.
print ('.' * 10)
# %r renders each argument with repr().
formatter="%r %r"
print (formatter %(1,2))
20,837 | bd14d54145c0932c049eb207ae0ee022ebc66c59 | from .models_base import *
from sqlalchemy.orm import relationship
import datetime
#Base models:
#Experimentor
#experimentor_id2name
class experimentor_id2name(Base):
    """Association row linking an experimentor id to a named person and role."""
    #__table__ = make_table('experimentor_id2name')
    __tablename__ = 'experimentor_id2name'
    experimentor_id = Column(String(50), nullable = False);
    experimentor_name = Column(String(100), nullable = False);
    experimentor_role = Column(String(500), nullable = False)
    __table_args__ = (ForeignKeyConstraint(['experimentor_id'],['experimentor_list.experimentor_id'], onupdate="CASCADE"),
                      ForeignKeyConstraint(['experimentor_name'],['experimentor.experimentor_name']),
                      PrimaryKeyConstraint('experimentor_id','experimentor_name','experimentor_role'),
                      )
    def __repr__(self):
        return "experimentor_id2name: %s, %s, %s" % (self.experimentor_id,self.experimentor_name,self.experimentor_role)
    # NOTE: __repr__dict__/__repr__json__ are ordinary methods (not Python
    # protocol dunders) used as dict/JSON serializations of the row.
    def __repr__dict__(self):
        return {"experimentor_id":self.experimentor_id,"experimentor_name":self.experimentor_name,"experimentor_role":self.experimentor_role}
    def __repr__json__(self):
        return json.dumps(self.__repr__dict__())

#experimentor
class experimentor(Base):
    """A person who runs experiments, with optional contact information."""
    #__table__ = make_table('experimentor')
    __tablename__ = 'experimentor'
    experimentor_name = Column(String(100), nullable = False);
    contact_information = Column(String(100))
    __table_args__ = (PrimaryKeyConstraint('experimentor_name'),
                      )
    #define relations
    experimentor_id2name = relationship(experimentor_id2name);
    def __repr__(self):
        return "experimentor: %s, %s" % (self.experimentor_name,self.contact_information)
    #TODO:
    #JSON representation
    def __repr__dict__(self):
        return {"experimentor_name":self.experimentor_name,"contact_information":self.contact_information}
    def __repr__json__(self):
        return json.dumps(self.__repr__dict__())

#experimentor_list
class experimentor_list(Base):
    """Registry of valid experimentor ids referenced by experimentor_id2name."""
    #__table__ = make_table('experimentor_list')
    __tablename__ = 'experimentor_list'
    experimentor_id = Column(String(50), nullable = False)
    __table_args__ = (PrimaryKeyConstraint('experimentor_id'),
                      )
    #define relations
    experimentor_id2name = relationship(experimentor_id2name);
    def __repr__(self):
        return "experimentor_list: %s" % (self.experimentor_id)
    #TODO:
    #JSON representation
    def __repr__dict__(self):
        return {"experimentor_list":self.experimentor_id}
    def __repr__json__(self):
        return json.dumps(self.__repr__dict__())
#Extraction Method
#extraction_method
class extraction_method(Base):
    """Metabolite extraction protocol with its literature reference."""
    #__table__ = make_table('extraction_method')
    __tablename__ = 'extraction_method'
    id = Column(String(500), nullable = False);
    extraction_method_reference = Column(String(100), nullable = False);
    notes = Column(Text)
    __table_args__ = (PrimaryKeyConstraint('id'),
                      )
    #TODO:
    #define relations
    #define representation
    def __repr__(self):
        return "extraction_method: %s" % (self.id)
    #JSON representation
    def __repr__dict__(self):
        return {"id":self.id,"extraction_method_reference":self.extraction_method_reference,"notes":self.notes}
    def __repr__json__(self):
        return json.dumps(self.__repr__dict__())
#Standards
#standards
class standards(Base):
    """Chemical standard (reference compound) with identifiers and properties."""
    #__table__ = make_table('standards')
    __tablename__ = 'standards'
    met_id = Column(String(50), nullable = False);
    met_name = Column(String(500), nullable = False);
    formula = Column(String(100));
    hmdb = Column(String(500));
    solubility = Column(Float);
    solubility_units = Column(String(10));
    mass = Column(Float);
    cas_number = Column(String(100));
    keggid = Column(String(100));
    structure_file = Column(Text);
    structure_file_extention = Column(String(10));
    exactmass = Column(Float)
    __table_args__ = (PrimaryKeyConstraint('met_id'),
                      )
    def __init__(self,met_id_I,met_name_I,formula_I,hmdb_I,
                 solubility_I,solubility_units_I,mass_I,cas_number_I,
                 keggid_I,structure_file_I,structure_file_extention_I,
                 exactmass_I):
        self.met_id=met_id_I
        self.met_name=met_name_I
        self.formula=formula_I
        self.hmdb=hmdb_I
        self.solubility=solubility_I
        self.solubility_units=solubility_units_I
        self.mass=mass_I
        self.cas_number=cas_number_I
        self.keggid=keggid_I
        self.structure_file=structure_file_I
        self.structure_file_extention=structure_file_extention_I
        self.exactmass=exactmass_I
    #TODO:
    #define relations
    #define representation
    def __repr__(self):
        return "standards: %s" % (self.met_id)
    #JSON representation
    def __repr__dict__(self):
        return {"met_id":self.met_id}
    def __repr__json__(self):
        return json.dumps(self.__repr__dict__())

#standards_ordering
class standards_ordering(Base):
    """Purchasing/ordering record for a standard from a given provider."""
    #__table__ = make_table('standards_ordering')
    __tablename__ = 'standards_ordering'
    met_id = Column(String(50), nullable = False);
    met_name = Column(String(100), nullable = False);
    hillcrest = Column(Boolean);
    provider = Column(String(100), nullable = False);
    provider_reference = Column(String(100), nullable = False);
    price = Column(Float);
    amount = Column(Float);
    amount_units = Column(String(10));
    purity = Column(Float);
    mw = Column(Float);
    notes = Column(String(500));
    powderdate_received= Column(Date);
    powderdate_opened= Column(Date);
    order_standard = Column(Boolean);
    standards_storage = Column(Float);
    purchase = Column(Boolean)
    __table_args__ = (PrimaryKeyConstraint('met_id','provider','provider_reference'),
                      )
    def __init__(self,met_id_I,met_name_I,hillcrest_I,
                 provider_I,provider_reference_I,price_I,
                 amount_I,amount_units_I,purity_I,mw_I,
                 notes_I,powderdate_received_I,
                 powderdate_opened_I,order_standard_I,
                 standards_storage_I,purchase_I):
        self.met_id=met_id_I
        self.met_name=met_name_I
        self.hillcrest=hillcrest_I
        self.provider=provider_I
        self.provider_reference=provider_reference_I
        self.price=price_I
        self.amount=amount_I
        self.amount_units=amount_units_I
        self.purity=purity_I
        self.mw=mw_I
        self.notes=notes_I
        self.powderdate_received=powderdate_received_I
        self.powderdate_opened=powderdate_opened_I
        self.order_standard=order_standard_I
        self.standards_storage=standards_storage_I
        self.purchase=purchase_I
    #TODO:
    #define relations
    #define representation
    def __repr__(self):
        return "standards_ordering: %s" % (self.met_id)
    #JSON representation
    def __repr__dict__(self):
        return {"met_id":self.met_id}
    def __repr__json__(self):
        return json.dumps(self.__repr__dict__())

#standards2material
class standards2material(Base):
    """Join table tying a standard (met_id) to a purchased material lot."""
    __tablename__ = 'standards2material'
    met_id = Column(String(50), nullable = False);
    provider = Column(String(100), nullable = False);
    provider_reference = Column(String(100), nullable = False)
    __table_args__ = (PrimaryKeyConstraint('met_id','provider','provider_reference'),
                      ForeignKeyConstraint(['met_id'],['standards.met_id']),
                      ForeignKeyConstraint(['met_id','provider','provider_reference'],['standards_ordering.met_id','standards_ordering.provider','standards_ordering.provider_reference']),
                      )

#standards_storage
class standards_storage(Base):
    """Physical storage of a prepared standard stock (box/position, date, conc.)."""
    __tablename__ = 'standards_storage'
    met_id = Column(String(50), nullable = False);
    met_name = Column(String(500), nullable = False);
    provider = Column(String(100), nullable = False);
    provider_reference = Column(String(50), nullable = False);
    powderdate= Column(Date);
    stockdate= Column(Date, nullable = False);
    concentration = Column(Float);
    concentration_units = Column(String(10));
    aliquots = Column(Integer);
    solvent = Column(String(100));
    ph = Column(Float);
    box = Column(Integer);
    posstart = Column(Integer);
    posend = Column(Integer)
    __table_args__ = (UniqueConstraint('met_id','stockdate'),
                      PrimaryKeyConstraint('met_id','provider','provider_reference','stockdate'),
                      ForeignKeyConstraint(['met_id','provider','provider_reference'],['standards2material.met_id','standards2material.provider','standards2material.provider_reference']),
                      )
#Calibrators and mixes
#mix_storage
class mix_storage(Base):
    """Physical storage of a prepared mix (arrays allow multiple box positions)."""
    __tablename__ = 'mix_storage'
    mix_id = Column(String(25), nullable = False);
    mixdate= Column(Date);
    box = Column(postgresql.ARRAY(Integer));
    posstart = Column(postgresql.ARRAY(Integer));
    posend = Column(postgresql.ARRAY(Integer))
    __table_args__ = (PrimaryKeyConstraint('mix_id'),
                      )

#mix_description
class mix_description(Base):
    """Free-text description of a mix."""
    __tablename__ = 'mix_description'
    mix_id = Column(String(25), nullable = False);
    mix_description = Column(Text, nullable = False)
    __table_args__ = (PrimaryKeyConstraint('mix_id'),
                      )

#mix_parameters
class mix_parameters(Base):
    """Preparation parameters for a mix.

    NOTE(review): both `number_of_aliquots` and `number_of_aliquiots`
    (misspelled) exist — the second looks like a typo duplicate; confirm
    against the database schema before removing either.
    """
    __tablename__ = 'mix_parameters'
    mix_id = Column(String(25), nullable = False);
    number_of_aliquots = Column(Float, nullable = False);
    mix_volume = Column(Float, nullable = False);
    number_of_aliquiots = Column(Integer, nullable = False)
    __table_args__ = (PrimaryKeyConstraint('mix_id'),
                      )

#calibrator_met_parameters
class calibrator_met_parameters(Base):
    """Per-metabolite calibrator parameter (dilution factor)."""
    __tablename__ = 'calibrator_met_parameters'
    met_id = Column(String(50), nullable = False);
    dilution = Column(Integer, nullable = False)
    __table_args__ = (PrimaryKeyConstraint('met_id'),
                      )

#calibrator2mix
class calibrator2mix(Base):
    """Join table mapping a calibrator to a mix (PK on mix_id only)."""
    #__table__ = make_table('calibrator2mix')
    __tablename__ = 'calibrator2mix'
    calibrator_id = Column(Integer, nullable = False);
    mix_id = Column(String(25), nullable = False)
    __table_args__ = (PrimaryKeyConstraint('mix_id'),
                      )
    def __init__(self,calibrator_id_I,mix_id_I):
        self.calibrator_id=calibrator_id_I
        self.mix_id=mix_id_I

#mix2met_ID
class mix2met_id(Base):
    """Join table listing the metabolites contained in each mix."""
    #__table__ = make_table('mix2met_id')
    __tablename__ = 'mix2met_id'
    mix_id = Column(String(25), nullable = False);
    met_id = Column(String(50), nullable = False);
    met_name = Column(String(500), nullable = False)
    __table_args__ = (PrimaryKeyConstraint('met_id','mix_id'),
                      ForeignKeyConstraint(['mix_id'],['mix_storage.mix_id']),
                      ForeignKeyConstraint(['mix_id'],['calibrator2mix.mix_id']),
                      ForeignKeyConstraint(['mix_id'],['mix_description.mix_id']),
                      )
    def __init__(self,mix_id_I,met_id_I,met_name_I):
        self.mix_id=mix_id_I
        self.met_id=met_id_I
        self.met_name=met_name_I

#calibrator
class calibrator(Base):
    """Quantification limits (LLOQ/ULOQ) per metabolite, tied to a stock lot."""
    __tablename__ = 'calibrator'
    met_id = Column(String(50), nullable = False);
    lloq = Column(Float);
    uloq = Column(Float);
    uloq_working = Column(Float);
    concentration_units = Column(String(25));
    stockdate= Column(Date)
    __table_args__ = (UniqueConstraint('met_id','stockdate'),
                      PrimaryKeyConstraint('met_id'),
                      ForeignKeyConstraint(['met_id','stockdate'],['standards_storage.met_id','standards_storage.stockdate']),
                      )

#calibrator_concentrations
class calibrator_concentrations(Base):
    """Concentration of a metabolite at each calibrator dilution level."""
    #__table__ = make_table('calibrator_concentrations')
    __tablename__ = 'calibrator_concentrations'
    met_id = Column(String(50), nullable = False);
    calibrator_level = Column(Integer, nullable = False);
    dilution_factor = Column(Float);
    calibrator_concentration = Column(Float);
    concentration_units = Column(String(25))
    __table_args__ = (PrimaryKeyConstraint('met_id','calibrator_level'),
                      )
    def __init__(self,met_id_I,calibrator_level_I,dilution_factor_I,
                 calibrator_concentration_I,concentration_units_I):
        self.met_id=met_id_I
        self.calibrator_level=calibrator_level_I
        self.dilution_factor=dilution_factor_I
        self.calibrator_concentration=calibrator_concentration_I
        self.concentration_units=concentration_units_I

#calibrator_calculations
class calibrator_calculations(Base):
    """Calculated starting concentrations for a calibrator."""
    __tablename__ = 'calibrator_calculations'
    met_id = Column(String(50), nullable = False);
    calcstart_concentration = Column(Float);
    start_concentration = Column(Float)
    __table_args__ = (PrimaryKeyConstraint('met_id'),
                      ForeignKeyConstraint(['met_id'],['calibrator.met_id']),
                      )

#calibrator_met2mix_calculations
class calibrator_met2mix_calculations(Base):
    """Per-metabolite volumes/concentrations when building a calibrator mix."""
    __tablename__ = 'calibrator_met2mix_calculations'
    met_id = Column(String(50), nullable = False);
    mix_id = Column(String(25), nullable = False);
    working_concentration = Column(Float);
    total_volume = Column(Float);
    add_volume = Column(Float)
    __table_args__ = (PrimaryKeyConstraint('met_id'),
                      ForeignKeyConstraint(['met_id'],['calibrator_met_parameters.met_id']),
                      ForeignKeyConstraint(['met_id'],['calibrator_calculations.met_id']),
                      ForeignKeyConstraint(['mix_id'],['mix_calculations.mix_id']),
                      ForeignKeyConstraint(['mix_id'],['mix_parameters.mix_id']),
                      )

#mix_calculations
class mix_calculations(Base):
    """Derived volumes for assembling a mix from its components."""
    __tablename__ = 'mix_calculations'
    mix_id = Column(String(25), nullable = False);
    number_of_compounds = Column(Integer);
    total_volume_actual = Column(Float);
    aliquot_volume = Column(Float);
    add_to_make_aliquot_volume_even = Column(Float);
    corrected_aliquot_volume = Column(Float);
    volume_units = Column(String(25))
    __table_args__ = (PrimaryKeyConstraint('mix_id'),
                      ForeignKeyConstraint(['mix_id'],['mix_parameters.mix_id']),
                      )

#calibrator_levels
class calibrator_levels(Base):
    """Dilution-series definition shared by all calibrators."""
    __tablename__ = 'calibrator_levels'
    calibrator_level = Column(Integer, nullable = False);
    dilution = Column(Float, nullable = False);
    injectionvolume = Column(Float);
    workingvolume = Column(Float);
    dilution_factor_from_the_previous_level = Column(Float);
    amount_from_previous_level = Column(Float);
    balance_h2o = Column(Float);
    dilution_concentration = Column(Float)
    __table_args__ = (PrimaryKeyConstraint('calibrator_level'),
                      )
#MS_components
class MS_components(Base):
    """MRM transition (Q1/Q3 masses plus instrument parameters) for a metabolite.

    One row per precursor/product ion pair and method type; the composite
    primary key is (met_id, q1_mass, q3_mass, ms_methodtype).
    NOTE(review): `id` has a Sequence but is not part of the primary key —
    confirm it is populated by the database.
    """
    #__table__ = make_table('ms_components')
    __tablename__ = 'ms_components'
    id = Column(Integer, Sequence('ms_components_id_seq'))
    q1_mass = Column(Float, nullable = False);
    q3_mass = Column(Float, nullable = False);
    ms3_mass = Column(Float);
    met_name = Column(Text);
    # Instrument voltages: declustering/entrance potential, collision
    # energy, collision cell exit potential, AF amplitude.
    dp = Column(Float);
    ep = Column(Float);
    ce = Column(Float);
    cxp = Column(Float);
    af = Column(Float);
    quantifier = Column(Integer);
    ms_mode = Column(String(1));
    ion_intensity_rank = Column(Integer);
    ion_abundance = Column(Float);
    precursor_formula = Column(Text);
    product_ion_reference = Column(Text);
    product_formula = Column(Text);
    production_ion_notes = Column(Text);
    met_id = Column(String(50), nullable = False);
    external_reference = Column(Text);
    q1_mass_units = Column(String(20));
    q3_mass_units = Column(String(20));
    ms3_mass_units = Column(String(20));
    threshold_units = Column(String(20));
    dp_units = Column(String(20));
    ep_units = Column(String(20));
    ce_units = Column(String(20));
    cxp_units = Column(String(20));
    af_units = Column(String(20));
    ms_group = Column(String(100));
    threshold = Column(Float, default = 5000)
    dwell_weight = Column(Float, default = 1)
    component_name = Column(String(500));
    ms_include = Column(Boolean, default = False);
    ms_is = Column(Boolean, default = False);  # internal-standard flag
    precursor_fragment = Column(postgresql.ARRAY(Boolean))
    product_fragment = Column(postgresql.ARRAY(Boolean))
    precursor_exactmass = Column(Float);
    product_exactmass = Column(Float);
    ms_methodtype = Column(String(20), default = 'tuning')
    precursor_fragment_elements = Column(postgresql.ARRAY(String(3)))
    product_fragment_elements = Column(postgresql.ARRAY(String(3)))
    __table_args__ = (UniqueConstraint('component_name','ms_include'),
                      PrimaryKeyConstraint('met_id','q1_mass','q3_mass','ms_methodtype'),
                      )
    def __init__(self,q1_mass_I,q3_mass_I,ms3_mass_I,
                 met_name_I,dp_I,ep_I,ce_I,cxp_I,af_I,
                 quantifier_I,ms_mode_I,ion_intensity_rank_I,
                 ion_abundance_I,precursor_formula_I,
                 product_ion_reference_I,product_formula_I,
                 production_ion_notes_I,met_id_I,external_reference_I,
                 q1_mass_units_I,q3_mass_units_I,ms3_mass_units_I,
                 threshold_units_I,dp_units_I,ep_units_I,ce_units_I,
                 cxp_units_I,af_units_I,ms_group_I,threshold_I,
                 dwell_weight_I,component_name_I,ms_include_I,
                 ms_is_I,precursor_fragment_I,product_fragment_I,
                 precursor_exactmass_I,product_exactmass_I,
                 ms_methodtype_I,
                 precursor_fragment_elements_I,
                 product_fragment_elements_I):
        self.q1_mass=q1_mass_I
        self.q3_mass=q3_mass_I
        self.ms3_mass=ms3_mass_I
        self.met_name=met_name_I
        self.dp=dp_I
        self.ep=ep_I
        self.ce=ce_I
        self.cxp=cxp_I
        self.af=af_I
        self.quantifier=quantifier_I
        self.ms_mode=ms_mode_I
        self.ion_intensity_rank=ion_intensity_rank_I
        self.ion_abundance=ion_abundance_I
        self.precursor_formula=precursor_formula_I
        self.product_ion_reference=product_ion_reference_I
        self.product_formula=product_formula_I
        self.production_ion_notes=production_ion_notes_I
        self.met_id=met_id_I
        self.external_reference=external_reference_I
        self.q1_mass_units=q1_mass_units_I
        self.q3_mass_units=q3_mass_units_I
        self.ms3_mass_units=ms3_mass_units_I
        self.threshold_units=threshold_units_I
        self.dp_units=dp_units_I
        self.ep_units=ep_units_I
        self.ce_units=ce_units_I
        self.cxp_units=cxp_units_I
        self.af_units=af_units_I
        self.ms_group=ms_group_I
        self.threshold=threshold_I
        self.dwell_weight=dwell_weight_I
        self.component_name=component_name_I
        self.ms_include=ms_include_I
        self.ms_is=ms_is_I
        self.precursor_fragment=precursor_fragment_I
        self.product_fragment=product_fragment_I
        self.precursor_exactmass = precursor_exactmass_I
        self.product_exactmass = product_exactmass_I
        self.ms_methodtype = ms_methodtype_I
        self.precursor_fragment_elements = precursor_fragment_elements_I
        self.product_fragment_elements = product_fragment_elements_I
#MS_sourceParameters
class MS_sourceParameters(Base):
    """Ion-source settings (curtain gas, CAD, spray voltage, temp, gas flows)."""
    #__table__ = make_table('ms_sourceParameters')
    __tablename__ = 'ms_sourceparameters'
    id = Column(String(50), nullable = False);
    ms_cur = Column(Float, nullable = False);
    ms_cad = Column(String(10), nullable = False)
    ms_is = Column(Float, nullable = False);
    ms_tem = Column(Float, nullable = False);
    ms_gs1 = Column(Float, nullable = False);
    ms_gs2 = Column(Float, nullable = False)
    __table_args__ = (PrimaryKeyConstraint('id'),
                      )

#MS_information
class MS_information(Base):
    """Mass-spectrometer instrument identity (manufacturer, serial number)."""
    #__table__ = make_table('ms_information')
    __tablename__ = 'ms_information'
    manufacturer = Column(String(100), nullable = False);
    id = Column(String(100), nullable = False);
    serial_number = Column(String(100), nullable = False)
    __table_args__ = (PrimaryKeyConstraint('id'),
                      )

#MS_method
class MS_method(Base):
    """MS method tying together source parameters, instrument, and experiment."""
    #__table__ = make_table('ms_method')
    __tablename__ = 'ms_method'
    id = Column(String(50), nullable = False);
    ms_sourceparameters_id = Column(String(50), nullable = False);
    ms_information_id = Column(String(50), nullable = False);
    ms_experiment_id = Column(String(50))
    __table_args__ = (PrimaryKeyConstraint('id'),
                      ForeignKeyConstraint(['ms_information_id'],['ms_information.id'], onupdate="CASCADE", ondelete="CASCADE"),
                      ForeignKeyConstraint(['ms_sourceparameters_id'],['ms_sourceparameters.id'], onupdate="CASCADE", ondelete="CASCADE"),
                      )
    def __init__(self,id_I, ms_sourceparameters_id_I,ms_information_id_I,ms_experiment_id_I):
        self.id = id_I;
        self.ms_sourceparameters_id = ms_sourceparameters_id_I;
        self.ms_information_id = ms_information_id_I;
        self.ms_experiment_id = ms_experiment_id_I;

#MS_component_list
class MS_component_list(Base):
    """Components (transitions) selected for a particular MS method."""
    #__table__ = make_table('ms_component_list')
    __tablename__ = 'ms_component_list'
    ms_method_id = Column(String(50), nullable = False);
    q1_mass = Column(Float);
    q3_mass = Column(Float);
    met_id = Column(String(50));
    component_name = Column(String(500), nullable = False);
    ms_methodtype = Column(String(20), default = 'quantification')
    __table_args__ = (PrimaryKeyConstraint('ms_method_id','component_name'),
                      ForeignKeyConstraint(['ms_method_id'],['ms_method.id'], onupdate="CASCADE"),
                      )
    def __init__(self,ms_method_id_I,q1_mass_I,q3_mass_I,
                 met_id_I,component_name_I,ms_methodtype_I):
        self.ms_method_id=ms_method_id_I
        self.q1_mass=q1_mass_I
        self.q3_mass=q3_mass_I
        self.met_id=met_id_I
        self.component_name=component_name_I
        self.ms_methodtype=ms_methodtype_I
#Autosampler_parameters
class autosampler_parameters(Base):
    """Autosampler run parameters (injection volume, wash solvents)."""
    __tablename__ = 'autosampler_parameters'
    id = Column(String(50), nullable = False);
    injvolume_ul = Column(Float);
    washsolvent1 = Column(String(500));
    washsolvent2 = Column(String(500))
    __table_args__ = (PrimaryKeyConstraint('id'),
                      )

#Autosampler_information
class autosampler_information(Base):
    """Autosampler instrument identity (manufacturer, serial number)."""
    __tablename__ = 'autosampler_information'
    manufacturer = Column(String(100), nullable = False);
    id = Column(String(100), nullable = False);
    serial_number = Column(String(100), nullable = False)
    __table_args__ = (PrimaryKeyConstraint('id'),
                      )

#Autosampler_method
class autosampler_method(Base):
    """Autosampler method referencing its parameters and instrument.

    NOTE(review): the first ForeignKeyConstraint references this table's
    own id (autosampler_method.id) — likely a copy/paste artifact; confirm
    against the intended schema.
    """
    __tablename__ = 'autosampler_method'
    id = Column(String(50), nullable = False);
    autosampler_parameters_id = Column(String(50), nullable = False);
    autosampler_information_id = Column(String(50), nullable = False)
    __table_args__ = (PrimaryKeyConstraint('id'),
                      ForeignKeyConstraint(['id'],['autosampler_method.id'], onupdate="CASCADE"),
                      ForeignKeyConstraint(['autosampler_information_id'],['autosampler_information.id'], onupdate="CASCADE"),
                      ForeignKeyConstraint(['autosampler_parameters_id'],['autosampler_parameters.id'], onupdate="CASCADE"),
                      )
#LC_information
class lc_information(Base):
    """LC instrument identity (manufacturer, serial number)."""
    __tablename__ = 'lc_information'
    manufacturer = Column(String(100), nullable = False);
    id = Column(String(100), nullable = False);
    serial_number = Column(String(100), nullable = False)
    __table_args__ = (PrimaryKeyConstraint('id'),
                      )

#LC_gradient
class lc_gradient(Base):
    """Gradient program: parallel arrays of events, times, %B, and flow rates.

    NOTE(review): the ForeignKeyConstraint references this table's own id
    (lc_gradient.id) — likely a copy/paste artifact; confirm the schema.
    """
    __tablename__ = 'lc_gradient'
    id = Column(String(50), nullable = False);
    lc_event = Column(postgresql.ARRAY(Integer), nullable = False);
    lc_time = Column(postgresql.ARRAY(Float), nullable = False);
    percent_b = Column(postgresql.ARRAY(Float), nullable = False);
    flow_rate = Column(postgresql.ARRAY(Float), nullable = False);
    lc_time_units = Column(String(25), nullable = False);
    flow_rate_units = Column(String(25), nullable = False)
    __table_args__ = (PrimaryKeyConstraint('id'),
                      ForeignKeyConstraint(['id'],['lc_gradient.id'], onupdate="CASCADE"),
                      )

#LC_parameters
class lc_parameters(Base):
    """Column and mobile-phase description for an LC method."""
    __tablename__ = 'lc_parameters'
    id = Column(String(50), nullable = False);
    column_name = Column(String(100), nullable = False);
    dimensions_and_particle_size = Column(String(100), nullable = False);
    mobile_phase_a = Column(String(100), nullable = False);
    mobile_phase_b = Column(String(100), nullable = False);
    oven_temperature = Column(String(100), nullable = False);
    notes = Column(Text)
    __table_args__ = (PrimaryKeyConstraint('id'),
                      )

#LC_method
class lc_method(Base):
    """LC method tying instrument, gradient, and parameters together.

    NOTE(review): lc_gradient_id has no ForeignKeyConstraint, unlike the
    other two references — confirm whether that is intentional.
    """
    __tablename__ = 'lc_method'
    id = Column(String(50), nullable = False);
    lc_information_id = Column(String(100), nullable = False);
    lc_gradient_id = Column(String(50), nullable = False);
    lc_parameters_id = Column(String(50), nullable = False)
    __table_args__ = (PrimaryKeyConstraint('id'),
                      ForeignKeyConstraint(['lc_information_id'],['lc_information.id'], onupdate="CASCADE"),
                      ForeignKeyConstraint(['lc_parameters_id'],['lc_parameters.id'], onupdate="CASCADE"),
                      )

#LC_elution
class lc_elution(Base):
    """Expected retention time (and MS acquisition window) per metabolite."""
    __tablename__ = 'lc_elution'
    id=Column(String(length=50), nullable = False, primary_key=True)
    met_id=Column(String(length=50), nullable = False)
    rt=Column(Float, default = 0.0)
    ms_window=Column(Float, default = 60.0)
    rt_units=Column(String(length=20))
    window_units=Column(String(length=20))
    __table_args__ = (PrimaryKeyConstraint('id'),
                      ForeignKeyConstraint(['id'],['lc_method.id'], onupdate="CASCADE"),
                      )
#acquisition_method
class acquisition_method(Base):
    """Bundle of MS, autosampler and LC methods used for one acquisition."""
    #__table__ = make_table('acquisition_method')
    __tablename__ = 'acquisition_method'
    id = Column(String(50), nullable = False);
    ms_method_id = Column(String(50));
    autosampler_method_id = Column(String(50));
    lc_method_id = Column(String(50))
    __table_args__ = (PrimaryKeyConstraint('id'),
                      ForeignKeyConstraint(['lc_method_id'],['lc_method.id'], onupdate="CASCADE"),
                      ForeignKeyConstraint(['ms_method_id'],['ms_method.id'], onupdate="CASCADE"),
                      )
    def __init__(self,id_I, ms_method_id_I,autosampler_method_id_I,lc_method_id_I):
        # Straight field-for-field assignment from the *_I constructor args.
        self.id = id_I;
        self.ms_method_id = ms_method_id_I;
        self.autosampler_method_id = autosampler_method_id_I;
        self.lc_method_id = lc_method_id_I;
#quantitation_method
class quantitation_method(Base):
    """Calibration-curve parameters for one component of a quantitation method.

    Composite primary key ('id', 'component_name'); 'id' cascades deletes
    from quantitation_method_list.
    """
    #__table__ = make_table('quantitation_method')
    __tablename__ = 'quantitation_method'
    id = Column(String(50), nullable = False);
    q1_mass = Column(Float);
    q3_mass = Column(Float);
    met_id = Column(String(50));
    component_name = Column(String(100), nullable = False);
    is_name = Column(String(100))
    fit = Column(String(20));
    weighting = Column(String(20));
    intercept = Column(Float);
    slope = Column(Float);
    correlation = Column(Float);
    use_area = Column(Boolean, default = False)
    lloq = Column(Float);
    uloq = Column(Float);
    points = Column(Integer)
    __table_args__ = (PrimaryKeyConstraint('id','component_name'),
                      ForeignKeyConstraint(['id'],['quantitation_method_list.quantitation_method_id'], ondelete="CASCADE"),
                      )
    def __init__(self,id_I, q1_mass_I,q3_mass_I,met_id_I,component_name_I,is_name_I,fit_I,
                 weighting_I,intercept_I,slope_I,correlation_I,use_area_I,lloq_I,uloq_I,
                 points_I):
        self.id = id_I;
        self.q1_mass = q1_mass_I;
        self.q3_mass = q3_mass_I;
        self.met_id = met_id_I;
        self.component_name = component_name_I;
        self.is_name = is_name_I;
        self.fit = fit_I;
        self.weighting = weighting_I;
        self.intercept = intercept_I;
        self.slope = slope_I;
        self.correlation = correlation_I;
        self.use_area = use_area_I;
        self.lloq = lloq_I;
        self.uloq = uloq_I;
        self.points = points_I;
#quantitation_method_list
class quantitation_method_list(Base):
    """Registry of quantitation-method ids (parent table for quantitation_method)."""
    #__table__ = make_table('quantitation_method_list')
    __tablename__ = 'quantitation_method_list'
    quantitation_method_id = Column(String(50), nullable = False)
    __table_args__ = (PrimaryKeyConstraint('quantitation_method_id'),
                      )
#Samples
#sample
class sample(Base):
    """A named sample run; links to storage, physiology and description rows
    through sample_id and carries calibrator/dilution metadata.
    """
    #__table__ = make_table('sample')
    __tablename__ = 'sample'
    sample_name = Column(String(500), nullable = False);
    sample_type = Column(String(100), nullable = False);
    calibrator_id = Column(Integer);
    calibrator_level = Column(Integer);
    sample_id = Column(String(500));
    sample_dilution = Column(Float, default = 1.0)
    __table_args__ = (PrimaryKeyConstraint('sample_name'),
                      ForeignKeyConstraint(['sample_id'],['sample_storage.sample_id']),
                      ForeignKeyConstraint(['sample_id'],['sample_physiologicalparameters.sample_id']),
                      ForeignKeyConstraint(['sample_id'],['sample_description.sample_id']),
                      )
    def __init__(self,sample_name_I,sample_type_I,calibrator_id_I,calibrator_level_I,sample_id_I,sample_dilution_I):
        self.sample_name=sample_name_I;
        self.sample_type=sample_type_I;
        self.calibrator_id=calibrator_id_I;
        self.calibrator_level=calibrator_level_I;
        self.sample_id=sample_id_I;
        self.sample_dilution=sample_dilution_I;
#sample_storage
class sample_storage(Base):
    """Physical storage location (box/position) and pH of a stored sample."""
    #__table__ = make_table('sample_storage')
    __tablename__ = 'sample_storage'
    sample_id=Column(String(500),nullable=False, primary_key=True)
    sample_label=Column(String(50), nullable = False)
    #sample_dateAndTime=Column(DateTime)
    ph=Column(Float)
    box=Column(Integer)
    pos=Column(Integer)
    def __init__(self,sample_id_I,
                 #sample_dateAndTime_I,
                 sample_label_I,ph_I,box_I,pos_I):
        self.sample_id = sample_id_I
        self.sample_label = sample_label_I
        #self.sample_dateAndTime = sample_dateAndTime_I
        self.ph = ph_I
        self.box = box_I
        self.pos = pos_I
#sample_physiologicalParameters
class sample_physiologicalParameters(Base):
    """Culture/physiological measurements associated with a sample
    (growth conditions, media, densities, cell/dry/wet weights and units).
    """
    #__table__ = make_table('sample_physiologicalparameters')
    __tablename__ = 'sample_physiologicalparameters'
    sample_id=Column(String(500),nullable=False, primary_key=True)
    growth_condition_short=Column(Text)
    growth_condition_long=Column(Text)
    media_short=Column(Text)
    media_long=Column(Text)
    isoxic=Column(Boolean)
    temperature=Column(Float)
    supplementation=Column(String(100))
    od600=Column(Float)
    vcd=Column(Float)
    culture_density=Column(Float)
    culture_volume_sampled=Column(Float)
    cells=Column(Float)
    dcw=Column(Float)
    wcw=Column(Float)
    vcd_units=Column(String(10))
    culture_density_units=Column(String(10))
    culture_volume_sampled_units=Column(String(10))
    dcw_units=Column(String(10))
    wcw_units=Column(String(10))
    def __init__(self,sample_id_I,growth_condition_short_I,growth_condition_long_I,
                 media_short_I,media_long_I,isoxic_I,temperature_I,supplementation_I,od600_I,
                 vcd_I,culture_density_I,culture_volume_sampled_I,cells_I,dcw_I,wcw_I,vcd_units_I,
                 culture_density_units_I,culture_volume_sampled_units_I,dcw_units_I,wcw_units_I):
        # Field-for-field assignment of the *_I constructor arguments.
        self.sample_id = sample_id_I
        self.growth_condition_short = growth_condition_short_I
        self.growth_condition_long = growth_condition_long_I
        self.media_short = media_short_I
        self.media_long = media_long_I
        self.isoxic = isoxic_I
        self.temperature = temperature_I
        self.supplementation = supplementation_I
        self.od600 = od600_I
        self.vcd = vcd_I
        self.culture_density = culture_density_I
        self.culture_volume_sampled = culture_volume_sampled_I
        self.cells = cells_I
        self.dcw = dcw_I
        self.wcw = wcw_I
        self.vcd_units = vcd_units_I
        self.culture_density_units = culture_density_units_I
        self.culture_volume_sampled_units = culture_volume_sampled_units_I
        self.dcw_units = dcw_units_I
        self.wcw_units = wcw_units_I
#sample_description
class sample_description(Base):
    """Descriptive metadata for a sample (naming, date, condition, extraction,
    internal-standard and reconstitution volumes, replicate info).
    """
    #__table__ = make_table('sample_description')
    __tablename__ ='sample_description'
    sample_id=Column(String(500),nullable=False, primary_key=True);
    sample_name_short=Column(String(100));
    sample_name_abbreviation=Column(String(50));
    sample_date=Column(Date,nullable=False);
    time_point=Column(String(50),nullable=False);
    sample_condition=Column(String(100),nullable=False);
    extraction_method_id=Column(String(500));
    biological_material=Column(String(100),nullable=False);
    #sample_description=Column(String(100),nullable=False);
    sample_desc=Column(String(100),nullable=False);
    sample_replicate=Column(Integer);
    is_added=Column(Float);
    is_added_units=Column(String(10));
    reconstitution_volume=Column(Float);
    reconstitution_volume_units=Column(String(10));
    istechnical=Column(Boolean);
    sample_replicate_biological=Column(Integer);
    notes=Column(Text);
    # NOTE(review): the constructor parameter is named sample_description_I
    # but is stored in the 'sample_desc' column (the commented-out
    # 'sample_description' column above explains the rename).
    def __init__(self,sample_id_I,sample_name_short_I,sample_name_abbreviation_I,
                 sample_date_I,time_point_I,sample_condition_I,extraction_method_id_I,
                 biological_material_I,sample_description_I,sample_replicate_I,
                 is_added_I,is_added_units_I,reconstitution_volume_I,reconstitution_volume_units_I,
                 sample_replicate_biological_I,istechnical_I,notes_I):
        self.sample_id=sample_id_I
        self.sample_name_short=sample_name_short_I
        self.sample_name_abbreviation=sample_name_abbreviation_I
        self.sample_date=sample_date_I
        self.time_point=time_point_I
        self.sample_condition=sample_condition_I
        self.extraction_method_id=extraction_method_id_I
        self.biological_material=biological_material_I
        self.sample_desc=sample_description_I
        self.sample_replicate=sample_replicate_I
        self.is_added=is_added_I
        self.is_added_units=is_added_units_I
        self.reconstitution_volume=reconstitution_volume_I
        self.reconstitution_volume_units=reconstitution_volume_units_I
        self.sample_replicate_biological=sample_replicate_biological_I
        self.istechnical=istechnical_I
        self.notes=notes_I
#sample_massVolumeConversion
class sample_massVolumeConversion(Base):
    """Mass/volume conversion factors per biological material, with literature reference."""
    #__table__ = make_table('sample_massvolumeconversion')
    __tablename__ = 'sample_massvolumeconversion'
    biological_material=Column(String(100),nullable=False, primary_key=True);
    conversion_name=Column(String(50),nullable=False, primary_key=True);
    conversion_factor=Column(Float);
    conversion_units=Column(String(50),nullable=False);
    conversion_reference=Column(String(500),nullable=False);
#IS
#internal_standard
class internal_standard(Base):
    """Internal-standard batch record: preparation date, experimenter and extraction method."""
    #__table__ = make_table('internal_standard')
    __tablename__ = 'internal_standard'
    is_id = Column(Integer, nullable = False);
    is_date = Column(DateTime, nullable = False);
    experimentor_id = Column(String(50), nullable = False);
    extraction_method_id = Column(String(50))
    __table_args__ = (PrimaryKeyConstraint('is_id'),
                      ForeignKeyConstraint(['is_id'],['internal_standard_storage.is_id']),
                      )
#internal_standard_storage
class internal_standard_storage(Base):
    """Storage details for an internal-standard batch (concentration, aliquots, box positions)."""
    #__table__ = make_table('internal_standard_storage')
    __tablename__ = 'internal_standard_storage'
    is_id = Column(Integer, nullable = False);
    concentration = Column(Float);
    concentration_units = Column(String(10));
    aliquots = Column(Integer);
    aliquot_volume = Column(Float);
    aliquot_volume_units = Column(String(10));
    solvent = Column(String(100));
    ph = Column(Float);
    box = Column(Integer);
    posstart = Column(Integer);
    posend = Column(Integer)
    __table_args__ = (PrimaryKeyConstraint('is_id'),
                      )
#experiments
#experiment_types
class experiment_types(Base):
    """Lookup table mapping an integer experiment-type id to a name."""
    #__table__ = make_table('experiment_types')
    __tablename__ = 'experiment_types'
    id = Column(Integer, nullable = False);
    experiment_name = Column(String(100))
    __table_args__ = (PrimaryKeyConstraint('id'),
                      )
#experiment
class experiment(Base):
    """Core experiment record: links a sample to the acquisition/quantitation
    methods, experimenter, extraction method and internal standard used.

    Composite primary key is ('id', 'sample_name'); 'wid' is a unique,
    sequence-generated surrogate id.
    """
    #__table__ = make_table('experiment')
    __tablename__ = 'experiment'
    wid = Column(Integer, Sequence('wids'),nullable=False,)
    exp_type_id=Column(Integer);
    id=Column(String(50),nullable=False);
    sample_name=Column(String(500),nullable=False);
    experimentor_id=Column(String(50));
    extraction_method_id=Column(String(50));
    acquisition_method_id=Column(String(50),nullable=False);
    quantitation_method_id=Column(String(50));
    internal_standard_id=Column(Integer);
    __table_args__ = (
        PrimaryKeyConstraint('id','sample_name'),
        ForeignKeyConstraint(['acquisition_method_id'], ['acquisition_method.id'], onupdate="CASCADE"),
        ForeignKeyConstraint(['exp_type_id'], ['experiment_types.id'], ondelete="CASCADE"),
        ForeignKeyConstraint(['experimentor_id'], ['experimentor_list.experimentor_id']),
        ForeignKeyConstraint(['extraction_method_id'], ['extraction_method.id']),
        ForeignKeyConstraint(['internal_standard_id'], ['internal_standard.is_id']),
        ForeignKeyConstraint(['quantitation_method_id'], ['quantitation_method_list.quantitation_method_id']),
        ForeignKeyConstraint(['sample_name'], ['sample.sample_name']),
        UniqueConstraint('wid'),
        )
    def __init__(self,exp_type_id_I,id_I,sample_name_I,
                 experimentor_id_I,extraction_method_id_I,
                 acquisition_method_id_I,quantitation_method_id_I,
                 internal_standard_id_I):
        self.exp_type_id=exp_type_id_I;
        self.id=id_I;
        self.sample_name=sample_name_I;
        self.experimentor_id=experimentor_id_I;
        self.extraction_method_id=extraction_method_id_I;
        self.acquisition_method_id=acquisition_method_id_I;
        self.quantitation_method_id=quantitation_method_id_I;
        self.internal_standard_id=internal_standard_id_I;
    #TODO:
    #define relations
    #define representation
    def __repr__(self):
        return "experiment: %s" % (self.id)
    #JSON representation
    def __repr__dict__(self):
        """Return the record as a plain dict (used for JSON serialization)."""
        # BUG FIX: the key was misspelled "extraction_method_ide"; it now
        # matches the column name "extraction_method_id".
        return {"id":self.id,
                "sample_name":self.sample_name,
                "experimentor_id":self.experimentor_id,
                "extraction_method_id":self.extraction_method_id,
                "acquisition_method_id":self.acquisition_method_id,
                "quantitation_method_id":self.quantitation_method_id,
                "internal_standard_id":self.internal_standard_id}
    def __repr__json__(self):
        """Return the record serialized as a JSON string."""
        return json.dumps(self.__repr__dict__())
#data_versions
class data_versions(Base):
    """Audit row recording a before/after change to a quantified data point
    (concentration, units, used flag, data stage) with a modification timestamp.
    """
    __tablename__ = 'data_versions'
    experiment_id = Column(String(50), nullable = False);
    sample_name = Column(String(500), nullable = False);
    component_name = Column(String(500), nullable = False);
    acquisition_date_and_time = Column(DateTime, nullable = False);
    concentration_before = Column(Float);
    concentration_after = Column(Float);
    concentration_units_before = Column(String(20));
    concentration_units_after = Column(String(20));
    used_before = Column(Boolean);
    used_after = Column(Boolean);
    data_stage_before = Column(Integer);
    data_stage_after = Column(Integer);
    data_stage_modtime = Column(DateTime, default=datetime.datetime.utcnow, nullable = False);
    __table_args__ = (PrimaryKeyConstraint('experiment_id','sample_name','component_name','data_stage_modtime'),
                      ForeignKeyConstraint(['experiment_id','sample_name'],['experiment.id','experiment.sample_name']),
                      ForeignKeyConstraint(['sample_name','component_name','acquisition_date_and_time'],['data_stage01_quantification_mqresultstable.sample_name','data_stage01_quantification_mqresultstable.component_name','data_stage01_quantification_mqresultstable.acquisition_date_and_time']),
                      )
    #define representation
    def __repr__(self):
        # BUG FIX: the original used a single "%s" with a 4-tuple, which
        # raises TypeError ("not all arguments converted") whenever repr()
        # is called; one placeholder per value fixes that.
        return "data_versions %s %s %s %s" % (self.experiment_id, self.sample_name,
                                              self.component_name, self.acquisition_date_and_time)
#Biological material
#biologicalmaterial_storage
class biologicalMaterial_storage(Base):
    """Storage location (label, box, position range, date) for a biological material."""
    __tablename__ = 'biologicalmaterial_storage'
    biologicalmaterial_id = Column(String(100), primary_key=True)
    biologicalmaterial_label = Column(String(100))
    biologicalmaterial_box = Column(Integer)
    biologicalmaterial_posstart = Column(Integer)
    biologicalmaterial_posend = Column(Integer)
    biologicalmaterial_date = Column(DateTime)
    def __init__(self,biologicalmaterial_id_I,biologicalmaterial_label_I,biologicalmaterial_box_I,
                 biologicalmaterial_posstart_I,biologicalmaterial_posend_I,biologicalmaterial_date_I):
        self.biologicalmaterial_id = biologicalmaterial_id_I
        self.biologicalmaterial_label = biologicalmaterial_label_I
        self.biologicalmaterial_box = biologicalmaterial_box_I
        self.biologicalmaterial_posstart = biologicalmaterial_posstart_I
        self.biologicalmaterial_posend = biologicalmaterial_posend_I
        self.biologicalmaterial_date = biologicalmaterial_date_I
#biologicalmaterial_description
class biologicalMaterial_description(Base):
    """Free-text description (strain, description, notes) of a biological material."""
    __tablename__ = 'biologicalmaterial_description'
    biologicalmaterial_id = Column(String(100), primary_key=True)
    biologicalmaterial_strain = Column(String(100))
    biologicalmaterial_description = Column(Text)
    biologicalmaterial_notes = Column(Text)
    def __init__(self,biologicalmaterial_id_I,biologicalmaterial_strain_I,biologicalmaterial_description_I,
                 biologicalmaterial_notes_I):
        self.biologicalmaterial_id = biologicalmaterial_id_I
        self.biologicalmaterial_strain = biologicalmaterial_strain_I
        self.biologicalmaterial_description = biologicalmaterial_description_I
        self.biologicalmaterial_notes = biologicalmaterial_notes_I
#biologicalmaterial_description
class biologicalMaterial_geneReferences(Base):
    """Cross-reference of gene identifiers (locus names, SwissProt, EcoGene, AC)
    for a biological material; surrogate integer primary key from a sequence.
    """
    __tablename__ = 'biologicalmaterial_genereferences'
    id = Column(Integer, Sequence('biologicalmaterial_genereferences_id_seq'), primary_key=True)
    biologicalmaterial_id = Column(String(100))
    ordered_locus_name = Column(String(20))
    ordered_locus_name2 = Column(String(100))
    swissprot_entry_name = Column(String(20))
    ac = Column(String(20))
    ecogene_accession_number = Column(String(20))
    gene_name = Column(String(20))
    def __init__(self,biologicalmaterial_id_I,
                 ordered_locus_name_I,
                 ordered_locus_name2_I,
                 swissprot_entry_name_I,
                 ac_I,
                 ecogene_accession_number_I,
                 gene_name_I):
        self.biologicalmaterial_id=biologicalmaterial_id_I
        self.ordered_locus_name=ordered_locus_name_I
        self.ordered_locus_name2=ordered_locus_name2_I
        self.swissprot_entry_name=swissprot_entry_name_I
        self.ac=ac_I
        self.ecogene_accession_number=ecogene_accession_number_I
        self.gene_name=gene_name_I
    #TODO:
    #define relations
    def __repr__dict__(self):
        # Plain-dict representation used for JSON serialization.
        return {'biologicalmaterial_id':self.biologicalmaterial_id,
                'ordered_locus_name':self.ordered_locus_name,
                'ordered_locus_name2':self.ordered_locus_name2,
                'swissprot_entry_name':self.swissprot_entry_name,
                'ac':self.ac,
                'ecogene_accession_number':self.ecogene_accession_number,
                'gene_name':self.gene_name}
    def __repr__json__(self):
        return json.dumps(self.__repr__dict__())
#Oligos
#oligos_description
class oligos_description(Base):
    """Sequence, purification and notes for an oligo."""
    __tablename__ = 'oligos_description'
    oligos_id = Column(String(100), primary_key=True)
    oligos_sequence = Column(Text)
    oligos_purification = Column(String(100))
    oligos_description = Column(Text)
    oligos_notes = Column(Text)
    def __init__(self,oligos_id_I,oligos_sequence_I,
                 oligos_purification_I,oligos_description_I,
                 oligos_notes_I):
        self.oligos_id = oligos_id_I
        self.oligos_sequence = oligos_sequence_I
        self.oligos_purification = oligos_purification_I
        self.oligos_description = oligos_description_I
        self.oligos_notes = oligos_notes_I
#oligos_storage
class oligos_storage(Base):
    """Storage details for an oligo (box/position, buffer, concentration)."""
    __tablename__ = 'oligos_storage'
    oligos_id = Column(String(100), primary_key=True)
    oligos_label = Column(String(100))
    oligos_box = Column(Integer)
    oligos_posstart = Column(Integer)
    oligos_posend = Column(Integer)
    oligos_date = Column(DateTime)
    oligos_storagebuffer = Column(String(100))
    oligos_concentration = Column(Float)
    oligos_concentration_units = Column(String(20))
    def __init__(self,oligos_id_I,oligos_label_I,oligos_box_I,
                 oligos_posstart_I,oligos_posend_I,oligos_date_I,
                 oligos_storagebuffer_I,oligos_concentration_I,
                 oligos_concentration_units_I):
        self.oligos_id = oligos_id_I
        self.oligos_label = oligos_label_I
        self.oligos_box = oligos_box_I
        self.oligos_posstart = oligos_posstart_I
        self.oligos_posend = oligos_posend_I
        self.oligos_date = oligos_date_I
        self.oligos_storagebuffer = oligos_storagebuffer_I
        self.oligos_concentration = oligos_concentration_I
        self.oligos_concentration_units = oligos_concentration_units_I
#Models
#models_lumpedRxns
class models_lumpedRxns(Base):
    """A lumped reaction: an aggregate reaction composed of constituent
    reactions with their stoichiometric coefficients (parallel arrays).
    """
    __tablename__ = 'lumpedRxns'
    lumped_id = Column(String(100), primary_key=True)
    lumped_date = Column(DateTime)
    lumped_description = Column(String(500))
    rxn_id = Column(String(100))
    reactions = Column(postgresql.ARRAY(String(100))) # rxn_id
    stoichiometry = Column(postgresql.ARRAY(Float))
    def __init__(self,lumped_id_I,lumped_date_I,lumped_description_I,rxn_id_I,reactions_I,stoichiometry_I):
        self.lumped_id=lumped_id_I
        self.lumped_date=lumped_date_I
        self.lumped_description=lumped_description_I
        self.rxn_id=rxn_id_I
        self.reactions=reactions_I
        self.stoichiometry=stoichiometry_I
#Project
#class project(Base):
# __tablename__ = 'project'
# id = Column(Integer, Sequence('project_id'),primary_key=True)
# project_id=Column(String(50),nullable=False); #1
# analysis_result_id = Column(String(500));
# analysis_id = Column(String(500));
# analysis_type = Column(String(100)); # time-course (i.e., multiple time points), paired (i.e., control compared to multiple replicates), group (i.e., single grouping of samples).
# analysis_method = Column(String(100)); #table identifier
# simulation_id = Column(String(500))
# simulation_dateAndTime = Column(DateTime);
# simulation_type = Column(String(50)); # sampling, fva, sra, fba, fba-loopless, pfba, etc.
# model_id = Column(String(50))
# experiment_type=Column(Integer); #2a
# experiment_id=Column(String(50)); #2
# sample_id=Column(String(500));
# sample_name_abbreviation = Column(String(100));
# sample_replicate = Column(Integer);
# sample_dateAndTime = Column(DateTime);
# time_point = Column(String(10));
# __table_args__ = (
# UniqueConstraint('project_id','analysis_id','analysis_method'),
# )
# def __init__(self,exp_type_id_I,id_I,sample_name_I,
# experimentor_id_I,extraction_method_id_I,
# acquisition_method_id_I,quantitation_method_id_I,
# internal_standard_id_I):
# self.exp_type_id=exp_type_id_I;
# self.id=id_I;
# self.sample_name=sample_name_I;
# self.experimentor_id=experimentor_id_I;
# self.extraction_method_id=extraction_method_id_I;
# self.acquisition_method_id=acquisition_method_id_I;
# self.quantitation_method_id=quantitation_method_id_I;
# self.internal_standard_id=internal_standard_id_I;
# #TODO:
# #define relations
# #define representation
# def __repr__(self):
# return "experiment: %s" % (self.id)
# #JSON representation
# def __repr__dict__(self):
# return {"id":self.id,
# "sample_name":self.sample_name,
# "experimentor_id":self.experimentor_id,
# "extraction_method_ide":self.extraction_method_id,
# "acquisition_method_id":self.acquisition_method_id,
# "quantitation_method_id":self.quantitation_method_id,
# "internal_standard_id":self.internal_standard_id}
# def __repr__json__(self):
# return json.dumps(self.__repr__dict__())
|
20,838 | 530d60aee1439e151dd4e2a345e118d9cb311cbe | from __future__ import print_function
from .layers import MoleculeConv
from keras.models import Sequential
from keras.layers.core import Dense
from keras.optimizers import RMSprop, Adam
import numpy as np
import theano.tensor as T
from keras import initializations
from keras.utils.visualize_util import plot
import json
import datetime
import logging
import time
def build_model(embedding_size=512, attribute_vector_size=33, depth=5,
                scale_output=0.05, padding=False,
                mol_conv_inner_activation='tanh',
                mol_conv_outer_activation='softmax',
                hidden=50, hidden_activation='tanh',
                output_activation='linear', output_size=1,
                lr=0.01, optimizer='adam', loss='mse'):
    """
    Build a generic CNN model that takes a molecule tensor and predicts an
    output of size ``output_size``.

    Raises:
        ValueError: if ``optimizer`` is not 'adam' or 'rmsprop'.
    """
    model = Sequential()
    model.add(MoleculeConv(units=embedding_size,
                           inner_dim=attribute_vector_size-1,
                           depth=depth,
                           scale_output=scale_output,
                           padding=padding,
                           activation_inner=mol_conv_inner_activation,
                           activation_output=mol_conv_outer_activation))
    logging.info('cnn_model: added MoleculeConv layer ({} -> {})'.format('mol', embedding_size))
    if hidden > 0:
        model.add(Dense(hidden, activation=hidden_activation))
        logging.info('cnn_model: added {} Dense layer (-> {})'.format(hidden_activation, hidden))
    model.add(Dense(output_size, activation=output_activation))
    logging.info('cnn_model: added {} Dense layer (-> {})'.format(output_activation, output_size))
    # Compile
    if optimizer == 'adam':
        optimizer = Adam(lr=lr)
    elif optimizer == 'rmsprop':
        optimizer = RMSprop(lr=lr)
    else:
        # BUG FIX: the original logged at INFO level and called quit(1),
        # which kills the whole interpreter; raising lets callers handle it.
        raise ValueError("Can only handle 'adam' or 'rmsprop' optimizers, "
                         "got {!r}".format(optimizer))
    if loss == 'custom':
        # NOTE(review): mse_no_NaN is not defined or imported anywhere in
        # this module — confirm where it is supposed to come from.
        loss = mse_no_NaN
    logging.info('compiling cnn_model...')
    model.compile(loss=loss, optimizer=optimizer)
    logging.info('done compiling.')
    return model
def train_model(model,
                X_train,
                y_train,
                X_inner_val,
                y_inner_val,
                X_test,
                y_test,
                X_outer_val=None,
                y_outer_val=None,
                nb_epoch=0,
                batch_size=50,
                lr_func='0.01',
                patience=10):
    """
    Train a Keras model with manual batching, inner-validation early stopping
    and an epoch-dependent learning-rate schedule.

    inputs:
        model - a Keras model
        data - X_train, X_inner_val, X_outer_val, y_train, y_inner_val,
            y_outer_val, X_test, y_test
        nb_epoch - number of epochs to train for
        batch_size - training mini-batch size
        lr_func - string expression of 'epoch' giving the learning rate
        patience - epochs to wait without inner-validation progress before
            stopping early (-1 enables best-weights checkpointing instead)

    outputs:
        (model, loss, inner_val_loss, mean_outer_val_loss, mean_test_loss)
    """
    X_train = np.array(X_train)
    y_train = np.array(y_train)
    # BUG FIX: the original used the Python-2-only statement
    # `exec lr_func_string`, a SyntaxError under Python 3; evaluating a
    # lambda expression works on both and binds `lr` in this scope.
    lr = eval('lambda epoch: ({})'.format(lr_func))
    # BUG FIX: define results before the try-block so an early
    # KeyboardInterrupt cannot cause a NameError at the return statement.
    loss = []
    inner_val_loss = []
    mean_outer_val_loss = None
    mean_test_loss = None
    # Fit (allows keyboard interrupts in the middle)
    try:
        wait = 0
        prev_best_inner_val_loss = 99999999
        for i in range(nb_epoch):
            logging.info('\nEpoch {}/{}, lr = {}'.format(i + 1, nb_epoch, lr(i)))
            this_loss = []
            this_inner_val_loss = []
            model.optimizer.lr.set_value(lr(i))
            # Run through training set
            logging.info('Training with batch size: {0}...'.format(batch_size))
            epoch_training_start = time.time()
            training_size = len(X_train)
            batch_num = int(np.ceil(float(training_size) / batch_size))
            # BUG FIX: np.random.shuffle(range(n)) fails on Python 3, where
            # range() is immutable; permutation returns a shuffled index array.
            training_order = np.random.permutation(training_size)
            for batch_idx in range(batch_num):
                start = batch_idx * batch_size
                end = min(start + batch_size, training_size)
                single_mol_as_array = X_train[training_order[start:end]]
                single_y_as_array = y_train[training_order[start:end]]
                sloss = model.train_on_batch(single_mol_as_array, single_y_as_array)
                this_loss.append(sloss)
            epoch_training_end = time.time()
            logging.info('Training takes {0:0.1f} secs..'.format(epoch_training_end - epoch_training_start ))
            # Run through inner-validation set one example at a time
            logging.info('Inner Validating..')
            for j in range(len(X_inner_val)):
                single_mol_as_array = np.array(X_inner_val[j:j+1])
                single_y_as_array = np.reshape(y_inner_val[j], (1, -1))
                sloss = model.test_on_batch(single_mol_as_array, single_y_as_array)
                this_inner_val_loss.append(sloss)
            loss.append(np.mean(this_loss))
            inner_val_loss.append(np.mean(this_inner_val_loss))
            logging.info('mse loss: {}\tmse inner_val_loss: {}'.format(loss[i], inner_val_loss[i]))
            # report outer_val and test loss
            if i % 1 == 0:
                if X_outer_val:
                    mean_outer_val_loss = evaluate_mean_tst_loss(model, X_outer_val, y_outer_val)
                    logging.info('mse outer_val_loss: {}'.format(mean_outer_val_loss))
                mean_test_loss = evaluate_mean_tst_loss(model, X_test, y_test)
                logging.info('mse test_loss: {}'.format(mean_test_loss))
            # Check progress
            if np.mean(this_inner_val_loss) < prev_best_inner_val_loss:
                wait = 0
                prev_best_inner_val_loss = np.mean(this_inner_val_loss)
                if patience == -1:
                    model.save_weights('train_cnn_results/best.h5', overwrite=True)
            else:
                wait = wait + 1
                logging.info('{} epochs without inner_val_loss progress'.format(wait))
                if wait == patience:
                    logging.info('stopping early!')
                    break
        if patience == -1:
            model.load_weights('train_cnn_results/best.h5')
        # evaluate outer validation loss and test loss upon final model
        if X_outer_val:
            mean_outer_val_loss = evaluate_mean_tst_loss(model, X_outer_val, y_outer_val)
        else:
            mean_outer_val_loss = None
        mean_test_loss = evaluate_mean_tst_loss(model, X_test, y_test)
    except KeyboardInterrupt:
        logging.info('User terminated training early (intentionally)')
    return (model, loss, inner_val_loss, mean_outer_val_loss, mean_test_loss)
def evaluate_mean_tst_loss(model, X_test, y_test):
"""
Given final model and test examples
returns mean test loss: a float number
"""
test_losses = []
for j in range(len(X_test)):
single_mol_as_array = np.array(X_test[j:j+1])
single_y_as_array = np.reshape(y_test[j], (1, -1))
sloss = model.test_on_batch(single_mol_as_array, single_y_as_array)
test_losses.append(sloss)
mean_test_loss = np.mean(test_losses)
return mean_test_loss
def reset_model(model):
    """
    Given a Keras model consisting only of MoleculeConv, Dense, and Dropout layers,
    this function will reset the trainable weights to save time for CV tests.

    Re-initializes each layer's weights in place using the layer's own
    initializers and returns the same model object.

    Raises:
        ValueError: if the model contains a layer type other than
            MoleculeConv, Dense or Dropout.
    """
    for layer in model.layers:
        # Note: these are custom depending on the layer type
        if '.MoleculeConv' in str(layer):
            W_inner = layer.init_inner((layer.inner_dim, layer.inner_dim))
            b_inner = np.zeros((1, layer.inner_dim))
            # Inner weights: one (inner_dim x inner_dim) matrix per depth
            # level, tiled then perturbed with uniform noise.
            layer.W_inner.set_value((T.tile(W_inner, (layer.depth + 1, 1, 1)).eval() + \
                initializations.uniform((layer.depth + 1, layer.inner_dim, layer.inner_dim)).eval()).astype(np.float32))
            layer.b_inner.set_value((T.tile(b_inner, (layer.depth + 1, 1, 1)).eval() + \
                initializations.uniform((layer.depth + 1, 1, layer.inner_dim)).eval()).astype(np.float32))
            # Outer weights
            W_output = layer.init_output((layer.inner_dim, layer.units), scale = layer.scale_output)
            b_output = np.zeros((1, layer.units))
            # Initialize weights tensor (no noise added for the output weights)
            layer.W_output.set_value((T.tile(W_output, (layer.depth + 1, 1, 1)).eval()).astype(np.float32))
            layer.b_output.set_value((T.tile(b_output, (layer.depth + 1, 1, 1)).eval()).astype(np.float32))
            logging.info('graphFP layer reset')
        elif '.Dense' in str(layer):
            layer.W.set_value((layer.init(layer.W.shape.eval()).eval()).astype(np.float32))
            layer.b.set_value(np.zeros(layer.b.shape.eval(), dtype=np.float32))
            logging.info('dense layer reset')
        elif '.Dropout' in str(layer):
            # Dropout has no trainable weights; nothing to reset.
            logging.info('dropout unchanged')
        else:
            raise ValueError('Unknown layer {}, cannot reset weights'.format(str(layer)))
    logging.info('Reset model weights')
    return model
def save_model(model, loss, inner_val_loss, mean_outer_val_loss, mean_test_loss, fpath):
    """
    Saves NN model object and associated information.

    inputs:
        model - a Keras model
        loss - list of training losses
        inner_val_loss - list of inner validation losses
        mean_outer_val_loss - mean loss on outer validation set
        mean_test_loss - mean loss on test set
        fpath - root filepath to save everything to (.json, .h5, .png, .hist,
            _loss_report.txt are appended)
    """
    # Dump model architecture
    with open(fpath + '.json', 'w') as structure_fpath:
        json.dump(model.to_json(), structure_fpath)
    logging.info('...saved structural information')
    # Dump weights
    model.save_weights(fpath + '.h5', overwrite = True)
    logging.info('...saved weights')
    # Dump image (best-effort: plotting needs optional graphviz/pydot support)
    try:
        plot(model, to_file = fpath + '.png')
        logging.info('...saved image')
    except Exception:
        # BUG FIX: was a bare `except: pass`, which silently swallowed even
        # KeyboardInterrupt/SystemExit; keep best-effort behavior but log it.
        logging.warning('...could not save model image (plotting unavailable)')
    # Dump history
    save_model_history_manual(loss, inner_val_loss, fpath + '.hist')
    mean_loss = loss[-1]
    mean_inner_val_loss = inner_val_loss[-1]
    write_loss_report(mean_loss, mean_inner_val_loss, mean_outer_val_loss, mean_test_loss, fpath + '_loss_report.txt')
    logging.info ('...saved history')
    logging.info('...saved model to {}.[json, h5, png]'.format(fpath))
def save_model_history_manual(loss, val_loss, fpath):
    """
    Append the per-epoch training history to a tab-delimited file.

    inputs:
        loss - list of per-epoch training losses
        val_loss - list of per-epoch validation losses (same length as loss)
        fpath - path of the history file (opened in append mode)
    """
    logging.info('trained at {}'.format(datetime.datetime.utcnow()))
    # BUG FIX: use a context manager so the file handle is closed even if an
    # unexpected exception escapes (the original only closed it on the happy
    # path or KeyError).
    with open(fpath, 'a') as fid:
        print('iteration\tloss\tval_loss', file=fid)
        try:
            for i in range(len(loss)):
                print('{}\t{}\t{}'.format(i + 1, loss[i], val_loss[i]),
                      file=fid)
        except (KeyError, IndexError):
            # IndexError added: list histories of mismatched length raise
            # IndexError, not KeyError; either way record that history is gone.
            print('<no history found>', file=fid)
def write_loss_report(mean_loss, mean_inner_val_loss, mean_outer_val_loss, mean_test_loss, fpath):
    """
    Append the training, inner/outer validation and test mean losses to the
    report file at *fpath*, one labelled line each.
    """
    with open(fpath, 'a') as report:
        print("{:50} {}".format("Training loss (mse):", mean_loss), file=report)
        print("{:50} {}".format("Inner Validation loss (mse):", mean_inner_val_loss), file=report)
        print("{:50} {}".format("Outer Validation loss (mse):", mean_outer_val_loss), file=report)
        print("{:50} {:.4f}".format("Test loss (mse):", mean_test_loss), file=report)
20,839 | 28dcfb003df3e61381599a656647b0e850b811f0 | from monotonic_binning.monotonic_woe_binning import Binning |
20,840 | 4f05e00c1090a1310591bcca06fcf48eba0e6da9 | from . base import BasePlugin
from lib.response import BaseResponse
import traceback
class NicPlugin(BasePlugin):
    """Plugin that collects NIC (network interface card) information."""

    def linux(self):
        """Run the ``nic`` command on a Linux host and return a BaseResponse.

        On success the command output is stored in ``data``; on any failure
        ``status`` is set False and ``error`` holds the full traceback.
        """
        response = BaseResponse()
        try:
            response.data = self.cmd("nic")
        except Exception:
            response.status = False
            # traceback.format_exc() gives the full stack for easier debugging
            response.error = traceback.format_exc()
        return response
20,841 | d02ea42dd13dd67e4ad5bda040e4185c2d95dd88 | import math
x=int(input("Enter a number : "))
print(math.sqrt(x))
|
20,842 | 72800a6a01b347d9304d771ef8b37d2426d4529d | from django import forms
from table.models import Table
class DeleteTableFieldForCreateSetsMixin:
    """Admin mixin: when editing an existing object, remove 'table'
    from the fieldsets (it may only be chosen at creation time).
    """

    def get_fieldsets(self, request, obj=None):
        fieldsets = super(DeleteTableFieldForCreateSetsMixin, self).get_fieldsets(request, obj)
        # obj is set only when editing; drop 'table' from the first fieldset.
        if obj:
            fieldsets[0][1]['fields'].remove('table')
        return fieldsets
class DeletePriorityFieldForCreateMixin:
    """Admin mixin: when creating a new object, remove 'priority'
    from the fieldsets (it is only editable afterwards).
    """

    def get_fieldsets(self, request, obj=None):
        fieldsets = super(DeletePriorityFieldForCreateMixin, self).get_fieldsets(request, obj)
        # obj is None only on creation; drop 'priority' from the first fieldset.
        if not obj:
            fieldsets[0][1]['fields'].remove('priority')
        return fieldsets
class TableMixin(forms.ModelForm):
    """Mixin that adds a custom ``table`` field to forms in the admin.

    When editing an existing instance, ``table`` becomes optional; when
    creating, ``priority`` becomes optional (it is absent from the create
    form).
    """
    table = forms.ModelChoiceField(queryset=Table.objects.order_by('name'), required=True)
    name = forms.CharField(required=True)
    priority = forms.IntegerField(required=True, min_value=1)
    def __init__(self, *args, **kwargs):
        super(TableMixin, self).__init__(*args, **kwargs)
        # When editing, do not touch (require) the table field.
        if kwargs.get('instance'):
            self.fields['table'].required = False
        else:
            # When creating, 'priority' is not present in the form.
            self.fields['priority'].required = False
|
20,843 | 54051e407ffbf792e68a149507df7aef4603f202 | # ----------------------------------------#
# Function that computes custom features #
# ----------------------------------------#
def CalculateFeatures(VideoEvents=None, ForumEvents=None, NVideoAndForum_=0, subms=(0, 0)):
    """
    Compute engagement features from a student's video and forum event logs.

    Parameters
    ----------
    VideoEvents : mapping or empty, optional
        Columnar event log with list-valued keys 'TimeStamp', 'EventType'
        and 'VideoID'.  None/empty means no video activity.
    ForumEvents : mapping or empty, optional
        Columnar event log with list-valued keys 'EventType' and 'PostType'.
    NVideoAndForum_ : int
        Pre-computed total number of video + forum events.
    subms : sequence of length 2
        subms[0] is an iterable of submission identifiers, subms[1] the
        current submission identifier.  NOTE(review): with the default
        (0, 0) and a non-empty VideoEvents, ``list(subms[0])`` raises
        TypeError, so callers with video activity must pass a real
        iterable — confirm against call sites.

    Returns
    -------
    dict
        Feature name -> value; empty when both event logs are empty.
    """
    # Replace the original mutable default arguments ([] and [0,0]) with
    # safe sentinels; behavior is unchanged for every call pattern.
    if VideoEvents is None:
        VideoEvents = []
    if ForumEvents is None:
        ForumEvents = []
    # Initialize features dict
    Features = {}
    # These video totals are read by the forum block below, so they must
    # exist even when there are no video events.
    TotalVideoEvents = 0
    NumberOfPlays = 0
    # Features for video events
    if len(VideoEvents) > 0:
        # Keys: TimeStamp, EventType, VideoID, CurrentTime, OldTime, NewTime, SeekType, OldSpeed, NewSpeed
        TimeStamps = VideoEvents['TimeStamp']
        TimeStampDiffs = [x[0] - x[1] for x in zip(TimeStamps[1:], TimeStamps[:-1])]
        DurationOfVideoActivity = TimeStamps[-1] - TimeStamps[0]
        # max(1, ...) guards against a single event (no diffs).
        AverageVideoTimeDiffs = sum(TimeStampDiffs) / max(1, len(TimeStampDiffs))
        EventsTypes = VideoEvents['EventType']
        # Per-event-type counts
        NumberOfPauses = EventsTypes.count('Video.Pause')
        NumberOfPlays = EventsTypes.count('Video.Play')
        NumberOfDownloads = EventsTypes.count('Video.Download')
        NumberOfLoads = EventsTypes.count('Video.Load')
        NumberOfSpeedChange = EventsTypes.count('Video.SpeedChange')
        TotalVideoEvents = NumberOfPauses + NumberOfPlays + NumberOfDownloads + NumberOfLoads + NumberOfSpeedChange
        # Watched (played or downloaded) at least one video
        SeenVideo = 1 if (NumberOfPlays > 0 or NumberOfDownloads > 0) else 0
        # Number of distinct video ids (>= 1 here since there is activity)
        DistinctIds = len(set(VideoEvents['VideoID']))
        # Plays and downloads per distinct video
        PlaysDownlsPerVideo = (NumberOfPlays + NumberOfDownloads) / DistinctIds
        # Count of the "selective" event types only
        SelectiveNumOfEvents = NumberOfPlays + NumberOfPauses + NumberOfLoads + NumberOfSpeedChange
        # Whether the current submission is the student's last one
        if list(subms[0]):
            IsLastSubm = 1 if list(subms[0])[-1] == subms[1] else 0
        else:
            IsLastSubm = 1
        TotalTimeVideo = DurationOfVideoActivity * DistinctIds
        sumbNumTimesNumVidForum = subms[1] * NVideoAndForum_
        # Append features to dictionary
        Features.update({
            'DurationOfVideoActivity': DurationOfVideoActivity,
            'AverageVideoTimeDiffs': AverageVideoTimeDiffs,
            'NumberOfPlays': NumberOfPlays,
            'NumberOfDownloads': NumberOfDownloads,
            'NumberOfPauses': NumberOfPauses,
            'SeenVideo': SeenVideo,
            'DistinctIds': DistinctIds,
            'PlaysDownlsPerVideo': PlaysDownlsPerVideo,
            'NumberOfLoads': NumberOfLoads,
            'NumberOfSpeedChange': NumberOfSpeedChange,
            'SelectiveNumOfEvents': SelectiveNumOfEvents,
            'NVideoAndForum_': NVideoAndForum_,
            'TotalVideoEvents': TotalVideoEvents,
            'IsLastSubm': IsLastSubm,
            'TotalTimeVideo': TotalTimeVideo,
            'sumbNumTimesNumVidForum': sumbNumTimesNumVidForum
        })
    # Features for forum events
    if len(ForumEvents) > 0:
        # Keys: TimeStamp, EventType, PostType, PostID, PostLength
        EventTypes = ForumEvents['EventType']
        NumberOfThreadViews = EventTypes.count('Forum.Thread.View')
        PostTypes = ForumEvents['PostType']
        NumberOfComments = PostTypes.count('Comment')
        NumberOfPosts = PostTypes.count('Post')
        # Weighted sum of relevant post-types
        ScoreRelevantEvents = 2 * NumberOfComments + 1.5 * NumberOfPosts + 1 * NumberOfThreadViews
        ComAndPost = NumberOfComments + NumberOfPosts
        NumberOfThreadsLaunched = EventTypes.count('Forum.Thread.Launch')
        TotalForumEvents = NumberOfThreadsLaunched + NumberOfPosts
        # Cross features mixing video and forum activity
        EngagementIndex = TotalVideoEvents * TotalForumEvents
        LaunchedTimesViews = NumberOfThreadsLaunched * NumberOfThreadViews
        PlaysTimesThreadViews = NumberOfPlays * NumberOfThreadViews
        # Append features to dictionary
        Features.update({
            'NumberOfThreadViews': NumberOfThreadViews,
            'NumberOfComments': NumberOfComments,
            'NumberOfPosts': NumberOfPosts,
            'ScoreRelevantEvents': ScoreRelevantEvents,
            'ComAndPost': ComAndPost,
            'NumberOfThreadsLaunched': NumberOfThreadsLaunched,
            'TotalForumEvents': TotalForumEvents,
            'EngagementIndex': EngagementIndex,
            'LaunchedTimesViews': LaunchedTimesViews,
            'PlaysTimesThreadViews': PlaysTimesThreadViews
        })
    return Features
|
20,844 | 5815d692918b4131321edcc94c949a7dce771a9c | from model.group import Group
testdata = [Group(group_name="", group_header="", group_footer=""),
Group(group_name="my_group", group_header="my_header", group_footer="my_footer")
]
|
20,845 | 6177765f4aa79af2efbf6c8f4780e35a4ea9a88b | # -*- coding: utf-8 -*-
import wx
import wx.xrc
import math
#############################################
## Class MyFrame1
#############################################
class MyFrame1(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent, id=wx.ID_ANY, title=wx.EmptyString, pos=wx.DefaultPosition,
size=wx.Size(486, 448), style=wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL)
self.SetSizeHintsSz(wx.DefaultSize, wx.DefaultSize)
bSizer1 = wx.BoxSizer(wx.VERTICAL)
self.m_textCtrl1 = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size(600, 60), style=wx.TE_RIGHT)
self.m_textCtrl1.SetMinSize(wx.Size(470, 60))
bSizer1.Add(self.m_textCtrl1, 0, wx.ALL, 5)
bSizer2 = wx.BoxSizer(wx.HORIZONTAL)
self.m_button1 = wx.Button(self, wx.ID_ANY, u"退格", wx.DefaultPosition, wx.Size(110, 60), 0)
bSizer2.Add(self.m_button1, 0, wx.ALL, 5)
self.m_button2 = wx.Button(self, wx.ID_ANY, u"清屏", wx.DefaultPosition, wx.Size(110, 60), 0)
bSizer2.Add(self.m_button2, 0, wx.ALL, 5)
self.m_button3 = wx.Button(self, wx.ID_ANY, u"sqrt", wx.DefaultPosition, wx.Size(110, 60), 0)
bSizer2.Add(self.m_button3, 0, wx.ALL, 5)
self.m_button4 = wx.Button(self, wx.ID_ANY, u"/", wx.DefaultPosition, wx.Size(110, 60), 0)
bSizer2.Add(self.m_button4, 0, wx.ALL, 5)
bSizer1.Add(bSizer2, 0, wx.EXPAND, 5)
bSizer6 = wx.BoxSizer(wx.HORIZONTAL)
self.m_button10 = wx.Button(self, wx.ID_ANY, u"7", wx.DefaultPosition, wx.Size(110, 60), 0)
bSizer6.Add(self.m_button10, 0, wx.ALL, 5)
self.m_button11 = wx.Button(self, wx.ID_ANY, u"8", wx.DefaultPosition, wx.Size(110, 60), 0)
bSizer6.Add(self.m_button11, 0, wx.ALL, 5)
self.m_button12 = wx.Button(self, wx.ID_ANY, u"9", wx.DefaultPosition, wx.Size(110, 60), 0)
bSizer6.Add(self.m_button12, 0, wx.ALL, 5)
self.m_button13 = wx.Button(self, wx.ID_ANY, u"*", wx.DefaultPosition, wx.Size(110, 60), 0)
bSizer6.Add(self.m_button13, 0, wx.ALL, 5)
bSizer1.Add(bSizer6, 0, wx.EXPAND, 5)
bSizer7 = wx.BoxSizer(wx.HORIZONTAL)
self.m_button15 = wx.Button(self, wx.ID_ANY, u"4", wx.DefaultPosition, wx.Size(110, 60), 0)
bSizer7.Add(self.m_button15, 0, wx.ALL, 5)
self.m_button16 = wx.Button(self, wx.ID_ANY, u"5", wx.DefaultPosition, wx.Size(110, 60), 0)
bSizer7.Add(self.m_button16, 0, wx.ALL, 5)
self.m_button17 = wx.Button(self, wx.ID_ANY, u"6", wx.DefaultPosition, wx.Size(110, 60), 0)
bSizer7.Add(self.m_button17, 0, wx.ALL, 5)
self.m_button18 = wx.Button(self, wx.ID_ANY, u"-", wx.DefaultPosition, wx.Size(110, 60), 0)
bSizer7.Add(self.m_button18, 0, wx.ALL, 5)
bSizer1.Add(bSizer7, 0, wx.EXPAND, 5)
bSizer34 = wx.BoxSizer(wx.HORIZONTAL)
self.m_button140 = wx.Button(self, wx.ID_ANY, u"1", wx.DefaultPosition, wx.Size(110, 60), 0)
bSizer34.Add(self.m_button140, 0, wx.ALL, 5)
self.m_button141 = wx.Button(self, wx.ID_ANY, u"2", wx.DefaultPosition, wx.Size(110, 60), 0)
bSizer34.Add(self.m_button141, 0, wx.ALL, 5)
self.m_button142 = wx.Button(self, wx.ID_ANY, u"3", wx.DefaultPosition, wx.Size(110, 60), 0)
bSizer34.Add(self.m_button142, 0, wx.ALL, 5)
self.m_button143 = wx.Button(self, wx.ID_ANY, u"+", wx.DefaultPosition, wx.Size(110, 60), 0)
bSizer34.Add(self.m_button143, 0, wx.ALL, 5)
bSizer1.Add(bSizer34, 0, wx.EXPAND, 5)
bSizer35 = wx.BoxSizer(wx.HORIZONTAL)
self.m_button145 = wx.Button(self, wx.ID_ANY, u"0", wx.DefaultPosition, wx.Size(110, 60), 0)
bSizer35.Add(self.m_button145, 0, wx.ALL, 5)
self.m_button148 = wx.Button(self, wx.ID_ANY, u".", wx.DefaultPosition, wx.Size(110, 60), 0)
bSizer35.Add(self.m_button148, 0, wx.ALL, 5)
self.m_button149 = wx.Button(self, wx.ID_ANY, u"+/-", wx.DefaultPosition, wx.Size(110, 60), 0)
bSizer35.Add(self.m_button149, 0, wx.ALL, 5)
self.m_button150 = wx.Button(self, wx.ID_ANY, u"=", wx.DefaultPosition, wx.Size(110, 600), 0)
self.m_button150.SetMinSize(wx.Size(110, 60))
bSizer35.Add(self.m_button150, 0, wx.ALL, 5)
bSizer1.Add(bSizer35, 0, wx.EXPAND, 5)
self.SetSizer(bSizer1)
self.Layout()
self.Centre(wx.BOTH)
# Connect Events
self.m_button1.Bind(wx.EVT_BUTTON, self.m_button1OnButtonClick)
self.m_button2.Bind(wx.EVT_BUTTON, self.m_button2OnButtonClick)
self.m_button3.Bind(wx.EVT_BUTTON, self.m_button3OnButtonClick)
self.m_button4.Bind(wx.EVT_BUTTON, self.m_button4OnButtonClick)
self.m_button10.Bind(wx.EVT_BUTTON, self.m_button10OnButtonClick)
self.m_button11.Bind(wx.EVT_BUTTON, self.m_button11OnButtonClick)
self.m_button12.Bind(wx.EVT_BUTTON, self.m_button12OnButtonClick)
self.m_button13.Bind(wx.EVT_BUTTON, self.m_button13OnButtonClick)
self.m_button15.Bind(wx.EVT_BUTTON, self.m_button15OnButtonClick)
self.m_button16.Bind(wx.EVT_BUTTON, self.m_button16OnButtonClick)
self.m_button17.Bind(wx.EVT_BUTTON, self.m_button17OnButtonClick)
self.m_button18.Bind(wx.EVT_BUTTON, self.m_button18OnButtonClick)
self.m_button140.Bind(wx.EVT_BUTTON, self.m_button140OnButtonClick)
self.m_button141.Bind(wx.EVT_BUTTON, self.m_button141OnButtonClick)
self.m_button142.Bind(wx.EVT_BUTTON, self.m_button142OnButtonClick)
self.m_button143.Bind(wx.EVT_BUTTON, self.m_button143OnButtonClick)
self.m_button145.Bind(wx.EVT_BUTTON, self.m_button145OnButtonClick)
self.m_button148.Bind(wx.EVT_BUTTON, self.m_button148OnButtonClick)
self.m_button149.Bind(wx.EVT_BUTTON, self.m_button149OnButtonClick)
self.m_button150.Bind(wx.EVT_BUTTON, self.m_button150OnButtonClick)
def __del__(self):
pass
# Virtual event handlers, overide them in your derived class
def m_button1OnButtonClick(self, event):
result=self.m_textCtrl1.GetValue()
result=result[:-1]
self.m_textCtrl1.SetValue(result)
def m_button2OnButtonClick(self, event):
result=self.m_textCtrl1.GetValue()
result=''
self.m_textCtrl1.SetValue(result)
def m_button3OnButtonClick(self, event):
result=self.m_textCtrl1.GetValue()
result=int(result)
result=math.sqrt(result)
self.m_textCtrl1.SetValue(str(result))
def m_button4OnButtonClick(self, event):
result=self.m_textCtrl1.GetValue()
result=result+'/'
self.m_textCtrl1.SetValue(result)
def m_button10OnButtonClick(self, event):
result=self.m_textCtrl1.GetValue()
result=result+'7'
self.m_textCtrl1.SetValue(result)
def m_button11OnButtonClick(self, event):
result=self.m_textCtrl1.GetValue()
result=result+'8'
self.m_textCtrl1.SetValue(result)
def m_button12OnButtonClick(self, event):
result=self.m_textCtrl1.GetValue()
result=result+'9'
self.m_textCtrl1.SetValue(result)
def m_button13OnButtonClick(self, event):
result=self.m_textCtrl1.GetValue()
result=result+'*'
self.m_textCtrl1.SetValue(result)
def m_button15OnButtonClick(self, event):
result=self.m_textCtrl1.GetValue()
result=result+'4'
self.m_textCtrl1.SetValue(result)
def m_button16OnButtonClick(self, event):
result=self.m_textCtrl1.GetValue()
result=result+'5'
self.m_textCtrl1.SetValue(result)
def m_button17OnButtonClick(self, event):
result=self.m_textCtrl1.GetValue()
result=result+'6'
self.m_textCtrl1.SetValue(result)
def m_button18OnButtonClick(self, event):
result=self.m_textCtrl1.GetValue()
result=result+'-'
self.m_textCtrl1.SetValue(result)
def m_button140OnButtonClick(self, event):
result=self.m_textCtrl1.GetValue()
result=result+'1'
self.m_textCtrl1.SetValue(result)
def m_button141OnButtonClick(self, event):
result=self.m_textCtrl1.GetValue()
result=result+'2'
self.m_textCtrl1.SetValue(result)
def m_button142OnButtonClick(self, event):
result=self.m_textCtrl1.GetValue()
result=result+'3'
self.m_textCtrl1.SetValue(result)
def m_button143OnButtonClick(self, event):
result=self.m_textCtrl1.GetValue()
result=result+'+'
self.m_textCtrl1.SetValue(result)
def m_button145OnButtonClick(self, event):
result=self.m_textCtrl1.GetValue()
result=result+'0'
self.m_textCtrl1.SetValue(result)
def m_button148OnButtonClick(self, event):
result=self.m_textCtrl1.GetValue()
result=result+'.'
self.m_textCtrl1.SetValue(result)
def m_button149OnButtonClick(self, event):
result=self.m_textCtrl1.GetValue()
result=-int(result)
self.m_textCtrl1.SetValue(str(result))
def m_button150OnButtonClick(self, event):
self.m_textCtrl1.SetValue(str(eval(self.m_textCtrl1.GetValue())))
app=wx.App()
window=MyFrame1(None)
window.Show(True)
app.MainLoop()
|
20,846 | 3621b03ede969dd07d78b963ed2e2773528a8645 | import cv2
# create opencv image
img = cv2.imread('car.png')
# video = cv2.VideoCapture('Teslas Avoiding Accidents Compilation.mp4')
video = cv2.VideoCapture('videoplayback.mp4')
# convert to grayscale
grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# create car classifier
car_tracker = cv2.CascadeClassifier('car.xml')
# haarcascade_fullbody classifier
pedestrian_tracker = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_fullbody.xml')
while True:
# read the current frame
(read_successful, frame) = video.read()
# safe coding
if read_successful:
# convert to grayscale
grayscale_vid = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
else:
break
# detect cars
cars = car_tracker.detectMultiScale(grayscale_vid)
# detect pedestrians
pedestrians = pedestrian_tracker.detectMultiScale(grayscale_vid)
# draw rectangle around cars
for(x, y, w, h) in cars:
cv2.rectangle(frame, (x,y), (x+w, y+h), (0,0,255), 2)
# draw rectangle around pedestrians
for(x, y, w, h) in pedestrians:
cv2.rectangle(frame, (x,y), (x+w, y+h), (0,255,255), 5)
# display the video
cv2.imshow('Self Driving Car App',frame)
key = cv2.waitKey(1)
# stop if q key pressed
if key== ord('q'): # ord() return unicode of passing character
break
# release the VideoCapture object
video.release()
"""
# detect cars
cars=car_tracker.detectMultiScale(grayscale)
print(cars)
# draw rectangle around cars
for(x, y, w, h) in cars:
cv2.rectangle(img, (x,y), (x+w, y+h), (0,255,0), 5)
# display the image with faces spotted
cv2.imshow('Self Driving Car App',img)
cv2.waitKey()
"""
print("Made by Vipul Sinha") |
20,847 | a710367e7b24238a568ea9561621dff2ad57099e | t=int(input())
for i in range(t):
str1=input()
str2=str1
str1=str1.replace('6','9')
ans=int(str1)-int(str2)
print(ans)
|
20,848 | 22da031df8e5ef383ff60fb689753994ee632a59 | import threading
import numpy as np
import h5py
class ReidGenerator():
    """Thread-safe batch generator of image pairs for person re-identification.

    Each ``next()`` call reopens the HDF5 database at ``self.database`` and
    yields ([batch_x_1, batch_x_2], batch_y) where batch_y is one-hot:
    [0, 1] for a same-identity (positive) pair, [1, 0] for a negative pair.
    ``p`` is the probability of sampling a positive pair in "train" mode.
    """
    def __init__(self, database, batch_size=32, flag="train", p=1/3):
        self.database = database
        self.batch_size = batch_size
        self.flag = flag
        self.p = p
        # Guards __next__ so multiple worker threads can share one generator.
        self.lock = threading.Lock()
    def __iter__(self):
        return self
    def __next__(self):
        with self.lock:
            # Open database (reopened on every batch; file is closed between calls)
            with h5py.File(self.database, "r") as db:
                # Image shape taken from the first identity of the train split.
                image_shape = db["train"]["0"].shape[1:]
                batch_x_1 = np.zeros((self.batch_size, *image_shape))
                batch_x_2 = np.zeros((self.batch_size, *image_shape))
                batch_y = np.zeros((self.batch_size,2))
                if self.flag == "train":
                    n_ids = len(db["train"])
                    # Choose positive or negative pair per batch slot
                    pairs = np.random.choice(["positive", "negative"], p=[self.p, 1-self.p], size=self.batch_size)
                    for index, pair in enumerate(pairs):
                        if pair == "positive":
                            # Two distinct images of the same identity.
                            pair_id = np.random.choice(n_ids)
                            pair_a, pair_b = np.random.choice(len(db["train"][str(pair_id)]), size=2, replace=False)
                            batch_x_1[index] = db[self.flag][str(pair_id)][pair_a]
                            batch_x_2[index] = db[self.flag][str(pair_id)][pair_b]
                            batch_y[index] = [0, 1]
                        else:
                            # One image each from two different identities.
                            pair_ids = np.random.choice(n_ids, 2, replace=False)
                            pair_a = np.random.choice(len(db[self.flag][str(pair_ids[0])]))
                            pair_b = np.random.choice(len(db[self.flag][str(pair_ids[1])]))
                            batch_x_1[index] = db[self.flag][str(pair_ids[0])][pair_a]
                            batch_x_2[index] = db[self.flag][str(pair_ids[1])][pair_b]
                            batch_y[index] = [1, 0]
                elif self.flag == "validation":
                    n_ids = len(db["validation"])
                    for index in range(self.batch_size):
                        # replace=True: the two ids may coincide.
                        pair_ids = np.random.choice(n_ids, 2, replace=True)
                        pair_a = np.random.choice(len(db[self.flag][str(pair_ids[0])]))
                        pair_b = np.random.choice(len(db[self.flag][str(pair_ids[1])]))
                        batch_x_1[index] = db[self.flag][str(pair_ids[0])][pair_a]
                        batch_x_2[index] = db[self.flag][str(pair_ids[1])][pair_b]
                        # NOTE(review): this compares image indices, not
                        # identity ids (pair_ids[0] == pair_ids[1]) — looks
                        # like a labelling bug; confirm intended semantics.
                        label = (pair_a == pair_b)
                        batch_y[index] = [(not label), label]
                return [batch_x_1, batch_x_2], batch_y
def testGenerator(database="cuhk.h5"):
    """Yield (first-image batch, last-image batch, n_ids) over the validation split.

    For each identity the first and last stored images are paired, giving one
    deterministic pair per identity.  The HDF5 file stays open for the
    generator's lifetime.
    """
    # Open database
    with h5py.File(database, "r") as db:
        n_ids = len(db["validation"])
        image_shape = db["validation"]["0"].shape[1:]
        while True:
            test_batch_x1 = np.zeros((n_ids, *image_shape))
            test_batch_x2 = np.zeros((n_ids, *image_shape))
            for index in range(n_ids):
                # First and last image of identity `index`.
                test_batch_x1[index] = db["validation"][str(index)][0]
                test_batch_x2[index] = db["validation"][str(index)][-1]
            yield test_batch_x1, test_batch_x2, n_ids
def featureGenerator(database="cuhk.h5", batch_size=32, flag="train"):
    """Yield (batch_x, batch_x) of randomly sampled single images from *flag* split.

    The input is returned as its own target, i.e. autoencoder-style batches.
    """
    # Open database
    with h5py.File(database, "r") as db:
        n_ids = len(db[flag])
        image_shape = db[flag]["0"].shape[1:]
        while True:
            batch_x = np.zeros((batch_size, *image_shape))
            for index in range(batch_size):
                # Random identity, then a random image of that identity.
                pair_id = np.random.choice(n_ids)
                img_id = np.random.choice(len(db[flag][str(pair_id)]))
                batch_x[index] = db[flag][str(pair_id)][img_id]
            yield batch_x, batch_x
if __name__ == '__main__':
trainGenerator()
|
20,849 | 47aa423c5ac128470cf6f0c691b60de32e7e71f1 | from flask_admin import Admin
from flask_marshmallow import Marshmallow
from flask_security import Security
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
ma = Marshmallow()
security = Security()
fadmin = Admin(name='Dodo', template_mode='bootstrap3')
|
20,850 | 67568f53abca7336c4042f64a5b4465d985f1cd9 | """heap sort
author: luochang212
last update: 2020-06-20
"""
import random
import numbers
class SortTool():
    """Heap-sort helper.

    input: a list (non-numeric elements are coerced with int())
    output: a new sorted list

    usage:
        st = SortTool()
        res = st.heap_sort(nums)
    """
    def heap_adjust(self, nums, s, t):
        """Sift nums[s] down inside the max-heap nums[s..t] (1-based layout)."""
        root_val = nums[s]
        parent = s
        child = 2 * parent
        while child <= t:
            # Pick the larger child of the pair.
            if child < t and nums[child] < nums[child + 1]:
                child += 1
            if root_val > nums[child]:
                break
            nums[parent] = nums[child]
            parent = child
            child *= 2
        nums[parent] = root_val
        return nums
    def heap_sort(self, nums=None):
        """Sort *nums* ascending; generate a random list when none is given."""
        if not nums:
            nums = [random.randint(0, 15) for _ in range(20)]
        # Coerce any non-numeric elements (e.g. numeric strings) to int.
        if not all(isinstance(e, numbers.Number) for e in nums):
            nums = [int(e) for e in nums]
        # Prepend a placeholder so the heap is 1-based.
        heap = [None] + nums
        n = len(heap) - 1
        # Build the initial max-heap bottom-up.
        for root in range(n // 2, 0, -1):
            heap = self.heap_adjust(heap, root, n)
        # Repeatedly move the max to the end and restore the heap.
        for end in range(n, 0, -1):
            heap[1], heap[end] = heap[end], heap[1]
            heap = self.heap_adjust(heap, 1, end - 1)
        return heap[1:]
def main():
    """Demo entry point: heap-sort a mixed str/int list and print the result."""
    tool = SortTool()
    print(tool.heap_sort(["9", 3, 5, 1]))
if __name__ == '__main__':
main()
|
20,851 | 4cbc946acbfd846b0d42e1fa7ea160276b5a5f1e | """!
@file camera.py
@brief
This is the script that serves as the driver for the full camera functionality as well as the picture-to-audio processing.
This is the script, uses functions from CV_algo.py in order to provide the full device functionality.
"""
## @package camera
# This namespace is used whenever referring to the camera module.
## @package picamera
# this package is used to startup the camera from code
from picamera import PiCamera
## @package RPi.GPIO
# Import Raspberry Pi GPIO library to be used to identify and use pins within Raspberry Pi
import RPi.GPIO as GPIO
## @package led
# This is the led.py script located within Software that was used for testing.
#import led #! used for testing
## @package CV_algo
# This is the CV_algo.py script located within Software that is imported to allow use of its various functions.
import CV_algo
## @package time
#This packaged is mostly used for button debouncing.
import time
#DEFINE BUTTONS:
## Pin 11 on Board = Capture/Start Button
Capture_Button = 11
## Pin 18 on Board = change language button
Lang_Change_Button = 18
## Pin 13 on Board = Pause/Play button
Button_Pause = 13
## Pin 15 on Board = Volume Up
Button_VolUp = 15
## Pin 16 on Board = Volume Down
Button_VolDown = 16
## Pin 22 on Board = Stop process
Button_Stop = 22
## Pin 24 = Speed change
Spd_Change = 24
#GLOBAL VARIABLES:
##DEFINES THE CURRENT INDEX FOR LANGUAGE IN lang_array
lang_index = 0
##DEFINES TOTAL NO. OF LANGUAGE OPTIONS
lang_options = 3
##DEFINES TOTAL NO. OF SPEED OPTIONS
spd_options = 3
##DEFINES CURRENT VALUE OF SPEED FROM spd_arr
spd_ind = 1
##ARRAY THAt DEFINES SPEED CHANGE OPTIONS
spd_arr = [0.7,1,1.3]
#---------------------------------------------FUNCTIONS-----------------------------------------------#
#Function to change the language: (from lang_array)
def LangChange(lang_index):
    """! Changes the index of the lang_index variable for the languages array (located in CV_algo.py)
    @param lang_index The current value for the language array index
    @return The new value of the language array index
    """
    print('Changed languages') #! USED FOR TESTING
    CV_algo.playCue('/home/pi/SoundBites/lang_change.mp3') #audio cue so the user knows the press registered
    lang_index = (lang_index + 1) % lang_options #advance and wrap around after the last language
    return lang_index
#Function to change the speed: (from spd_array)
def Speed_Button(channel):
    """! Cycles the playback speed (spd_ind into spd_arr) when audio is playing.
    @param channel The GPIO channel that triggered the interrupt (unused here,
                   required by the RPi.GPIO callback signature).
    """
    if (CV_algo.CheckforVLC()): #only act if a VLC playback process exists
        global spd_ind
        # advance and wrap around the available speed options
        spd_ind = (spd_ind + 1) % spd_options
        print('speed Changed') #! USED FOR TESTING
        CV_algo.SpeedChange(spd_arr[spd_ind]) #apply the new speed via CV_algo
    else:
        #Executes if VLC process not detected (i.e., image has not been processed yet)
        print('No VLC right now') #! USED FOR TESTING
#----------------------------PUSHBUTTON CONFIGURATION----------------------------------------#
GPIO.setwarnings(False) # Ignore warning for now
GPIO.setmode(GPIO.BOARD) # Use physical pin numbering
GPIO.setup(Capture_Button, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Set pin 11 to be an input pin and set initial value to be pulled low (off)
GPIO.setup(Spd_Change, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Set pin 11 to be an input pin and set initial value to be pulled low (off)
GPIO.setup(Lang_Change_Button, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Set pin 11 to be an input pin and set initial value to be pulled low (off)
#SET ALL THE BUTTONS AS PULL DOWN
GPIO.setup(Button_Pause, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(Button_VolUp, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(Button_VolDown, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(Button_Stop, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
#-----------------------------INTERRUPT HANDLER CONFIGURATION--------------------------------#
GPIO.add_event_detect(Button_Pause, GPIO.RISING, callback= CV_algo.PlayPause, bouncetime=1500)
GPIO.add_event_detect(Button_VolUp, GPIO.RISING, callback= CV_algo.VolumeUp, bouncetime=1500)
GPIO.add_event_detect(Button_VolDown, GPIO.RISING, callback= CV_algo.VolumeDown, bouncetime=1500)
GPIO.add_event_detect(Button_Stop, GPIO.RISING, callback= CV_algo.StopPlayback, bouncetime=1500)
GPIO.add_event_detect(Spd_Change, GPIO.RISING, callback= Speed_Button, bouncetime=1500)
#---------------------------------------CAMERA SETUP---------------------------------------------------#
##Initalize the camera so that we can use it for picture taking
camera = PiCamera()
time.sleep(2)
print("Start Now") #! USED FOR TESTING
CV_algo.playCue('/home/pi/SoundBites/Device_ready.mp3')
#!DEBUG LINES FOR TESTING CODE:
#led.blink()
#led.blink()
# MAIN FUNCTIONALITY: Run forever
while True:
#When the button is pressed take a picture, save it and call the comp vision algorithm to process it.
if GPIO.input(Capture_Button) == True:
camera.capture("/home/pi/Pictures/input.png")
print("Picture Captured")
CV_algo.playCue('/home/pi/SoundBites/Picture_captured.mp3') #play audio cue to the user that the picture has been captured
#led.blink() #!DEBUG FUNCTION
CV_algo.CV(lang_index) #set the current language index to the CV function and call the CV function
#If the language change button is clicked change the language index
elif GPIO.input(Lang_Change_Button) == True:
lang_index = LangChange(lang_index)
print(CV_algo.languages[lang_index])
time.sleep(0.5) |
20,852 | a57baf87a3390cc897aaa8dd3422b01de9693629 | from django.shortcuts import render
from .models import Info
def index(request):
return render(request, 'index.html')
def info(request):
return render(request, 'info.html')
def signupForm(request):
return render(request, 'user_sign_up.html')
def addEntry(request):
    """Create an Info row named after the POSTed first name, then render the index page.

    Raises KeyError (-> HTTP 500) if 'InputFirstName' is missing from the POST data.
    """
    n = request.POST["InputFirstName"]
    inf = Info.objects.create(name=n)  # NOTE(review): 'inf' is never used afterwards
    return render(request, 'index.html')
|
20,853 | bddb461004f75263f3372ba203dd298eb9e74020 | import sys
n,m=map(int,input().split())
num=list(map(int,input().split()))
num.insert(0,0)
for i in range(1,n+1):
num[i]+=num[i-1]
result=list()
for i in range(m):
s,e=map(int,input().split())
res=num[e]-num[s-1]
result.append(res)
for i in result:
print(i)
|
20,854 | 46b8a5a3b3344965637a61b67affe6523cb77916 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-11-10 22:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='DadosPreproc',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('palavras_texto_original', models.CharField(max_length=10, verbose_name='PalavrasTO')),
('palavras_texto_lematizado', models.CharField(max_length=10, verbose_name='PalavrasTL')),
('palavras_texto_lematizado_ssw', models.CharField(max_length=10, verbose_name='PalavrasTLSSW')),
],
),
migrations.CreateModel(
name='ListaVertices',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('node', models.CharField(max_length=50, verbose_name='Node')),
('index', models.IntegerField(default=-1, verbose_name='index')),
],
),
migrations.CreateModel(
name='TabelaRanking',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('vertice_nome', models.CharField(max_length=60, verbose_name='V_nome')),
('vertice_numero', models.IntegerField(verbose_name='V_numero')),
('grau', models.IntegerField(verbose_name='Grau')),
('betweenness', models.DecimalField(decimal_places=5, max_digits=10, verbose_name='Betweenness')),
('closeness', models.DecimalField(decimal_places=5, max_digits=10, verbose_name='Closeness')),
],
),
migrations.CreateModel(
name='TextoPreproc',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('vertice', models.CharField(max_length=50, verbose_name='Vertice')),
('vertice_num', models.IntegerField()),
],
),
]
|
20,855 | 51c6ab7c36919eed1304235fdf1125057aaaf414 | # -*- coding: utf-8 -*-
import tweepy
import re
import nltk
from nltk.corpus import PlaintextCorpusReader, brown
import cPickle as pickle
import operator
import random
auth = tweepy.OAuthHandler("xxx", "xxx")
auth.set_access_token("xxx", "xxx")
api = tweepy.API(auth)
directory = "PATH-TO-DIRECTORY"
bandz = pickle.load(open(directory + "thug_tokens.p", "rb"))
thugtrainer = nltk.NgramModel(3, bandz)
corpus_root = directory + "/songs"
chainzcorpus = PlaintextCorpusReader(corpus_root, '.*')
chainzwords = nltk.probability.FreqDist()
for sent in chainzcorpus.sents():
for word in sent:
chainzwords.inc(word.lower())
chainzkeys = chainzwords.keys()
brownwords = nltk.probability.FreqDist()
for sent in brown.sents():
for word in sent:
brownwords.inc(word.lower())
brownkeys = brownwords.keys()
stopwords = nltk.corpus.stopwords.words('english')
trends_US = api.trends_place(23424977)
trendlist = []
for trend in trends_US[0]['trends']:
trendlist.append(trend['name'])
trendwords = []
for trend in trendlist:
if trend.startswith('#'):
if len(re.findall('[A-Z][^A-Z]*', trend)) > 1:
trendwords.append((re.findall('[A-Z][^A-Z]*', trend), trend))
else:
pass
else:
pass
keyrank = {}
for trend in trendwords:
for word in trend[0]:
if len(word) < 2:
pass
else:
try:
keyrank[(word.lower(), trend[1])] = (1 - (chainzkeys.index(word.lower()) / float(len(chainzkeys)))) - (1 - (brownkeys.index(word.lower()) / float(len(brownkeys))))
except:
try:
keyrank[(word, trend[1])] = (1 - (chainzkeys.index(word.lower()) / float(len(chainzkeys))))
except:
pass
for k, v in keyrank.items():
if (k[0] in stopwords):
del keyrank[k]
top = max(keyrank.iteritems(), key=operator.itemgetter(1))[0]
top = max(keyrank.iteritems(), key=operator.itemgetter(1))[0]
gen_tweet = " ".join(thugtrainer.generate(random.randint(5,15), [top[0]]))
punctuation = [",", "!", ".", "'", "n't", ":", ";"]
for punct in punctuation:
gen_tweet = gen_tweet.replace(" " + punct, punct)
api.update_status(top[1] + " " + gen_tweet)
print top[1] + " " + gen_tweet
print keyrank
print top
#print "@" + top[2] + " " + gen_tweet, top[1]
#except:
# print "No useful tokens"
# pass |
20,856 | 540e8965a103e558faf09831c3164c350674f35e | from .encryptor import Encryptor
|
20,857 | 3f2466b5110b989940fca09725077fd3a9575472 | from sner.classes.display import Display
from sner.classes.rule import Rule
from sner.classes.token import Token
|
20,858 | 725ed54794fae22d8571aa8d9b1676f60b58628e | def firstDuplicateValue(array):
d = {}
a = []
# Basic for loop to add items to a dictionary
for i in range(len(array)):
if array[i] in d and array[i] not in a:
# needed a way to store both index and second occurance with [i, 1]
d[array[i]] = [i,1]
# needed another way to filter a 3rd occurance. We just append occurance 2 to a list
a.append(array[i])
elif array[i] not in d:
d[array[i]] = [i,0]
# Default min value possible. If unchanged no occurances found
minD = [0, 10000]
# Search through dictionary to find our lowest index. Update minD as we go along.
for x, y in d.items():
if y[1] == 1:
if y[0] <= minD[1]:
minD = [x,y[0]]
print(x, y)
print(minD)
# Final check
if minD[1] == 10000:
return -1
else:
return minD[0]
|
20,859 | 7ac6c5302c6f132d42d34ad4f5b95b9991484b57 | class Node(object):
def __init__(self, value):
self.value = value
self.nextnode = None
def reverse(a_head):
next_node = None
curr_node = None
prev_node = None
while a_head:
next_node = a_head.nextnode
curr_node = a_head
a_head.nextnode = prev_node
a_head = next_node
prev_node = curr_node
return curr_node
# Create a list of 4 nodes
a = Node(1)
b = Node(2)
# Set up order a,b,c,d with values 1,2,3,4
a.nextnode = b
print(reverse(a))
print(b.nextnode.value)
|
20,860 | 4c5567a2ffb97000f609b49d9e7b89cf3a8fa86c | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from edb.lang import edgeql
from edb.lang.schema import database as s_db
from edb.lang.schema import delta as s_delta
# The below must be imported here to make sure we have all
# necessary mappers from/to DDL AST.
from . import scalars # NOQA
from . import attributes # NOQA
from . import objtypes # NOQA
from . import constraints # NOQA
from . import functions # NOQA
from . import indexes # NOQA
from . import links # NOQA
from . import lproperties # NOQA
from . import modules # NOQA
from . import policy # NOQA
from . import views # NOQA
def cmd_from_ddl(stmt, *, context=None, schema, modaliases):
    """Build a single schema delta command from one DDL AST statement."""
    # Expand module aliases (implicit and explicit) in the statement.
    ddl = edgeql.deoptimize(stmt, strip_builtins=False)
    if context is None:
        context = s_delta.CommandContext()
    context.modaliases = modaliases
    context.schema = schema
    return s_delta.Command.from_ast(ddl, schema=schema, context=context)
def delta_from_ddl(stmts, *, schema, modaliases):
    """Build an AlterDatabase delta from one or more DDL AST statements."""
    alter_db = s_db.AlterDatabase()
    context = s_delta.CommandContext()
    context.modaliases = modaliases
    context.schema = schema
    # Accept either a single AST node or an iterable of them.
    stmt_list = [stmts] if isinstance(stmts, edgeql.ast.Base) else stmts
    for stmt in stmt_list:
        with context(s_db.DatabaseCommandContext(alter_db)):
            cmd = cmd_from_ddl(
                stmt, context=context, schema=schema, modaliases=modaliases)
            alter_db.add(cmd)
    return alter_db
def ddl_from_delta(delta):
    """Return DDL AST for a delta command tree."""
    ast = delta.get_ast()
    return ast
def ddl_text_from_delta_command(delta):
    """Return DDL text for a delta command tree."""
    # An AlterDatabase is itself iterable over its commands; anything
    # else is treated as a single command.
    if isinstance(delta, s_db.AlterDatabase):
        commands = delta
    else:
        commands = [delta]
    chunks = []
    for command in commands:
        delta_ast = ddl_from_delta(command)
        if not delta_ast:
            continue
        optimized = edgeql.optimize(delta_ast, strip_builtins=False)
        chunks.append(edgeql.generate_source(optimized) + ';')
    return '\n'.join(chunks)
def ddl_text_from_delta(schema, delta):
    """Return DDL text for a delta object."""
    # NOTE: *schema* is unused here but kept for interface compatibility.
    return '\n'.join(
        ddl_text_from_delta_command(command) for command in delta.commands)
|
20,861 | 532a464f91f655365bf1b2319454da44c4191726 | #!/usr/bin/env python
# Copyright 2021 Jian Wu
# License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import torch as th
import torch.nn as nn
import torch.nn.functional as tf
from typing import Union, List, Optional
from aps.rt_sse.base import RealTimeSSEBase
from aps.sse.base import tf_masking, MaskNonLinear
from aps.transform.asr import TFTransposeTransform
from aps.streaming_asr.base.encoder import StreamingFSMNEncoder
from aps.libs import ApsRegisters
@ApsRegisters.sse.register("rt_sse@dfsmn")
class DFSMN(RealTimeSSEBase):
    """
    Uses Deep FSMN for speech enhancement/separation

    A streaming FSMN encoder maps the mixture's spectral features to one
    TF mask per output branch (real-valued masks, or complex masks when
    ``complex_mask`` is True). The masks are applied to the mixture STFT
    to produce the enhanced/separated signals.
    """
    # Left/right context spec: a single int (same for every layer) or a
    # per-layer list of ints.
    FSMNParam = Union[List[int], int]
    # Treated as constants by TorchScript.
    __constants__ = ["lctx", "rctx", "complex_mask"]

    def __init__(self,
                 enh_transform: Optional[nn.Module] = None,
                 dim: int = 1024,
                 num_bins: int = 257,
                 num_branchs: int = 1,
                 num_layers: int = 4,
                 project: int = 512,
                 dropout: float = 0.0,
                 residual: bool = True,
                 lctx: FSMNParam = 3,
                 rctx: FSMNParam = 3,
                 norm: str = "BN",
                 complex_mask: bool = True,
                 non_linear: str = "relu",
                 training_mode: str = "freq"):
        """
        Args:
            enh_transform: feature transform module (required).
            dim: hidden dimension of each FSMN layer.
            num_bins: number of frequency bins of the spectral features.
            num_branchs: number of output branches (sources to estimate).
            num_layers: number of stacked FSMN layers.
            project: projection size of each FSMN layer.
            dropout: dropout used inside the encoder.
            residual: use residual connections between FSMN layers.
            lctx: left context per layer (int or per-layer list).
            rctx: right context per layer (int or per-layer list).
            norm: normalization layer type, e.g. "BN".
            complex_mask: estimate complex (real+imag) masks when True.
            non_linear: mask non-linearity used for real-valued masks.
            training_mode: "freq" or "time" training target mode.
        """
        super(DFSMN, self).__init__(enh_transform, training_mode=training_mode)
        assert enh_transform is not None
        # Encoder output size doubles when predicting complex masks
        # (real and imaginary parts are stacked along the feature axis).
        self.dfsmn = StreamingFSMNEncoder(num_bins,
                                          num_bins * num_branchs *
                                          (2 if complex_mask else 1),
                                          dim=dim,
                                          norm=norm,
                                          project=project,
                                          dropout=dropout,
                                          num_layers=num_layers,
                                          residual=residual,
                                          lctx=lctx,
                                          rctx=rctx)
        if complex_mask:
            # constraint in [-100, 100]
            self.masks = nn.Sequential(MaskNonLinear("none", enable="all"),
                                       TFTransposeTransform())
        else:
            self.masks = nn.Sequential(
                MaskNonLinear(non_linear, enable="common"),
                TFTransposeTransform())
        self.num_branchs = num_branchs
        self.complex_mask = complex_mask

        # Total left/right context of the whole stack, used in _infer()
        # to pad the feature sequence before running the encoder.
        def context(num_layers, ctx):
            return num_layers * ctx if isinstance(ctx, int) else sum(ctx)

        self.lctx = context(num_layers, lctx)
        self.rctx = context(num_layers, rctx)

    def _tf_mask(self, feats: th.Tensor) -> List[th.Tensor]:
        """
        TF mask estimation from given features
        """
        proj = self.dfsmn(feats, None)[0]
        # N x S*F x T
        masks = self.masks(proj)
        # [N x F x T, ...] - one mask per branch
        return th.chunk(masks, self.num_branchs, 1)

    def _infer(self, mix: th.Tensor,
               mode: str) -> Union[th.Tensor, List[th.Tensor]]:
        """
        Return time signals or frequency TF masks
        """
        # stft: N x F x T x 2
        stft, _ = self.enh_transform.encode(mix, None)
        # N x T x F
        feats = self.enh_transform(stft)
        # N x (T+L+R) x F - pad by total encoder context on both sides
        feats = tf.pad(feats, (0, 0, self.lctx, self.rctx), "constant", 0)
        # [N x F x T, ...]
        masks = self._tf_mask(feats)
        if self.complex_mask:
            # [N x F x T x 2, ...] - re-stack real/imag as the last axis
            masks = [th.stack(th.chunk(m, 2, 1), -1) for m in masks]
        # post processing: apply masks and go back to the waveform domain
        if mode == "time":
            bss_stft = [tf_masking(stft, m) for m in masks]
            packed = self.enh_transform.decode(bss_stft)
        else:
            packed = masks
        return packed[0] if self.num_branchs == 1 else packed

    def infer(self,
              mix: th.Tensor,
              mode: str = "time") -> Union[th.Tensor, List[th.Tensor]]:
        """
        Args:
            mix (Tensor): N x S, mixture signals
        Return:
            [Tensor, ...]: enhanced signals or TF masks
        """
        self.check_args(mix, training=False, valid_dim=[1])
        with th.no_grad():
            # add a batch axis of 1 for single-utterance inference
            mix = mix[None, :]
            ret = self._infer(mix, mode=mode)
            return ret[0] if self.num_branchs == 1 else [r[0] for r in ret]

    @th.jit.ignore
    def forward(self, mix: th.Tensor) -> Union[th.Tensor, List[th.Tensor]]:
        """
        Args:
            mix (Tensor): N x S, mixture signals
        Return:
            [Tensor, ...]: enhanced signals or TF masks
        """
        self.check_args(mix, training=True, valid_dim=[2])
        return self._infer(mix, self.training_mode)

    @th.jit.export
    def reset(self):
        # Clear the encoder's streaming state between utterances.
        self.dfsmn.reset()

    @th.jit.export
    def step(self, chunk: th.Tensor) -> th.Tensor:
        """
        Processing one step
        """
        # N x S*F x T
        masks = self.masks(self.dfsmn.step(chunk))
        # [N x F x T, ...]
        masks = th.chunk(masks, self.num_branchs, 1)
        if self.complex_mask:
            # [N x F x T x 2, ...]
            masks = [th.stack(th.chunk(m, 2, 1), -1) for m in masks]
        # S x N x F x T or S x N x F x T x 2
        masks = th.stack(masks)
        return masks[0] if self.num_branchs == 1 else masks
|
20,862 | 65926429148647adb1d9c58d28ee01ead97a1b4b | """
Traveling salesman problem (large)
"""
import numpy as np
# Load the city list: the first line is skipped (header/count), the
# remaining lines are "<id> <x> <y>" records.  Use a context manager so
# the file handle is always closed (the original leaked it).
with open('nn.txt', 'r') as f:
    ls = f.readlines()[1:]
# Drop the leading id column, keeping [x, y] per city.
graph = [list(map(float, i.split(' ')))[1:] for i in ls]
# Map city index -> [x, y] coordinates.
graph = {i: graph[i] for i in range(len(graph))}
N = len(graph)
def dis(i, j):
    """Squared Euclidean distance between cities *i* and *j* in ``graph``."""
    dx = graph[i][0] - graph[j][0]
    dy = graph[i][1] - graph[j][1]
    return dx ** 2 + dy ** 2
# Greedy nearest-neighbour tour starting from city 0.
tour = [0]
travel = 0
g = graph.copy()
g.pop(0)
while len(g) > 0:
    # Smallest *squared* distance from the current city to any unvisited
    # one.  float('inf') is a safe sentinel: the original used 1e9, which
    # silently picks no city once squared distances exceed it.
    plan = float('inf')
    for c in g:
        d = dis(tour[-1], c)
        if d < plan:
            plan = d
            city = c
    travel += np.sqrt(plan)
    tour += [city]
    g.pop(city)
    # Progress report every 1000 cities.
    if len(g) % 1000 == 0:
        print('Travel %i, %i cities left' % (city, len(g)))
# Close the loop back to the starting city.
travel += np.sqrt(dis(0, tour[-1]))
print(travel)
|
20,863 | 59188662872aa437a65968833a0ea3651ef4642f | from __future__ import absolute_import
from functools import wraps, partial
from apscheduler.schedulers.base import BaseScheduler
from apscheduler.util import maybe_ref
try:
import asyncio
except ImportError: # pragma: nocover
try:
import trollius as asyncio
except ImportError:
raise ImportError(
'AsyncIOScheduler requires either Python 3.4 or the asyncio package installed')
def run_in_event_loop(func):
    """Decorator that defers a method call onto the scheduler's event loop.

    The wrapped call does not run inline: it is bound with its arguments
    and handed to ``call_soon_threadsafe`` so it executes on the loop's
    thread. The wrapper itself returns None.
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        deferred = partial(func, self, *args, **kwargs)
        self._eventloop.call_soon_threadsafe(deferred)
    return wrapper
class AsyncIOScheduler(BaseScheduler):
    """
    A scheduler that runs on an asyncio (:pep:`3156`) event loop.

    The default executor can run jobs based on native coroutines (``async def``).

    Extra options:

    ============== =============================================================
    ``event_loop`` AsyncIO event loop to use (defaults to the global event loop)
    ============== =============================================================
    """
    # Event loop the scheduler runs on; set in _configure().
    _eventloop = None
    # Handle of the pending wakeup timer.  _stop_timer() deletes the
    # instance attribute, after which lookups fall back to this class-level
    # None default - that fallback is what keeps ``if self._timeout`` safe.
    _timeout = None

    @run_in_event_loop
    def shutdown(self, wait=True):
        # Runs on the event loop: stop the scheduler and cancel any
        # pending wakeup timer.
        super(AsyncIOScheduler, self).shutdown(wait)
        self._stop_timer()

    def _configure(self, config):
        # Resolve the 'event_loop' option (may be a textual reference);
        # default to the global asyncio event loop.
        self._eventloop = maybe_ref(config.pop('event_loop', None)) or asyncio.get_event_loop()
        super(AsyncIOScheduler, self)._configure(config)

    def _start_timer(self, wait_seconds):
        # (Re)arm the wakeup timer to fire in wait_seconds seconds;
        # None means there is nothing scheduled, so no timer is set.
        self._stop_timer()
        if wait_seconds is not None:
            self._timeout = self._eventloop.call_later(wait_seconds, self.wakeup)

    def _stop_timer(self):
        # Cancel the pending wakeup timer, if any (see _timeout note above).
        if self._timeout:
            self._timeout.cancel()
            del self._timeout

    @run_in_event_loop
    def wakeup(self):
        # Runs on the event loop: process due jobs, then re-arm the timer
        # for the next wakeup.
        self._stop_timer()
        wait_seconds = self._process_jobs()
        self._start_timer(wait_seconds)

    def _create_default_executor(self):
        # Imported lazily to avoid a hard import dependency at module load.
        from apscheduler.executors.asyncio import AsyncIOExecutor
        return AsyncIOExecutor()
|
20,864 | 39fa53883b5aba4e1cb55468a7b355e9e9e01c11 | # -*- coding:utf-8 -*-
# author:平手友梨奈ii
# e-mail:1353593259@qq.com
# datetime:1993/12/01
# filename:database.py
# software: PyCharm
import os
import cv2
from insightface.util.utils import preprocess
import tensorflow as tf
import json
import numpy as np
# Directory of reference face images (one image file per identity).
DB_PATH = './face_database'
# Output .npy file holding the stacked face embeddings.
EMB_FILE = './face_db.npy'
# Output .npy file holding the matching identity names.
NAME_FILE = './name_db.npy'
class FaceDataBase:
    """Builds a face-embedding database from images on disk.

    Embeddings come from a saved InsightFace TF model; they are written,
    together with the matching identity names, to .npy files.
    """

    def __init__(self):
        # Load the exported model and grab its serving signature.
        self.insightface = tf.saved_model.load(
            '../model_weights/saved_model').signatures['a_signature']

    def add_face(self):
        # TODO: write this function
        pass

    def initialize_db(self):
        """Embed every 112x112 face image under DB_PATH and save results."""
        names = []
        embeddings = []
        for filename in os.listdir(DB_PATH):
            if not filename.endswith(('jpg', 'jpeg', 'png')):
                continue
            # Read and validate the reference image.
            image = cv2.imread(os.path.join(DB_PATH, filename))
            if image.shape[0] != 112 or image.shape[1] != 112:
                raise ValueError("image shape is not (112, 112)")
            # OpenCV loads BGR; the model expects RGB.
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            image = preprocess(image)
            embedding = self.insightface(inputs=image,
                                         drop=tf.constant(1.0))['outputs']
            embeddings.append(embedding.numpy())
            # The image stem serves as the person's name.
            names.append(filename.split('.')[0])
        # Persist as npy files for fast lookup at recognition time.
        np.save(EMB_FILE, np.concatenate(embeddings, axis=0))
        np.save(NAME_FILE, np.array(names))
if __name__ == '__main__':
    # Build the database, then sanity-check what was written to disk.
    db = FaceDataBase()
    db.initialize_db()
    names = np.load(NAME_FILE)
    print(names.shape)
    embs = np.load(EMB_FILE)
    print(embs.shape)
|
20,865 | c7f3fd3e7920f65eb6973553b598795618ea0ffd | #bibliotecas nativas
import os
import time
import re
#bibliotecas instalados p/ projeto
from chatterbot import ChatBot
from chatterbot.trainers import ListTrainer
from selenium import webdriver
class wppbot:
    """WhatsApp Web chatbot driven by Selenium and ChatterBot."""
    # Working directory of the app (used for chromedriver and the profile).
    dir_path = os.getcwd()

    # Class constructor
    def __init__(self, nome_bot):
        # Set up the chatterbot instance
        self.bot = ChatBot(nome_bot)
        #ListTrainer(self.bot)
        # Folder where the chrome driver lives
        self.chrome = self.dir_path+'\chromedriver.exe'
        # Configure a profile so WhatsApp does not ask to log in every run
        self.options = webdriver.ChromeOptions()
        self.options.add_argument(r"user-data-dir="+self.dir_path+"\profile\wpp")
        # Start the driver
        self.driver = webdriver.Chrome(self.chrome,chrome_options=self.options)

    # Method start
    def inicia(self,nome_contato):
        # Selenium opens WhatsApp Web and waits up to 15s for it to load
        self.driver.get('https://web.whatsapp.com/')
        self.driver.implicitly_wait(15)
        # Locate the search bar element by class name
        self.caixa_de_pesquisa = self.driver.find_element_by_class_name('jN-F5')
        # Type the contact name in the search box and wait 2s
        self.caixa_de_pesquisa.send_keys(nome_contato)
        time.sleep(2)
        # Find the contact/group and click it
        self.contato = self.driver.find_element_by_xpath('//span[@title = "{}"]'.format(nome_contato))
        self.contato.click()
        time.sleep(2)

    # Method to send the opening message(s)
    def saudacao(self,frase_inicial):
        # Locate the message box element (_2S1VP)
        self.caixa_de_mensagem = self.driver.find_element_by_class_name('_2S1VP')
        # Only proceed if the opening message is a list
        if type(frase_inicial) == list:
            # Send each message in turn
            for frase in frase_inicial:
                # Type the phrase into the message box.
                self.caixa_de_mensagem.send_keys(frase)
                time.sleep(1)
                # Locate the send button and click it.
                self.botao_enviar = self.driver.find_element_by_class_name('_35EW6')
                self.botao_enviar.click()
                time.sleep(1)
        else:
            return False

    # Method that reads the latest message
    def escuta(self):
        # All message elements in the chat
        post = self.driver.find_elements_by_class_name('_3_7SH')
        # Index of the last message
        ultima = len(post) - 1
        # Text of the last message
        texto = post[ultima].find_element_by_css_selector('span.selectable-text').text
        return texto

    # Method that replies to a message
    def responde(self,texto):
        # Ask the bot for a response to the given text
        response = self.bot.get_response(texto)
        # Convert the response to a string
        response = str(response)
        # Prefix the reply with the bot marker
        response = '*bot :* _'+ response +'_'
        # Locate the message box, type the reply and send it
        self.caixa_de_mensagem = self.driver.find_element_by_class_name('_2S1VP')
        self.caixa_de_mensagem.send_keys(response)
        time.sleep(1)
        self.botao_enviar = self.driver.find_element_by_class_name('_35EW6')
        self.botao_enviar.click()

    # Method that trains the bot
    def treina(self,nome_pasta):
        # List every file in the folder and train the bot on each line.
        for treino in os.listdir(nome_pasta):
            conversas = open(nome_pasta+'/'+treino, 'r').readlines()
            trainer = ListTrainer(self.bot)
            trainer.train(conversas)
|
20,866 | d7309fce670fcdf5e42c589c250c5384e814ec9b | # -*- coding: utf-8 -*-
# Definition of a function meant to be imported and used from other modules
def printMessage(msg) :
    print(f'MESSAGE : {msg}')
# When creating a module, executable statements inside the module file
# also run at import time.
# To keep such statements from running when this file is imported by
# another file, use the __main__ namespace check.
# When this file is the one being executed directly,
# __name__ has the value __main__.
print(f"__name__ -> {__name__}")

if __name__ == '__main__' :
    # Test code that checks the function's behaviour
    printMessage("TEST MESSAGE")
20,867 | a1324289a35abdf2472c601abec65a9faa2d80ad | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.http import HttpResponse
from django.views.generic import ListView
from django.views.generic.base import TemplateView
from django.views.decorators.csrf import csrf_exempt
# from edxmako.shortcuts import render_to_response
from .models import Standard_Subject
# Create your views here.
class StandardList(ListView):
    """
    This class contains all methods related to subject_test
    """

    def get(self, request, **kwargs):
        """
        Return all standards
        """
        standards = Standard_Subject.objects.all()
        return render(
            request,
            "subjecttest/select_std.html",
            {'standard_list': standards},
        )
class SubjectTest(ListView):
    """
    This class returns questions for the test
    """

    def get(self, request, **kwargs):
        # NOTE(review): renders the same standard list as StandardList;
        # presumably the questions are resolved in the template - confirm.
        context = {'standard_list': Standard_Subject.objects.all()}
        return render(request, "subjecttest/test.html", context)
|
20,868 | 6ed9eba0e1f728316d9755fdb2dd77a87f99a88a | import pkg_resources
import re
from datetime import datetime, date, timedelta, time
from trac.core import *
from trac.web.chrome import ITemplateProvider, add_script, add_script_data, \
add_stylesheet, add_notice, add_ctxtnav
from trac.web import ITemplateStreamFilter
from trac.ticket.model import Milestone
from trac.web.api import IRequestFilter, IRequestHandler
from trac.util.datefmt import to_utimestamp, utc,to_timestamp
from trac.config import Option
from itertools import groupby
from operator import itemgetter
from genshi.filters.transform import Transformer
from genshi.builder import tag
from trac.env import IEnvironmentSetupParticipant
from componentdependencies import IRequireComponents
from businessintelligenceplugin.history import HistoryStorageSystem
from logicaordertracker.controller import LogicaOrderController
from trac.util.presentation import to_json
from trac.resource import ResourceNotFound
# Author: Danny Milsom <danny.milsom@cgi.com>
class BurnDownCharts(Component):
unit_value = Option('burndown', 'units', 'tickets',
doc="The units of effort for the burndown chart")
day_value = Option('burndown', 'days', 'all',
doc="The different days to include in the burndown chart.")
ideal_value = Option('burndown', 'ideal', 'fixed')
implements(ITemplateProvider, IRequestFilter,
ITemplateStreamFilter, IRequireComponents,
IEnvironmentSetupParticipant, IRequestHandler)
# IRequestFilter methods
    def pre_process_request(self, req, handler):
        # IRequestFilter hook: no pre-processing needed, pass through.
        return handler
    def post_process_request(self, req, template, data, content_type):
        """The actual burndown chart is generated and rendered after page load
        using an AJAX call which is picked up by the match_request() and
        process_request() methods.

        This method determines if we should send that AJAX call, by
        checking the milestone has both a start and end date. If so, the
        jqPlot JS files are loaded and the script data 'render_burndown'
        passed via JSON."""

        # check we are on an individual milestone page
        if req.path_info.startswith("/milestone/") and req.args.get('id') \
                and "stats" in data:
            milestone = self._get_milestone(req)
            if milestone:
                # Load the burn down JS file
                add_script(req, 'burndown/js/burndown.js')
                if milestone.start:
                    approx_start_date = False
                else:
                    # no milestone start value so try and estimate start date
                    approx_start_date = self.guess_start_date(milestone)
                    if not approx_start_date:
                        # no milestone start or estimated start date
                        # dont show a burn down chart
                        add_script_data(req, {'render_burndown': False})
                        return template, data, content_type

                # If we do have a start date (explicit or implied),
                # tell JS it should send a request via JSON and use
                # the default effort value
                add_script_data(req, {
                    'render_burndown': True,
                    'milestone_name': milestone.name,
                    'print_burndown': False,
                    'effort_units': self.unit_value,
                    'approx_start_date': approx_start_date,
                })

                # Add a burndown unit option to the context nav
                add_ctxtnav(req, tag.div(
                    tag.a(
                        tag.i(class_="fa fa-bar-chart "),
                        " Burn Down Units"),
                    tag.ul(
                        tag.li(
                            tag.a('Tickets', href=None, id_="tickets-metric"),
                        ),
                        tag.li(
                            tag.a('Hours', href=None, id_="hours-metric"),
                        ),
                        tag.li(
                            tag.a('Story Points', href=None, id_="points-metric"),
                        ),
                        class_="styled-dropdown fixed-max"
                    ),
                    class_="dropdown-toggle inline block",
                    )
                )

                # Add a print link to the context nav
                add_ctxtnav(req, tag.a(
                    tag.i(class_="fa fa-print"),
                    " Print Burn Down", id_="print-burndown"))

                # Adds jqPlot library needed by burndown charts
                self._add_static_files(req)

        return template, data, content_type
# IRequestHandler
    def match_request(self, req):
        """Match URLs of the form /burndownchart/<milestone_id>.

        Requests to this URL are usually sent via AJAX or when printing
        the burn down chart. It is important to check that the milestone
        exists, as another IRequestHandler opens a new milestone template
        when a user references a non-existent milestone. Additional
        arguments are dealt with by process_request(). Implicitly returns
        None (no match) in every other case."""
        match = re.match(r'/burndownchart(?:/(.+))?$', req.path_info)
        if match:
            if match.group(1):
                # check that the milestone exists
                req.args['id'] = match.group(1)
                milestone = self._get_milestone(req)
                if milestone:
                    return True
    def process_request(self, req):
        """Collect the data needed for a burndown chart and pass to JavaScript.

        If the original request was via AJAX we use to_json, otherwise
        we return data via add_script_data.

        Remember that our data reflects how a project looked at the end of a
        day - so if the ticket_bi_historical table has 20 open tickets on
        the 1st December, there were 20 open tickets at the end of that day.
        """

        # Get milestone object and all child milestones
        # we already know it exists, as we checked in the match_request()
        milestone = Milestone(self.env, req.args['id'])
        tree = Milestone.build_tree(self.env)
        all_milestones = [m.name for m in tree.find(milestone.name).traverse()]

        # If anyone requests burndownchart/milestone_id not via AJAX
        # and not with a format argument (eg when printing), we redirect to
        # milestone/milestonename, as we need to load pre_process_request first
        XMLHttp = req.get_header('X-Requested-With') == 'XMLHttpRequest'
        if not XMLHttp and 'format' not in req.args:
            req.redirect(req.href.milestone(milestone.name))

        # Calculate series of dates used to render the burn down charts
        start = self.get_start_date(req, milestone)
        end = self.get_end_date(milestone)
        day_before_start = start - timedelta(days=1)
        dates = self.dates_inbetween(day_before_start, end)

        # Open a database connection
        self.log.debug('Connecting to the database to retrieve chart data')
        db = self.env.get_read_db()

        # If no metric data is posted, use the project default self.unit_value
        metric = req.args.get('metric', self.unit_value)

        # Remaining Effort (aka burndown) Curve
        remaining_effort_args = [db, all_milestones, day_before_start, end]
        burndown_series = []
        if metric == 'tickets':
            burndown_series = self.tickets_open_between_dates(*remaining_effort_args)
        elif metric == 'hours':
            burndown_series = self.hours_remaining_between_dates(*remaining_effort_args)
        elif metric == 'points':
            burndown_series = self.points_remaining_between_dates(*remaining_effort_args)

        # If we don't have any burndown data send a message and stop
        if not burndown_series:
            data = {'result': False}
            # For ajax request
            if XMLHttp:
                req.send(to_json(data), 'text/json')
            else:
                return 'burndown_print.html', data, None

        # Team Effort Curve
        team_effort = self.team_effort_curve(db, metric, all_milestones,
                                             day_before_start, end,
                                             self.dates_as_strings(dates))

        # Ideal Curve (unit value doesnt matter)
        # NOTE(review): ideal_data is only bound when self.ideal_value is
        # 'fixed'; other configured values would raise NameError below -
        # confirm 'fixed' is the only supported option.
        if self.ideal_value == 'fixed':
            original_estimate = burndown_series[0][1]
            ideal_data = self.ideal_curve(original_estimate, day_before_start,
                                          self.get_due_date(milestone))

        data = {
            'burndowndata': burndown_series,
            'teameffortdata' : team_effort,
            'idealcurvedata': ideal_data,
            'milestone_name': milestone.name,
            'start_date': str(day_before_start),
            'effort_units': metric,
            'yaxix_label': metric.title(),
            'result' : True,
        }

        # we need some logic to work out the end date on the xAxis
        data['due_date'] = (self.get_due_date(milestone)).strftime("%Y-%m-%d")

        # Ajax request
        if XMLHttp:
            kwargs = { 'daysback':0,
                       'ticket':'on',
                       'ticket_details': 'on',
                       'ticket_milestone_%s' % Milestone._hash_name(milestone.name): 'on'
                       }
            data.update({
                'timeline_url': req.href.timeline(kwargs),
                'print_burndown': False,
                'render_burndown': True,
            })
            req.send(to_json(data), 'text/json')
        # Normal request (eg the print friendly page)
        else:
            if req.args.get('format') == 'print':
                # Load the burn down JS file and jqPlot library
                add_script(req, 'burndown/js/burndown.js')
                self._add_static_files(req)
                result = {'data': data,
                          'print_burndown': True,
                          'render_burndown': True,
                          'result': True,
                          }
                add_script_data(req, result)
                return 'burndown_print.html', result, None
# Other methods for the class
def _get_milestone(self, req):
"""Returns a milestone instance if one exists, or None if it
does not."""
milestone_id = req.args['id']
try:
milestone = Milestone(self.env, milestone_id)
except ResourceNotFound:
milestone = None
return milestone
def guess_start_date(self, milestone):
"""
Approximates a start date if a milestone has no start date explicitly set.
We look in the ticket_bi_historical table for the first date a ticket
is assigned to the milestone. If the query returns a date, we use
that for our approx_start_date.
We do this so we can show users useful data even if a milestone has
no start date.
"""
db = self.env.get_read_db()
cursor = db.cursor()
cursor.execute("""
SELECT _snapshottime
FROM ticket_bi_historical
WHERE milestone = %s
ORDER BY _snapshottime ASC
LIMIT 1
""", [milestone.name])
res = cursor.fetchone()
if res:
try:
return res[0].strftime('%Y-%m-%d')
except AttributeError as e:
self.log(e)
def _get_jqplot(self, filename):
"""Quick reference to the location of jqPlot files"""
return "common/js/jqPlot/" + filename + ".js"
def _add_static_files(self, req):
"""Adds all the jqPlot JS files we need for the burndown charts"""
add_script(req, self._get_jqplot('jquery.jqplot'))
add_stylesheet(req, 'common/js/jqPlot/jquery.jqplot.css')
# excanvas is needed for IE8 support
add_script(req, self._get_jqplot('excanvas.min'))
add_script(req, self._get_jqplot('plugins/jqplot.dateAxisRenderer'))
add_script(req, self._get_jqplot('plugins/jqplot.highlighter'))
add_script(req, self._get_jqplot('plugins/jqplot.canvasTextRenderer'))
add_script(req, self._get_jqplot('plugins/jqplot.canvasAxisTickRenderer'))
add_script(req, self._get_jqplot('plugins/jqplot.canvasAxisLabelRenderer'))
add_script(req, self._get_jqplot('plugins/jqplot.enhancedLegendRenderer'))
def get_start_date(self, req, milestone):
"""
Returns the start date, which we use as the first coordiante
on the x-axis of burn down charts.
If the milestone has a start date set, we use this value.
If not we try and predicate this date, so can display some useful
data to users. We look to see if there is a approx_start_date var
in req.arg_list. If truthy, this date is the first time a ticket
is assigned to the milestone according to the ticket_bi_historical_table.
See the post_process_request in this component.
"""
if milestone.start:
return milestone.start.date()
elif 'approx_start_date' in req.args:
return datetime.strptime(req.args['approx_start_date'], '%Y-%m-%d').date() + timedelta(days=1)
def get_due_date(self, milestone):
"""
Returns the due date if this attribute is set for the milestone,
or the estimated end date as determinded by get_end_date().
"""
if milestone.due:
return milestone.due.date()
else:
return self.get_end_date(milestone)
def get_end_date(self, milestone):
"""
Returns the end date, which we use as the last coordinate on the
x-axis of burn down charts.
If we have milestone due date and that is in the past, use that.
Otherwise use yesterdays date. We use yesterday not today as the
history capture script only collects information at the end of each day.
"""
if milestone.due and milestone.due.date() < date.today():
return milestone.due.date()
# else we take yesterday to be the end date point for the x-axis
return date.today() - timedelta(days=1)
    def team_effort_curve(self, db, metric, milestone_names, milestone_start, end, dates):
        """Returns a list of tuples, each representing the total number
        of tickets closed on each day for a respective milestone. If no
        tickets are closed, a tuple for that date will still be included
        alongside a 0 value.

        If the metric specified is tickets, the number of tickets closed
        on that date will be used. If a ticket is reopened after it is
        closed, we will not count that ticket as effort.

        If the metric specified is hours, the total amount of work logged
        in the ticket_time table against tickets in the milestone on
        a given day will be used.

        If the metric specified is story_points, the total amount of story points
        for all tickets closed on that day will be used."""

        # Convert milestone start and end dates to timestamps based on metric.
        # NOTE(review): the hours branch uses to_timestamp (seconds) for the
        # start but to_utimestamp (microseconds) for the end, presumably
        # because ticket_time.time_started stores seconds - confirm.
        if metric == 'hours':
            start_stamp = to_timestamp(datetime.combine(milestone_start,
                                       time(hour=0,minute=00,tzinfo=utc)))
            end_stamp = to_utimestamp(datetime.combine(end,
                                      time(hour=23,minute=59,tzinfo=utc)))
        else: # must be tickets or story points
            start_stamp = to_utimestamp(datetime.combine(milestone_start,
                                        time(hour=0,minute=00,tzinfo=utc)))
            end_stamp = to_utimestamp(datetime.combine(end,
                                      time(hour=23,minute=59,tzinfo=utc)))

        # Create a cursor to access the database
        cursor = db.cursor()

        # Get all statuses we consider to mean that the ticket is closed
        closed_statuses, types_and_statuses = self.closed_statuses_for_all_types()

        # These queries all rely on a join with the ticket_bi_historical
        # table to see which tickets were in the defined milestone
        # on a given day
        try:
            if metric == 'tickets':
                cursor.execute("""
                    SELECT c.ticket,
                    (timestamp with time zone 'epoch' + c.time/1000000 * INTERVAL '1 second')::date as day,
                    h.type,
                    c.oldvalue,
                    c.newvalue
                    FROM ticket_change AS c
                    JOIN ticket_bi_historical AS h
                        ON c.ticket = h.id
                        AND h._snapshottime = (timestamp with time zone 'epoch' + c.time/1000000 * INTERVAL '1 second')::date
                    WHERE h.milestone IN ({0})
                        AND c.time >= %s
                        AND c.time <= %s
                        AND c.field = 'status'
                    ORDER BY c.time ASC
                    """.format(db.parammarks(len(milestone_names))),
                    milestone_names + [start_stamp, end_stamp])
            elif metric == 'hours':
                cursor.execute("""
                    SELECT SUM(t.seconds_worked),
                    (timestamp with time zone 'epoch' + t.time_started * INTERVAL '1 second')::date as day
                    FROM ticket_time AS t
                    JOIN ticket_bi_historical AS h
                        ON t.ticket = h.id
                        AND h._snapshottime = (timestamp with time zone 'epoch' + t.time_started * INTERVAL '1 second')::date
                    WHERE h.milestone IN ({0})
                        AND t.time_started >= %s
                        AND t.time_started <= %s
                    GROUP BY day;
                    """.format(db.parammarks(len(milestone_names))),
                    milestone_names + [start_stamp, end_stamp])
            elif metric == 'points':
                cursor.execute("""
                    SELECT h.id,
                        h._snapshottime,
                        h.type,
                        c.oldvalue,
                        c.newvalue,
                        h.effort
                    FROM ticket_bi_historical AS h
                    JOIN ticket_change AS c ON h.id = c.ticket
                        AND h._snapshottime = (timestamp with time zone 'epoch' + c.time/1000000 * INTERVAL '1 second')::date
                    WHERE h.milestone IN ({0})
                        AND c.time >= %s
                        AND c.time <= %s
                        AND c.field = 'status'
                    ORDER BY h._snapshottime
                    """.format(db.parammarks(len(milestone_names))),
                    milestone_names + [start_stamp, end_stamp])
        except Exception:
            db.rollback()
            self.log.exception('Unable to query the historical ticket table')
            return []

        if metric == 'hours':
            # seconds -> hours for each returned day
            work_per_date = [(i[1].strftime('%Y-%m-%d'),
                              float(i[0])/60/60) for i in cursor]
        else:
            # must be tickets or story points
            work_per_date = self.count_tickets_closed(cursor, closed_statuses, metric)

        # Add missing dates from milestone where no tickets were closed
        set_of_dates = set([i[0] for i in work_per_date])
        missing_dates = [(date, 0) for date in dates if date not in set_of_dates]

        return work_per_date + missing_dates
def closed_statuses_for_all_types(self):
"""Returns a dictionary where the keys are tickets types and the associated
values are statuses from workflow status groups where closed='True'.
Essentially if a ticket is in one of these statuses, we consider it closed
and from this infer that no more work is required to complete the ticket.
"""
controller = LogicaOrderController(self.env)
closed_statuses = controller.type_and_statuses_for_closed_statusgroups()
types_and_statuses = []
for type_, statuses in closed_statuses.iteritems():
types_and_statuses.append(type_)
types_and_statuses.extend(statuses)
return closed_statuses, types_and_statuses
    def tickets_in_milestone(self, milestone_names, milestone_start, end):
        """Returns a dictionary where the keys are dates between the
        milestone start and end date arguments, and the associated value is
        a set of all ticket ids within the milestone on that date."""
        db = self.env.get_read_db()
        cursor = db.cursor()
        try:
            cursor.execute("""
                SELECT _snapshottime, id
                FROM ticket_bi_historical
                WHERE milestone IN ({0})
                    AND _snapshottime >=%s
                    AND _snapshottime <=%s
                ORDER BY _snapshottime ASC
                """.format(','.join(('%s',)*len(milestone_names))), milestone_names + [milestone_start, end])
        except Exception:
            db.rollback()
            self.log.exception('Unable to query the historical ticket table')
            return []

        # Group the (date, id) rows by date into {date: set(ids)}.
        # groupby() is safe here because rows arrive ordered by date.
        data = {}
        for key, ticket in groupby(cursor, itemgetter(0)):
            data[key] = set([])
            for i in ticket:
                data[key].update([i[1]])

        # Note no sorting necessary as qpPlot does this for us
        return data
    def hours_remaining_between_dates(self, db, milestone_names,
                                      milestone_start, end):
        """Returns a list of tuples, each with a date and total remaining hours
        value for all open tickets in that milestone.

        This data is used for the burndown graph, if users want to show the
        remaining effort burndown on a daily basis for all tickets. As a result
        if work is added to the milestone after the start date, it is reflected
        in this curve.

        Also note that if a ticket is closed, we consider there to be 0 remaining
        hours of effort remaining - even if the remaining effort value on the
        ticket suggests otherwise. This is necessary as if the ticket is closed,
        it is implied that there is no further work to complete."""

        self.log.debug('Querying the database for historical ticket hours data')
        cursor = db.cursor()
        try:
            # isclosed = 0 excludes closed tickets per the docstring above.
            cursor.execute("""
                SELECT _snapshottime,
                    SUM(remaininghours)
                FROM ticket_bi_historical
                WHERE milestone IN ({0})
                    AND _snapshottime >=%s
                    AND _snapshottime <=%s
                    AND isclosed = 0
                GROUP BY _snapshottime
                ORDER BY _snapshottime ASC
                """.format(','.join(('%s',)*len(milestone_names))), milestone_names + [milestone_start, end])
        except Exception:
            db.rollback()
            self.log.exception('Unable to query the historical ticket table')
            return []

        # (date-string, total-hours) pairs in chronological order.
        return [(str(i[0]), i[1]) for i in cursor]
def tickets_open_between_dates(self, db, milestone_names,
                               milestone_start, end):
    """Return (date, open_ticket_count) tuples for the given milestones.

    Counts the distinct tickets still open on each historical snapshot
    between milestone_start and end, in ascending date order, so the
    remaining-effort curve can be drawn on the burndown chart. The
    first tuple always represents the first day of the milestone.
    Returns an empty list if the query fails.
    """
    self.log.debug('Querying the database for historical tickets open data')
    placeholders = ','.join(('%s',) * len(milestone_names))
    query = """
            SELECT _snapshottime, COUNT(DISTINCT id)
            FROM ticket_bi_historical
            WHERE milestone IN ({0})
            AND _snapshottime >=%s
            AND _snapshottime <=%s
            AND isclosed = 0
            GROUP BY _snapshottime
            ORDER BY _snapshottime ASC
            """.format(placeholders)
    cursor = db.cursor()
    try:
        cursor.execute(query, milestone_names + [milestone_start, end])
    except Exception:
        db.rollback()
        self.log.exception('Unable to query the historical ticket table')
        return []
    return [(str(row[0]), row[1]) for row in cursor]
def points_remaining_between_dates(self, db, milestone_names,
                                   milestone_start, end):
    """Return (date, remaining_points) tuples for the burndown graph.

    Sums the story point / effort estimate of every open ticket in the
    given milestones for each snapshot date between milestone_start and
    end, in ascending date order (the ORDER BY was missing, unlike the
    sibling time-series queries). Returns an empty list on failure.
    """
    self.log.debug('Querying the database for historical effort/story point data')
    cursor = db.cursor()
    try:
        cursor.execute("""
            SELECT _snapshottime, SUM(effort)
            FROM ticket_bi_historical
            WHERE milestone IN ({0})
            AND _snapshottime >=%s
            AND _snapshottime <=%s
            AND isclosed = 0
            GROUP BY _snapshottime
            ORDER BY _snapshottime ASC
            """.format(db.parammarks(len(milestone_names))),
            milestone_names + [milestone_start, end])
    except Exception:
        db.rollback()
        self.log.exception('Unable to query the historical ticket table')
        return []
    return [(str(i[0]), i[1]) for i in cursor]
def work_added(self, effort_data, logged_data):
    """Return (date, work_added) tuples, one per day.

    The work added on day n is the day-on-day change in remaining
    effort plus the work logged on day n; the first day is always
    reported as zero. Negative totals are clamped to zero
    (see https://d4.define.logica.com/ticket/3727).
    Assumes effort_data and logged_data cover the same dates, so after
    sorting by date the two sequences pair up element-wise.
    """
    # Work can be added by:
    # * creating a new ticket in the milestone
    # * moving a ticket into the milestone
    # * increasing the remaining estimated effort / story points
    # Luckily we don't need to distinguish between these cases here.
    ordered_effort = sorted(effort_data, key=itemgetter(0))
    # Bug fix: the previous day's value must come from the *sorted*
    # sequence; the original indexed the unsorted input at i-1, which
    # gave wrong deltas whenever effort_data was not already sorted.
    remaining_difference = [
        (day, 0) if i == 0 else (day, remaining - ordered_effort[i - 1][1])
        for i, (day, remaining) in enumerate(ordered_effort)]
    return [(remaining[0], max(0, remaining[1] + logged[1]))
            for remaining, logged
            in zip(remaining_difference, sorted(logged_data, key=itemgetter(0)))]
def dates_inbetween(self, start, end):
    """Returns a list of datetime objects, with each item
    representing a day in that period (start and end inclusive).

    Note: uses xrange, so this module is Python 2 only.
    """
    return [start + timedelta(days=i) for i in xrange((end - start).days + 1)]
def get_date_values(self, all_dates, blacklisted_dates=None):
    """Return (working_dates, non_working_dates) for a milestone.

    Which days count as working is controlled by self.day_value:
    'all' (every day), 'weekdays' (Mon-Fri) or 'custom' (every day not
    in *blacklisted_dates*). The day before the milestone start date
    (all_dates[0]) is always forced to be a working day, otherwise the
    ideal-effort curve would not decrease by the end of the actual
    start date.
    """
    if self.day_value == 'all':
        working_dates = all_dates[:]
        non_working_dates = []
    elif self.day_value == 'weekdays':
        working_dates, non_working_dates = self.working_days(all_dates)
    elif self.day_value == 'custom':
        # Bug fix: blacklisted_dates was referenced here but never
        # defined anywhere, so the 'custom' setting always raised
        # NameError. It is now an optional (backward-compatible) arg.
        working_dates, non_working_dates = self.working_days(all_dates,
                                                            blacklisted_dates)
    else:
        # Bug fix: an unrecognised day_value previously caused
        # UnboundLocalError below; fall back to treating all days
        # as working days.
        working_dates = all_dates[:]
        non_working_dates = []
    # we always want the day before the milestone starts to be a working day
    # regardless of whether it is a weekday or weekend
    day_before = all_dates[0]
    if day_before not in working_dates:
        non_working_dates.remove(day_before)
        working_dates.insert(0, day_before)
    # else it must be in working dates already
    return working_dates, non_working_dates
def working_days(self, dates, blacklisted_dates=None):
    """Split *dates* into working and non-working days.

    With no blacklisted_dates, Saturdays and Sundays are non-working.
    With blacklisted_dates, exactly the listed dates are non-working
    (weekend dates must be explicitly included in the blacklist).

    Returns (working_days, non_working_days), two lists that both
    preserve the order of *dates*.
    """
    if not blacklisted_dates:
        work_dates = [date2 for date2 in dates if date2.weekday() < 5]
    else:
        # Perf fix: build each membership set once instead of inside the
        # comprehension, which rebuilt it per element (O(n^2)).
        blacklisted = set(blacklisted_dates)
        work_dates = [date2 for date2 in dates if date2 not in blacklisted]
    work_set = set(work_dates)
    non_working_dates = [date2 for date2 in dates if date2 not in work_set]
    return work_dates, non_working_dates
def ideal_curve(self, original_estimate, start, due):
    """Returns the average amount of work needed to remain on each day
    if the team is to finish all the work in a milestone/sprint by the
    due date, taking into account non working days.

    The first date is always one day before the actual milestone start
    date, so users can see how much work was performed between the end
    of the day before the milestone started, and the end of the first
    actual day.

    Also calls the dates_as_strings method first so the returned list
    can be passed straight to JSON.
    """
    # we count the day before as a milestone date, but a non working one
    dates = self.dates_inbetween(start, due)
    working_dates, non_work_dates = self.get_date_values(dates)
    try:
        # minus one: the synthetic "day before the milestone" entry
        # added by get_date_values must not absorb a share of the work
        work_per_day = float(original_estimate) / (len(working_dates) - 1)
    except ZeroDivisionError:
        # the milestone is only 1 day long
        work_per_day = original_estimate
    working_dates_str = self.dates_as_strings(working_dates)
    ideal_data = []
    work_days = 0
    # we set ideal_for_date and last_day_amount to original estimate
    # to handle cases when the first day in the milestone is a weekend
    ideal_for_date = last_day_amount = original_estimate
    for date in self.dates_as_strings(dates):
        if date in set(working_dates_str):
            # working day: the remaining ideal work drops linearly
            ideal_for_date = original_estimate - (work_per_day*work_days)
            ideal_data.append((date, ideal_for_date))
            work_days += 1
        else:
            # non-working day: the curve stays flat at yesterday's value
            ideal_data.append((date, last_day_amount))
        last_day_amount = ideal_for_date
    return ideal_data
def count_tickets_closed(self, cursor, closed_statuses, metric):
    """This is used to render the work logged curve, and counts
    the number of tickets moved from an open to closed
    status on a given day. If a ticket is re-opened
    after it has already been closed on a day, we will
    no longer count that ticket as closed for the purposes
    of this count.

    Each row yielded by cursor is one status change:
    # change[0] is ticket id
    # change[1] is date changed
    # change[2] is ticket type
    # change[3] is old status value
    # change[4] is new status value
    # change[5] is effort value (only for story point metric)

    metric is 'tickets' or 'story_points'. Returns a list of
    (date-string, count-or-points) tuples, one per date.
    NOTE(review): groupby only groups *consecutive* rows, so this
    assumes the cursor is already ordered by date — confirm at callers.
    """
    # Group changes by date
    closed_per_date = []
    for date, changes in groupby(cursor, itemgetter(1)):
        closed_ids = []
        effort = {}
        for change in changes:
            # Get all closed statuses for ticket type
            closed_status_for_type = closed_statuses[change[2]]
            # if moved from an open to closed status
            if change[3] not in closed_status_for_type and change[4] in closed_status_for_type:
                closed_ids.append(change[0])
                # keep track of story point values for each ticket
                if len(change) >= 6 and change[0] not in effort:
                    # use id for key, points for the value
                    effort[change[0]] = change[5]
            # if moved from a closed status to open
            if change[3] in closed_status_for_type and change[4] not in closed_status_for_type:
                # remove id from our list and story points from effort
                try:
                    closed_ids.remove(change[0])
                    del effort[change[0]]
                except (ValueError, KeyError):
                    # we don't worry about these exceptions as
                    # sometimes the ticket wont be in closed_ids or effort
                    pass
        if metric =='tickets':
            # List of tuples (date, closed_count)
            closed_per_date.append((date.strftime('%Y-%m-%d'), len(closed_ids)))
        elif metric == 'story_points':
            # List of tuples (date, total of effort for all closed tickets)
            closed_per_date.append((date.strftime('%Y-%m-%d'), sum(effort.values())))
    return closed_per_date
def closed_status_clause(self, closed_statuses):
    """Returns a SQL clause which lists all ticket types
    and the closed statuses associated with their workflow.

    closed_statuses maps ticket type -> iterable of closed status
    names; the %%s / %s placeholders are bound later by the db layer.
    Note: dict.iteritems() makes this Python 2 only.
    """
    return ' OR '.join('(h.type = %%s AND c.newvalue IN (%s))'
                       % ','.join('%s' for status in statuses)
                       for type_, statuses in closed_statuses.iteritems())
def dates_as_strings(self, dates):
    """Format every date in *dates* as an ISO 'YYYY-MM-DD' string."""
    formatted = []
    for entry in dates:
        formatted.append(entry.strftime('%Y-%m-%d'))
    return formatted
# ITemplateStreamFilter
def filter_stream(self, req, method, filename, stream, data):
    """Inject the burndown chart container (plus a help link) right
    after the milestone overview on every /milestone/<name> page."""
    if re.match('/milestone/[^ ]', req.path_info):
        help_page_url = req.href.help('DefineGuide', 'DefineAgile', 'BurndownCharts')
        stream = stream | Transformer("//*[@id='milestone-overview']").after(tag(
            tag.h2("Burn Down Chart ",
                   tag.a(
                       tag.i(class_="fa fa-question-circle color-muted", id_="burndown_more_info"),
                       href=help_page_url, target="_blank")
                   ),
            # The chart itself is rendered client-side into this div
            tag.div(id_='milestone-burndown', class_='milestone-info')
            )
        )
    return stream
# ITemplateProvider methods
def get_htdocs_dirs(self):
    """Expose this plugin's static assets under the 'burndown' prefix."""
    return [('burndown', pkg_resources.resource_filename(__name__,
                                                         'htdocs'))]
def get_templates_dirs(self):
    """Return the directory containing this plugin's templates."""
    return [pkg_resources.resource_filename(__name__, 'templates')]
# IRequireComponents methods
def requires(self):
    """This component depends on the historical ticket storage system."""
    return [HistoryStorageSystem]
# IEnvironmentSetupParticipant methods
def environment_created(self):
    """No setup is needed when a new environment is created."""
    pass
def environment_needs_upgrade(self, db):
    """This plugin never requires a schema upgrade (always falsy)."""
    pass
def upgrade_environment(self, db):
    """Nothing to upgrade; required by IEnvironmentSetupParticipant."""
    pass
|
20,869 | 090ed31e29ca3232113cbbe0725cc943e2c05244 | # Generated by Django 2.2.10 on 2020-08-13 13:40
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the mis-capitalised 'Workflow' field on Webhook to 'workflow'."""

    dependencies = [
        ("workflow_handler", "0023_auto_20200813_1319"),
    ]

    operations = [
        migrations.RenameField(
            model_name="webhook",
            old_name="Workflow",
            new_name="workflow",
        ),
    ]
|
20,870 | 6230db9be587adda1c64a607f8d54ac9d7fca300 | import unittest
import numpy as np
import numpy.testing as npt
from astroNN.apogee import (
gap_delete,
apogee_default_dr,
bitmask_decompositor,
chips_split,
bitmask_boolean,
apogee_continuum,
aspcap_mask,
combined_spectra,
visit_spectra,
)
from astroNN.apogee.apogee_shared import apogeeid_digit
class ApogeeToolsCase(unittest.TestCase):
    """Tests for the APOGEE spectra utility helpers.

    Assertions of the form assertEqual(expr == x, True) were replaced
    with direct assertions (assertEqual/assertTrue/assertIsNone) so a
    failure reports the offending value instead of 'False != True'.
    """

    def test_apogee_tools(self):
        # Example data: a full APOGEE spectrum is 8575 pixels wide
        raw_spectra = np.ones((10, 8575))
        raw_spectrum = np.ones(8575)
        wrong_spectrum = np.ones(1024)
        # gap_delete strips the inter-chip gaps (8575 -> 7514 px here)
        self.assertEqual(gap_delete(raw_spectra).shape, (10, 7514))
        self.assertEqual(gap_delete(raw_spectrum).shape, (1, 7514))
        # DR12 spectra have a different post-gap pixel count
        self.assertEqual(gap_delete(raw_spectra, dr=12).shape, (10, 7214))
        self.assertEqual(gap_delete(raw_spectrum, dr=12).shape, (1, 7214))
        self.assertRaises(EnvironmentError, gap_delete, wrong_spectrum)

        # check apogee default dr (original comment said 'gaia')
        self.assertEqual(apogee_default_dr(), 17)
        self.assertEqual(apogee_default_dr(dr=3), 3)

        # bitmask
        self.assertIsNone(bitmask_decompositor(0))
        npt.assert_array_equal(bitmask_decompositor(1), [0])
        npt.assert_array_equal(bitmask_decompositor(3), [0, 1])
        npt.assert_array_equal(bitmask_boolean([0, 1, 2], [0]), [[False, True, False]])
        self.assertRaises(ValueError, bitmask_decompositor, -1)

        # chips_split partitions a spectrum into blue/green/red chips
        blue, green, red = chips_split(raw_spectra)
        self.assertEqual(np.concatenate((blue, green, red), axis=1).shape,
                         (10, 7514))
        blue, green, red = chips_split(raw_spectrum)
        self.assertEqual(np.concatenate((blue, green, red), axis=1).shape,
                         (1, 7514))
        self.assertRaises(ValueError, chips_split, raw_spectra, dr=10)

    def test_apogee_continuum(self):
        raw_spectra = np.ones((10, 8575)) * 2
        raw_spectra_err = np.zeros((10, 8575))
        # continuum-normalised spectra should average to 1
        cont_spectra, cont_spectra_arr = apogee_continuum(raw_spectra, raw_spectra_err)
        self.assertAlmostEqual(float(np.mean(cont_spectra)), 1.0)

    def test_apogee_digit_extractor(self):
        # Test apogeeid digit extractor; these calls just must not raise
        apogeeid_digit(["2M00380508+5608579", "2M00380508+5608579"])
        apogeeid_digit(np.array(["2M00380508+5608579", "2M00380508+5608579"]))
        # check accuracy
        self.assertEqual(apogeeid_digit("2M00380508+5608579"), "2003805085608579")
        npt.assert_array_equal(
            apogeeid_digit(np.array(["2M00380508+5608579", "2M00380508+5608579"])),
            ["2003805085608579", "2003805085608579"],
        )

    def test_aspcap_mask(self):
        # element-name aliases must produce identical masks
        self.assertTrue(np.all(aspcap_mask("C1") == aspcap_mask("ci")))
        self.assertTrue(np.all(aspcap_mask("TIII") == aspcap_mask("ti2")))
        # assert for example dr=1 is not supported
        self.assertRaises(ValueError, aspcap_mask, "al", 1)
        # Make sure if element not found, the case is nicely handled
        self.assertIsNone(aspcap_mask("abc"))
class ApogeeDownloaderCase(unittest.TestCase):
    """Integration tests for the APOGEE spectra download helpers.

    NOTE(review): these tests contact the live SDSS servers, so they
    need network access and a writable astroNN data directory.
    """

    def test_apogee_combined_download(self):
        """
        Test APOGEE combined spectra downloading function, assert functions can deal with missing files
        """
        # make sure the download works correctly (DR13/14 address by
        # location id, DR16/17 by field + telescope)
        combined_spectra(dr=13, location=4405, apogee="2M19060637+4717296")
        combined_spectra(dr=14, location=4405, apogee="2M19060637+4717296")
        combined_spectra(
            dr=16, field="K06_078+16", telescope="apo25m", apogee="2M19060637+4717296"
        )
        combined_spectra(
            dr=17, field="K06_078+16", telescope="apo25m", apogee="2M19060637+4717296"
        )
        # assert False is returned if file not found
        self.assertEqual(
            combined_spectra(dr=13, location=4406, apogee="2M19060637+4717296"), False
        )
        self.assertEqual(
            combined_spectra(dr=14, location=4406, apogee="2M19060637+4717296"), False
        )
        self.assertEqual(
            combined_spectra(
                dr=16,
                field="K06_078+17",
                telescope="apo25m",
                apogee="2M19060637+4717296",
            ),
            False,
        )
        self.assertEqual(
            combined_spectra(
                dr=17,
                field="K06_078+17",
                telescope="apo25m",
                apogee="2M19060637+4717296",
            ),
            False,
        )
        # assert error if DR not supported
        self.assertRaises(
            ValueError,
            combined_spectra,
            dr=1,
            location=4406,
            apogee="2M19060637+4717296",
        )

    def test_apogee_visit_download(self):
        """
        Test APOGEE visits spectra downloading function, assert functions can deal with missing files
        """
        # make sure the download works correctly
        visit_spectra(dr=13, location=4405, apogee="2M19060637+4717296")
        visit_spectra(dr=14, location=4405, apogee="2M19060637+4717296")
        visit_spectra(
            dr=16, field="K06_078+16", telescope="apo25m", apogee="2M19060637+4717296"
        )
        visit_spectra(
            dr=17, field="K06_078+16", telescope="apo25m", apogee="2M19060637+4717296"
        )
        # assert False is returned if file not found
        self.assertEqual(
            visit_spectra(dr=13, location=4406, apogee="2M19060637+4717296"), False
        )
        self.assertEqual(
            visit_spectra(dr=14, location=4406, apogee="2M19060637+4717296"), False
        )
        self.assertEqual(
            visit_spectra(
                dr=16,
                field="K06_078+17",
                telescope="apo25m",
                apogee="2M19060637+4717296",
            ),
            False,
        )
        self.assertEqual(
            visit_spectra(
                dr=17,
                field="K06_078+17",
                telescope="apo25m",
                apogee="2M19060637+4717296",
            ),
            False,
        )
        # assert error if DR not supported
        self.assertRaises(
            ValueError, visit_spectra, dr=1, location=4406, apogee="2M19060637+4717296"
        )
if __name__ == "__main__":
    # Run every test case in this module when executed directly.
    unittest.main()
|
20,871 | 6ff1643cfeafd0ee11623f2750c46caf6510bacf | #!/usr/bin/python
import rospy
import roslib
import web
import signal
from json import dumps
from datetime import datetime
from time import mktime, strptime, time
from bson import json_util
from os import _exit
from urllib import urlencode
from os import chdir
from frongo.srv import PredictStateOrder
from frongo.srv import PredictState
from frongo.srv import GetInfo
### Templates
TEMPLATE_DIR = roslib.packages.get_pkg_dir('frongoweb') + '/www'
chdir(TEMPLATE_DIR)  # web.py resolves templates relative to the cwd
DATETIME_PATTERN = '%d.%m.%Y %H:%M'       # strptime/strftime format
# Client-side spelling of the same format (presumably for a JS date
# picker — confirm against the template)
DATETIME_PATTERN_JS = 'dd.mm.yyyy hh:ii'
renderer = web.template.render(TEMPLATE_DIR, base="base", globals=globals())
# URL routing: path -> handler class name
urls = (
    '/', 'Index',
    '/query', 'Query'
)
class FrongoApp(web.application):
    """web.py application that serves on all interfaces on a given port."""

    def run(self, port, *middleware):
        # Bind to 0.0.0.0 so the page is reachable from other hosts
        func = self.wsgifunc(*middleware)
        return web.httpserver.runsimple(func, ('0.0.0.0', port))
class Index:
    """Front page handler: renders the query form, pre-filling any
    defaults carried over in the URL query string."""

    def GET(self):
        user_data = web.input()
        frongo = FrongoBridge()  # blocks until the frongo services exist
        info = frongo.get_info()
        data = {
            'submit_url': '/query',
            'queries': info,
            'datetime_format': DATETIME_PATTERN_JS,
            'def_from': user_data['from'] if 'from' in user_data else '',
            'def_to': user_data['to'] if 'to' in user_data else '',
            'def_order': user_data['order'] if 'order' in user_data else '0',
            'def_model': user_data['model'] if 'model' in user_data else '',
        }
        return renderer.index(data)
class FrongoBridge:
    """Thin wrapper around the frongo ROS services used by the web UI."""

    pred_srv_name = '/frongo/predict_models_with_order'
    entr_srv_name = '/frongo/get_entropies_with_order'
    info_srv_name = '/frongo/get_models'
    states_srv_name = '/frongo/get_states'

    def __init__(self):
        # Block until every frongo service is advertised before creating
        # the proxies, so later calls cannot hit a missing service.
        rospy.loginfo('waiting for services')
        rospy.wait_for_service(self.pred_srv_name)
        rospy.wait_for_service(self.info_srv_name)
        rospy.wait_for_service(self.entr_srv_name)
        rospy.wait_for_service(self.states_srv_name)
        self.pred_srv = rospy.ServiceProxy(self.pred_srv_name,
                                           PredictStateOrder)
        self.entr_srv = rospy.ServiceProxy(self.entr_srv_name,
                                           PredictStateOrder)
        self.info_srv = rospy.ServiceProxy(self.info_srv_name,
                                           GetInfo)
        self.states_srv = rospy.ServiceProxy(self.states_srv_name,
                                             PredictState)
        rospy.loginfo('frongo services ready')

    def get_info(self):
        """Return (model_name, model_info) pairs for every known model."""
        infos = self.info_srv()
        res = zip(infos.names, infos.info)
        return res

    def query_values(self, model, order, epochs):
        """Call the prediction service for *model* at the given epochs."""
        res = self.pred_srv(model, int(order), epochs)
        return res

    def query_states(self, model, fr, to):
        """Fetch the recorded states for *model* between epochs fr and to."""
        res = self.states_srv(model, [fr, to])
        return res

    def query_entropies(self, model, order, epochs):
        """Call the entropy service for *model* at the given epochs."""
        res = self.entr_srv(model, int(order), epochs)
        return res
class Query:
    """AJAX endpoint: runs a frongo query and returns chart data as JSON."""

    RESOLUTION = 500  # target number of sample points across the range
    MIN_STEP = 300    # minimum sampling step, in seconds

    def dts_to_epoch(self, dts):
        """Parse a DATETIME_PATTERN string into a unix epoch (int)."""
        return int(mktime(strptime(dts, DATETIME_PATTERN)))

    def epoch_to_dts(self, epoch):
        """Format a unix epoch using DATETIME_PATTERN."""
        return datetime.fromtimestamp(epoch).strftime(DATETIME_PATTERN)

    def query_frongo(self, model, order, epoch_from, epoch_to):
        """Query predictions, entropies and observed states for *model*
        over [epoch_from, epoch_to], sampled at a step derived from
        RESOLUTION but never below MIN_STEP seconds."""
        duration = epoch_to - epoch_from
        steps_from_duration = int(duration / self.RESOLUTION)
        steps = max(steps_from_duration, self.MIN_STEP)
        epochs = range(epoch_from, epoch_to+steps, steps)
        rospy.loginfo(epochs)
        # to be changed into the actual query
        frongo = FrongoBridge()
        fpred = frongo.query_values(model, order, epochs)
        fentr = frongo.query_entropies(model, order, epochs)
        finfo = ''
        fstates = frongo.query_states(model, epoch_from, epoch_to)
        # we can use these fstates to eventually display the real observations
        for f in frongo.get_info():
            if f[0] == model:
                finfo = f[1]
        res = {
            'epochs': fpred.epochs,
            'values': fpred.predictions,
            'states': fstates.predictions,
            'states_epochs': fstates.epochs,
            'entropies': fentr.predictions,
            'model_info': finfo
        }
        # res = {
        #     'epochs': epochs,
        #     'values': [random() for e in epochs],
        #     'entropies': [random() for e in epochs]
        # }
        return res

    def prepare_prediction_plot(self, d):
        """Build the probability + entropy datasets for the front-end chart."""
        dataset_probs = {
            'label': 'Probability',
            'backgroundColor': "rgba(0,0,220,0.3)",
            'borderColor': "rgba(0,0,220,1)",
            'pointColor': "rgba(0,0,220,1)",
            'pointStrokeColor': "#fff",
            'pointHighlightFill': "#fff",
            'pointHighlightStroke': "rgba(220,220,220,1)",
            'data': d['values']
        }
        dataset_ent = {
            'label': 'Entropy',
            'backgroundColor': "rgba(0,220,120,0.3)",
            'borderColor': "rgba(0,220,120,1)",
            'pointColor': "rgba(0,220,120,1)",
            'pointStrokeColor': "#fff",
            'pointHighlightFill': "#fff",
            'pointHighlightStroke': "rgba(220,220,220,1)",
            'data': d['entropies']
        }
        data = {
            'labels': [self.epoch_to_dts(s)
                       for s in d['epochs']],
            'datasets': [dataset_probs, dataset_ent],
            'model_info': d['model_info']
        }
        return data

    def prepare_observation_plot(self, d):
        """Build the raw-observations dataset for the front-end chart."""
        dataset_obs = {
            'label': 'Observations',
            'fill': False,
            'backgroundColor': "rgba(220,0,220,0.3)",
            'borderColor': "rgba(0,0,0,0)",
            'borderWidth': 0,
            'pointStrokeColor': "#fff",
            'pointHighlightFill': "#fff",
            'pointHighlightStroke': "rgba(220,220,220,1)",
            'data': [{'x': p[1], 'y': p[0]} for p in zip(d['states'], d['states_epochs'])]
        }
        print 'data:', dataset_obs['data']
        data = {
            'type': 'line',
            'labels': [self.epoch_to_dts(s)
                       for s in d['states_epochs']],
            'datasets': [dataset_obs],
            'model_info': d['model_info']
        }
        return data

    def GET(self):
        """Handle the form submit: validate the inputs (defaulting to
        the last hour on parse errors), run the query and return the
        chart payload serialised as JSON."""
        user_data = web.input()
        print user_data
        if len(user_data['model']) == 0:
            rospy.logwarn('empty model received from web form')
            return web.BadRequest()
        try:
            epoch_from = self.dts_to_epoch(user_data['epoch_from'])
        except Exception as e:
            rospy.logwarn(e)
            epoch_from = int(time())-3600
        try:
            epoch_to = self.dts_to_epoch(user_data['epoch_to'])
        except Exception as e:
            rospy.logwarn(e)
            epoch_to = int(time())
        d = self.query_frongo(user_data['model'],
                              user_data['order'],
                              epoch_from,
                              epoch_to)
        prediction_chart = self.prepare_prediction_plot(d)
        observation_chart = self.prepare_observation_plot(d)
        query_params = {
            'model': user_data['model'],
            'order': str(user_data['order']),
            'from': self.epoch_to_dts(epoch_from),
            'to': self.epoch_to_dts(epoch_to)
        }
        data = {
            'prediction_chart': prediction_chart,
            'observation_chart': observation_chart,
            'url': '/?' + urlencode(query_params),
            'min': epoch_from,
            'max': epoch_to,
            'model_info': prediction_chart['model_info']
        }
        # NOTE(review): redundant — 'url' is already set in the dict above
        data['url'] = '/?' + urlencode(query_params)
        return dumps(data, default=json_util.default)
def signal_handler(signum, frame):
    """SIGINT handler: hard-exit the process immediately.

    NOTE(review): the exit *status* is signal.SIGTERM (15), which looks
    unintentional — confirm the desired exit code.
    """
    _exit(signal.SIGTERM)
if __name__ == '__main__':
    # Start the ROS node, then serve the web UI until interrupted.
    rospy.init_node("frongo_webserver")
    port = rospy.get_param('~port', 8999)
    app = FrongoApp(urls, globals())
    signal.signal(signal.SIGINT, signal_handler)
    app.run(port=port)
|
20,872 | f0962ad8e3c0a956f8c3c07f8ff76659bcc08d6f | from abc import ABC, abstractmethod
from common import COLORS, Vector
# tile-based game, either popils or megalit
class Game(ABC):
    """Abstract base for a tile-based puzzle game (popils or megalit)."""

    # subclasses must expose a method to generate a solving move sequence
    @abstractmethod
    def solve(self):
        pass

    # subclasses must expose a method to reduce 3SAT into a game level
    @abstractmethod
    def reduce(self):
        pass

    # returns the bounding box surrounding affected game-grid elements
    @abstractmethod
    def update(self, command):
        pass

    # compute and store the reduction and solving move sequence
    def __init__(self, puzzle):
        self.complete = False   # flips when the game is finished
        self.puzzle = puzzle    # the 3SAT instance to be reduced
        self.reduce()  # build the grid
        self.solve()  # build the solution
        self.solution_step = 0  # index into the generated solution

    def __repr__(self):
        # self.grid is expected to be created by the subclass reduce()
        return repr(self.grid)
# this class will populate the game grid. currently this is just a wrapper for a color
class Block():
    """A single tile in the game grid.

    Assigning ``type`` also derives ``identity`` and ``color`` from the
    COLORS table; reading ``type`` returns the stored identity.
    """

    def __init__(self, type, slab=None, short_sides=None):
        self.type = type
        self.slab = slab
        self.short_sides = short_sides

    def __setattr__(self, name, value):
        if name == 'type':
            # 'type' is virtual: store it as identity + derived color
            self.identity = value
            self.color = COLORS[self.identity]
        else:
            super().__setattr__(name, value)

    def __getattr__(self, name):
        if name == 'type':
            return self.identity
        # Bug fix: the original fell off the end and implicitly returned
        # None for *every* missing attribute, which breaks hasattr(),
        # copy/pickle protocol probes and hides typos. Missing
        # attributes must raise AttributeError.
        raise AttributeError(name)

    def __repr__(self):
        return self.type.upper()[0]
# wrapper class to track player position
class Player():
    """Holds the player's grid position, display colour and grip state."""

    def __init__(self, pos):
        self.pos = pos
        self.color = (255, 0, 0)  # red
        # presumably Vector(0, 0) means "not gripping" — confirm in game logic
        self.gripping = Vector(0, 0)
# wrapper for a 2d matrix allowing vector indexing
class Grid:
    """A 2-D matrix addressable either by Vector or by an (x, y) pair.

    Constructor accepts either a single dimensions object with .x/.y or
    two integers, optionally followed by a zero-argument factory that
    produces each cell's initial value (defaults to None).
    """

    def __init__(self, *args):
        if callable(args[-1]):
            initializer = args[-1]
            args = args[:-1]
        else:
            def initializer(): return None
        if len(args) == 1:
            self.dim = args[0]
        else:
            self.dim = Vector(args[0], args[1])
        self.grid = [[initializer() for y in range(self.dim.y)]
                     for x in range(self.dim.x)]

    def __getitem__(self, key):
        # Idiom fix: isinstance (not type(key) == Vector) so Vector
        # subclasses index correctly as well.
        if isinstance(key, Vector):
            return self.grid[int(key.x)][int(key.y)]
        else:
            x, y = key
            return self.grid[int(x)][int(y)]

    def __setitem__(self, key, value):
        if isinstance(key, Vector):
            self.grid[int(key.x)][int(key.y)] = value
        else:
            x, y = key
            self.grid[int(x)][int(y)] = value

    def __repr__(self):
        result = ''
        transposed_grid = zip(*self.grid)
        for row in transposed_grid:
            for block in row[::-1]:  # flip output horizontally
                result += repr(block) + ' '
            result += '\n'
        return result[::-1]  # flip output vertically
|
20,873 | fda908f00cface08b2cb5fd2f75da4cf17374e1b | import falcon
import json
class ParseJson():
    """Falcon resource that echoes person/phone from a posted JSON list."""

    def on_get(self, req, resp):
        # Simple liveness check
        resp.status = falcon.HTTP_200
        resp.body = "Test Ok"

    def on_post(self, req, resp):
        try:
            raw_json = req.stream.read()
        except Exception as ex:
            # Bug fix: Exception objects have no .message attribute in
            # Python 3 (this module is py3 — it uses print() and
            # bytes.decode); str(ex) is the portable description.
            raise falcon.HTTPError(falcon.HTTP_400, 'Error', str(ex))
        try:
            result_json = json.loads(raw_json.decode('utf8'))
        except ValueError:
            raise falcon.HTTPError(falcon.HTTP_400, 'Bad format of JSON')
        resp.status = falcon.HTTP_202
        resp.body = result_json[0]["person"] + " " + result_json[0]["phone"]
        print(result_json[0]["person"] + " " + result_json[0]["phone"])
20,874 | 8f0849b6c1f61093f316dc8c928dfba685ffe7c7 | from __future__ import print_function, division, absolute_import
from io import BytesIO
from dask import do
from .compression import compressors, decompressors
from ..executor import default_executor, ensure_default_get
from ..utils import ensure_bytes, log_errors
def bytes_read_csv(b, header, kwargs):
    """ Convert a block of bytes to a Pandas DataFrame

    Parameters
    ----------
    b: bytestring
        The content to be parsed with pandas.read_csv
    header: bytestring
        An optional header to prepend to b
    kwargs: dict
        A dictionary of keyword arguments to be passed to pandas.read_csv
        (not mutated; a ``compression`` key selects the decompressor)

    See Also:
        distributed.formats.csv.read_csv
    """
    import pandas as pd
    with log_errors():
        # Bug fix: copy before popping. read_csv builds many delayed
        # calls that all share this same kwargs dict, so popping
        # 'compression' in place would strip it for every later block.
        kwargs = dict(kwargs)
        compression = kwargs.pop('compression', None)
        b2 = decompressors[compression](b)
        bio = BytesIO()
        if header:
            # ensure the header ends with the line terminator before
            # splicing the data block after it
            if not header.endswith(b'\n') and not header.endswith(b'\r'):
                header = header + ensure_bytes(kwargs.get('lineterminator', b'\n'))
            bio.write(header)
        bio.write(b2)
        bio.seek(0)
        return pd.read_csv(bio, **kwargs)
def read_csv(block_lists, header, head, kwargs, lazy=True, collection=True,
             executor=None):
    """ Convert blocks of bytes to a dask.dataframe or other high-level object

    This accepts a list of lists of futures/values of bytes where each list
    corresponds to one file, and the futures/values of bytes concatenate to
    comprise the entire file, in order.

    Parameters
    ----------
    block_lists: list of lists of futures of bytes
        The lists of bytestrings with each list corresponding to one logical file
    header: bytestring
        The header, found at the front of the first file, to be prepended to
        all blocks
    head: pd.DataFrame
        An example Pandas DataFrame to be used for metadata
    kwargs: dict
        Keyword arguments to pass down to ``pd.read_csv``
    lazy: boolean, optional (defaults to True)
    collection: boolean, optional (defaults to True)

    Returns
    -------
    A dask.dataframe, or list of futures or values, depending on the value of
    lazy and collection.
    """
    from dask.dataframe import from_imperative
    executor = default_executor(executor)

    # The first block of each file already starts with the header line,
    # so it is parsed with an empty header; every later block gets the
    # shared header prepended.
    dfs1 = [[do(bytes_read_csv)(blocks[0], '', kwargs)] +
            [do(bytes_read_csv)(b, header, kwargs)
             for b in blocks[1:]]
            for blocks in block_lists]
    dfs2 = sum(dfs1, [])  # flatten to a single list of delayed frames

    ensure_default_get(executor)
    if collection:
        result = from_imperative(dfs2, head)
    else:
        result = dfs2

    if not lazy:
        # Materialise now: persist keeps a collection on the cluster,
        # compute gathers the plain futures.
        if collection:
            result = executor.persist(result)
        else:
            result = executor.compute(result)

    return result
20,875 | f9eccb3c309043979ed5300fdc07bd8ac04af66a | # Generated by Django 3.2.6 on 2021-08-03 07:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_orders_product'),
]
operations = [
migrations.AddField(
model_name='product',
name='category',
field=models.CharField(choices=[('Indoor', 'Indoor'), ('Out Door', 'Out Door')], max_length=200, null=True),
),
migrations.AddField(
model_name='product',
name='description',
field=models.CharField(max_length=1000, null=True),
),
migrations.AddField(
model_name='product',
name='name',
field=models.CharField(max_length=200, null=True),
),
migrations.AddField(
model_name='product',
name='price',
field=models.FloatField(max_length=200, null=True),
),
]
|
20,876 | 9a5304104e386a9a55deaadadd9e5a602a818aa2 | #!/usr/bin/env python
'''
------------------------------------------------------------------------------------------
Copyright 2020 Romeo Dabok
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-------------------------------------------------------------------------------------------
Oh this is the forum script. Notice the async thingy? Thats becuase while testing it,
I discovered when a new post is uploaded and the page refreshes, the change wont be
registered immediately. So I put a little delay time to give the post all the time in
the world (its world) to update.
'''
#import modules for CGI handling
import cgi, cgitb
import theroom
import time
import asyncio
cgitb.enable()  # render CGI tracebacks in the browser while debugging
# If this is a room entrance, read which room/page was requested.
form = cgi.FieldStorage()
rid = form.getvalue('forRoom')  # forum/room id
rpg = form.getvalue('forPage')  # page number within that room
# The room render must pause while a fresh post is being committed,
# so the page-building step is split out with asyncio (see main()).
async def main(wtime):
    """Render the forum page after waiting *wtime* seconds.

    The delay gives a just-submitted post time to land before the page
    content is fetched, so a refresh shows the new post.
    """
    dr = theroom.showForum(rid,rpg)
    await asyncio.sleep(wtime)
    if len(dr) < 1:
        # Nothing on this page: substitute a placeholder panel.
        dr = '''
        <div class="col-sm-12 col-md-12 col-lg-12"
        style="background-color: #dedef8; box-shadow: inset 1px -1px 1px #444, inset -1px 1px 1px #444;">
        <p>This page is empty!.</p>
        </div>
        '''
    rpage = '''
    <h2>The Lobby</h2>
    <div class="container">
    %s
    </div>
    ''' %dr
    # Emit the CGI response (headers, blank line, then the body).
    print("Content-Type: text/html\r\n")
    print(rpage)
if not theroom.checkCookie():
    # No valid session cookie: refuse to render the forum.
    print("Content-Type: text/html\r\n")
    print("<h1>Something went wrong</h1>")
else:
    # Is the user making a new post?
    postmsg = form.getvalue('postm')
    waitt = 0
    if postmsg:
        ido = theroom.getIdentification()
        ford = theroom.getForumData(int(rid))
        dt = time.time()
        ford.execute('''INSERT INTO posts (author, content, datetime) VALUES (?,?,?)''',(int(ido[0]),postmsg,dt))
        ford.commit()
        ford.close()
        # Wait one second before rendering so the new post is visible.
        waitt = 1
    asyncio.run(main(waitt))
20,877 | b3080e1275ecd99a94af2492bfe5ed5894b48fc8 | '''
Created on 15.5.2015
A program, which finds all the indexes of word 'flower' in the following text:
"A flower is a beautiful plant, which can be planted in the garden or used to decorate
home. One might like a flower for its colors and the other might like a flower for its
smell. A flower typically symbolizes soft feelings".
@author: e1201757
'''
# The text to be scanned for every occurrence of the word 'flower'.
text = 'A flower is a beautiful plant, which can be planted in the garden or used to decorate home. One might like a flower for its colors and the other might like a flower for its smell. A flower typically symbolizes soft feelings'

# Print the starting index of each occurrence, resuming the search just
# past the previous match until str.find reports no more hits (-1).
index = text.find('flower')
while index != -1:
    print(index)
    index = text.find('flower', index + len('flower'))
|
20,878 | a3bdab68af1985b986c8a95c13825cf8f48987b7 | import numpy as np
from np_ml import AdaBoost, TrivialClassification
if __name__ == '__main__':
    # Reproduces the AdaBoost worked example from
    # "Statistical Learning Method" (《统计学习方法》).
    print("--------------------------------------------------------")
    print("AdaBoost simple example!")
    print("example in Statistical Learning Method(《统计学习方法》)")
    print("--------------------------------------------------------")
    x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    y = np.array([1, 1, 1, -1, -1, -1, 1, 1, 1, -1])
    print("x: {}".format(x))
    print("y: {}".format(y))
    print("")
    # TrivialClassification is the weak learner boosted by AdaBoost
    # (presumably a 1-D threshold stump — see np_ml for its definition).
    adb = AdaBoost(TrivialClassification)
    adb.fit(x, y, detailed=True)
    print("y_pred: {}".format(adb.predict(x)))
20,879 | d4a5b57b39ace7b13a8abf64a78cee132d8acbd1 | from django import forms
from .models import Rating
from crispy_forms.helper import FormHelper
def get_client_ip(request):
    """Return the originating client IP for a Django request.

    Prefers the first address in the X-Forwarded-For header (which
    follows the 'client,proxy1,proxy2' convention) and falls back to
    REMOTE_ADDR. Returns '' if the request metadata is unusable.
    """
    try:
        # ip addresses: 'client,proxy1,proxy2'
        x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
        if x_forwarded_for:
            ip = x_forwarded_for.split(',')[0]
        else:
            ip = request.META.get('REMOTE_ADDR')
    except Exception:
        # Bug fix: a bare 'except:' would also swallow SystemExit and
        # KeyboardInterrupt; Exception keeps the best-effort behaviour
        # without masking interpreter-level signals.
        ip = ''
    return ip
class Text:
    """text for rating fields in forms"""
    # Prompt wording for each rating category, shared by both forms below.
    info = ("The availability of information about "
            "your city's or town's services")
    housing = 'Your satisfaction with the cost of housing'
    schools = 'The quality of public schools'
    police = 'Your trust in the local police'
    streets = 'The maintenance of streets and sidewalks'
    events = 'The availability of social community events'
def set_rating_choice_field(widget=None):
    """In form classes, this helps set rating fields to forms.ChoiceField.

    Returns a factory: calling it with a category's prompt text builds a
    5..1 ChoiceField whose extreme choices embed that text. The optional
    widget (e.g. forms.RadioSelect) is captured by the closure.
    """
    def make_choice(choice_text):
        # 5 is the best rating, 1 the worst; only the endpoints are labelled
        CHOICES = [(5, f'5: {choice_text} is high.'), (4, '4'), (3, '3'),
                   (2, '2'), (1, f'1: {choice_text} is low.')]
        if widget:
            choice_field = forms.ChoiceField(widget=widget, choices=CHOICES)
        else:
            choice_field = forms.ChoiceField(choices=CHOICES)
        return choice_field
    return make_choice
class Survey_Form(forms.ModelForm):
    """for submitting happiness surveys"""
    # Yes/no happiness question rendered as radio buttons.
    happy = forms.ChoiceField(widget=forms.RadioSelect, choices=[
        (1, "1: You're happy."),
        (0, "0: You're not happy.")])
    # Factory for the 1-5 rating fields; the double underscore name-mangles
    # this to _Survey_Form__choice_field so it stays a private class attribute.
    __choice_field = set_rating_choice_field(widget=forms.RadioSelect)
    info = __choice_field(choice_text=Text.info)
    housing = __choice_field(Text.housing)
    schools = __choice_field(Text.schools)
    police = __choice_field(Text.police)
    streets = __choice_field(Text.streets)
    events = __choice_field(Text.events)
    class Meta:
        model = Rating
        # fields = '__all__'
        fields = [
            'happy',
            'info',
            'housing',
            'schools',
            'police',
            'streets',
            'events',
        ]
class Predict_Survey_Form(forms.Form):
    """for happiness predictions"""
    # Same 1-5 rating fields as Survey_Form, but with the default select widget
    # and no model backing (used only to feed the prediction).
    __choice_field = set_rating_choice_field()
    info = __choice_field(choice_text=Text.info)
    housing = __choice_field(Text.housing)
    schools = __choice_field(Text.schools)
    police = __choice_field(Text.police)
    streets = __choice_field(Text.streets)
    events = __choice_field(Text.events)
|
20,880 | affd3aca697e428adf77a27d2e27f3c871e1a559 | # im registration stuff
|
20,881 | cfaddfc9014475d3d40376611d563de442f55629 | sexo = input('Informe F para FEMININO ou M para MASCULINO: ')
if (sexo.upper() == 'M'):
print('MASCULINO')
elif (sexo.upper() == 'F'):
print('FEMININO')
else:
print('Sexo Invalido')
|
20,882 | 6a0592d214758adf745e69019b1adc45e2a64283 | from . import views
__all__ = ['views']
|
20,883 | 5a3a8430c654797655e58bbab6a51b454bb2ca0d |
'''
datautils.py: Just some routines that we use for moving data around
'''
from __future__ import print_function
import numpy as np
import librosa
import os
from os.path import isfile, splitext
from imageio import imread, imwrite
import glob
from skimage import img_as_ubyte
from random import shuffle
def listdir_nohidden(path, subdirs_only=False, skip_csv=True):
    '''
    Yield the entries of *path*, skipping hidden files. Call inside list() when
    a list is needed. subdirs_only skips regular files; skip_csv skips .csv files.
    '''
    for f in os.listdir(path):
        if f.startswith('.'):  # skip hidden entries
            continue
        if subdirs_only and not os.path.isdir(os.path.join(path, f)):
            continue
        if skip_csv and os.path.splitext(f)[1] == '.csv':
            continue
        yield f
# class names are subdirectory names in Preproc/ directory
def get_class_names(path="Preproc/Train/", sort=True):
    """Return the class names (= subdirectory names of *path*) as a list.

    BUG FIX: the unsorted branch previously listed *all* non-hidden entries
    (regular files included) and returned a generator; both branches now
    consistently return a list of subdirectory names only.
    """
    class_names = list(listdir_nohidden(path, subdirs_only=True))
    if sort:
        class_names.sort()  # sorted alphabetically for consistency with "ls"
    return class_names
def get_total_files(class_names, path="Preproc/Train/"):
    """Count the files across all class subdirectories under *path*."""
    return sum(len(os.listdir(path + subdir)) for subdir in class_names)
def scale_to_uint8(float_img):
    """Min-max normalize *float_img* into [0, 1] and convert to uint8.

    Uses skimage.img_as_ubyte for the [0,1] -> [0,255] conversion.
    NOTE(review): a constant image makes np.ptp(...) == 0 and divides by
    zero — confirm inputs always have some dynamic range.
    """
    #out_img = 255*(float_img - np.min(float_img))/np.ptp(float_img).astype(np.uint8)
    out_img = img_as_ubyte( (float_img-np.min(float_img))/np.ptp(float_img) )
    return out_img
def save_melgram(outfile, melgram, out_format='npz'):
    """Save a 4-d melgram array (batch, height, width, channels) to disk.

    out_format: 'npz' (default, compressed), 'npy', or 'jpeg'/'png' image
    output (images only when channels <= 4). Data is cast to float16 to save
    space before writing.
    """
    channels = melgram.shape[3]
    melgram = melgram.astype(np.float16)
    if (('jpeg' == out_format) or ('png' == out_format)) and (channels <= 4):
        melgram = np.squeeze(melgram)  # squeeze gets rid of dimensions of batch_size 1
        melgram = np.flip(melgram, 0)  # flip spectrogram image right-side-up before saving, for viewing
        if (2 == channels):  # images need 1 (grey), 3 (RGB) or 4 (RGBA) channels; pad 2 -> 3
            # pad a channel of zeros (for blue); channels will then be 3
            # TODO: this is SLOWWW
            b = np.zeros((melgram.shape[0], melgram.shape[1], 3))  # 3-channel array of zeros
            b[:, :, :-1] = melgram  # fill the first two channels, leave blue as zeros
            imwrite(outfile, scale_to_uint8(b), format=out_format)
        else:
            imwrite(outfile, scale_to_uint8(melgram), format=out_format)
    elif ('npy' == out_format):
        # BUG FIX: np.save takes the array positionally — it has no 'melgram'
        # keyword, so the previous np.save(outfile, melgram=melgram) raised TypeError
        np.save(outfile, melgram)
    else:
        np.savez_compressed(outfile, melgram=melgram)  # default is compressed npz file
    return
def load_audio(audio_path, mono=None, sr=None, convertOSXaliases=True): # wrapper for librosa.load
    """Load an audio file via librosa, with a fallback for OS X alias files.

    Returns (signal, sample_rate).
    NOTE(review): NoBackendError (audioread), platform and resolve_osx_alias
    are not imported in this module — as written, the fallback path would
    itself raise NameError; confirm the intended imports exist elsewhere.
    """
    try:
        signal, sr = librosa.load(audio_path, mono=mono, sr=sr)
    except NoBackendError as e:
        if ('Darwin' == platform.system()): # handle OS X alias files gracefully
            source = resolve_osx_alias(audio_path, convert=convertOSXaliases, already_checked_os=True) # convert to symlinks for next time
            try:
                signal, sr = librosa.load(source, mono=mono, sr=sr)
            except NoBackendError as e:
                print("\n*** ERROR: Could not open audio file {}".format(audio_path),"\n",flush=True)
                raise e
        else:
            print("\n*** ERROR: Could not open audio file {}".format(audio_path),"\n",flush=True)
            raise e
    return signal, sr
def load_melgram(file_path):
    """Load a melgram saved by save_melgram; format is chosen by extension.

    Supports .npy, .npz (array stored under key 'melgram'), and .png/.jpg/.jpeg
    images (reshaped to the 4-d (1, h, w, 1) layout).
    Raises ValueError for any other extension.

    BUG FIX: an unrecognized extension previously only printed an error and then
    hit a NameError on the unbound local; it now raises ValueError. Also accepts
    '.jpg' in addition to '.jpeg'.
    """
    # auto-detect load method based on filename extension
    name, extension = os.path.splitext(file_path)
    extension = extension.lower()
    if '.npy' == extension:
        melgram = np.load(file_path)
    elif '.npz' == extension:  # compressed npz file (preferred)
        with np.load(file_path) as data:
            melgram = data['melgram']
    elif extension in ('.png', '.jpg', '.jpeg'):
        arr = imread(file_path)
        melgram = np.reshape(arr, (1, arr.shape[0], arr.shape[1], 1))  # convert 2-d image
        melgram = np.flip(melgram, 0)  # we save images 'rightside up' but librosa internally presents them 'upside down'
    else:
        raise ValueError(
            "load_melgram: Error: unrecognized file extension '" + extension
            + "' for file " + file_path)
    return melgram
def get_sample_dimensions(class_names, path='Preproc/Train/'):
    """Return the shape of the first melgram file found in the first class dir."""
    sample_dir = path + class_names[0] + '/'
    first_file = os.listdir(sample_dir)[0]
    shape = load_melgram(sample_dir + first_file).shape
    print(" get_sample_dimensions: "+first_file+": melgram.shape = ", shape)
    return shape
def encode_class(class_name, class_names, label_smoothing=0.005):
    """Return a one-hot vector for *class_name*, optionally label-smoothed.

    label_smoothing makes training more robust to mislabeled data by moving
    probability mass off the hot entry. Returns None for an unknown class.
    """
    if class_name not in class_names:
        return None
    num_classes = len(class_names)
    vec = np.zeros(num_classes)
    vec[class_names.index(class_name)] = 1
    if label_smoothing > 0:
        vec = vec * (1 - label_smoothing) + label_smoothing / num_classes
    return vec
def decode_class(vec, class_names):  # generates a number from the one-hot vector
    """Return the index of the most probable class in *vec* (class_names unused,
    kept for interface symmetry with encode_class)."""
    best = np.argmax(vec)
    return int(best)
def shuffle_XY_paths(X, Y, paths):
    """Return copies of X, Y and paths permuted together by one random order."""
    assert X.shape[0] == Y.shape[0]
    order = np.array(range(Y.shape[0]))
    np.random.shuffle(order)
    newX = np.copy(X)
    newY = np.copy(Y)
    newpaths = paths[:]
    # apply the same permutation to all three containers
    for dest, src in enumerate(order):
        newX[dest] = X[src, :, :]
        newY[dest] = Y[src, :]
        newpaths[dest] = paths[src]
    return newX, newY, newpaths
def make_melgram(mono_sig, sr, n_mels=128): # @keunwoochoi upgraded form 96 to 128 mel bins in kapre
    """Return a dB-scaled mel spectrogram shaped (1, n_mels, t, 1) ('channels_last').

    NOTE(review): melspectrogram receives the signal positionally; librosa >= 0.10
    requires keyword arguments (y=...) — confirm the pinned librosa version.
    """
    #melgram = librosa.logamplitude(librosa.feature.melspectrogram(mono_sig, # latest librosa deprecated logamplitude in favor of amplitude_to_db
    #    sr=sr, n_mels=96),ref_power=1.0)[np.newaxis,np.newaxis,:,:]
    melgram = librosa.amplitude_to_db(librosa.feature.melspectrogram(mono_sig,
        sr=sr, n_mels=n_mels))[np.newaxis,:,:,np.newaxis] # last newaxis is b/c tensorflow wants 'channels_last' order
    '''
    # librosa docs also include a perceptual CQT example:
    CQT = librosa.cqt(mono_sig, sr=sr, fmin=librosa.note_to_hz('A1'))
    freqs = librosa.cqt_frequencies(CQT.shape[0], fmin=librosa.note_to_hz('A1'))
    perceptual_CQT = librosa.perceptual_weighting(CQT**2, freqs, ref=np.max)
    melgram = perceptual_CQT[np.newaxis,np.newaxis,:,:]
    '''
    return melgram
def make_phase_gram(mono_sig, sr, n_bins=128):
    """Return the STFT phase, resized to n_bins rows, shaped (1, n_bins, t, 1).

    NOTE(review): np.resize repeats/truncates raw values rather than
    interpolating — confirm that is the intended "resampling".
    """
    stft = librosa.stft(mono_sig)#, n_fft = (2*n_bins)-1)
    magnitude, phase = librosa.magphase(stft) # we don't need magnitude
    # resample the phase array to match n_bins
    phase = np.resize(phase, (n_bins, phase.shape[1]))[np.newaxis,:,:,np.newaxis]
    return phase
# turn multichannel audio as multiple melgram layers
def make_layered_melgram(signal, sr, mels=128, phase=False):
    """Stack one melgram (and, optionally, one phase-gram) per audio channel
    along axis 3, keeping axis 0 free for Keras batches.

    NOTE(review): a zero-channel signal would leave `layers` unbound — confirm
    callers always pass at least one channel.
    """
    if (signal.ndim == 1): # given the way the preprocessing code is now, this may not get called
        signal = np.reshape( signal, (1,signal.shape[0]))
    # get mel-spectrogram for each channel, and layer them into multi-dim array
    for channel in range(signal.shape[0]):
        melgram = make_melgram(signal[channel],sr, n_mels=mels)
        if (0 == channel):
            layers = melgram
        else:
            layers = np.append(layers,melgram,axis=3) # we keep axis=0 free for keras batches, axis=3 means 'channels_last'
        if (phase):
            phasegram = make_phase_gram(signal[channel],sr, n_bins=mels)
            layers = np.append(layers,phasegram,axis=3)
    return layers
def nearest_multiple(a, b):
    """Round *a* down to the nearest multiple of *b* (intended for positive inputs)."""
    whole_steps = int(a / b)  # truncate toward zero, matching the original semantics
    return whole_steps * b
# can be used for test dataset as well
def build_dataset(path="Preproc/Train/", load_frac=1.0, batch_size=None, tile=False, max_per_class=0):
    """Load pre-processed melgrams from class subdirectories into memory.

    :param path: directory whose subdirectories are the class names
    :param load_frac: fraction of the available files to load
    :param batch_size: if given, the dataset size is rounded down to a multiple of it
    :param tile: tile 1-channel melgrams to 3 channels (e.g. for RGB-input models)
    :param max_per_class: cap on files loaded per class (0 = no cap)
    :return: (X, Y, paths, class_names), shuffled together
    """
    class_names = get_class_names(path=path)
    print("class_names = ",class_names)
    nb_classes = len(class_names)
    total_files = get_total_files(class_names, path=path)
    total_load = int(total_files * load_frac)
    if max_per_class > 0:
        total_load = min( total_load, max_per_class * nb_classes)
    if (batch_size is not None): # keras gets particular: dataset size must be mult. of batch_size
        total_load = nearest_multiple( total_load, batch_size)
    # NOTE(review): the next two prints are duplicates — presumably one was meant
    # to be removed
    print(" total files = ",total_files,", going to load total_load = ",total_load)
    print("total files = ",total_files,", going to load total_load = ",total_load)
    # pre-allocate memory for speed (old method used np.concatenate, slow)
    mel_dims = get_sample_dimensions(class_names,path=path)  # get dims of sample data file
    if (tile):
        ldims = list(mel_dims)
        ldims[3] = 3
        mel_dims = tuple(ldims)
    print(" melgram dimensions: ",mel_dims)
    X = np.zeros((total_load, mel_dims[1], mel_dims[2], mel_dims[3]))
    Y = np.zeros((total_load, nb_classes))
    paths = []
    load_count = 0
    for idx, classname in enumerate(class_names):
        print("")
        # one-hot (smoothed) target row shared by every file of this class
        this_Y = np.array(encode_class(classname,class_names) )
        this_Y = this_Y[np.newaxis,:]
        class_files = os.listdir(path+classname)
        shuffle(class_files)  # just to remove any special ordering
        n_files = len(class_files)
        n_load = int(n_files * load_frac)  # n_load is how many files of THIS CLASS are expected to be loaded
        if max_per_class > 0:
            n_load = min( n_load, max_per_class)
        printevery = 100
        file_list = class_files[0:n_load]
        for idx2, infilename in enumerate(file_list):  # Load files in a particular class
            audio_path = path + classname + '/' + infilename
            if (0 == idx2 % printevery) or (idx2+1 == len(class_files)):
                print("\r Loading class ",idx+1,"/",nb_classes,": \'",classname,
                    "\', File ",idx2+1,"/", n_load,": ",audio_path," ",
                    sep="",end="")
            #auto-detect load method based on filename extension
            melgram = load_melgram(audio_path)
            if (tile) and (melgram.shape != mel_dims):
                melgram = np.tile(melgram, 3)
            elif (melgram.shape != mel_dims):
                print("\n\n    WARNING: Expecting spectrogram with dimensions mel_dims = ",mel_dims,", but got one with melgram.shape = ",melgram.shape)
                print("     The offending file is = ",audio_path)
            # usually it's the 2nd dimension of melgram.shape that is affected by audio file length
            use_len = min(X.shape[2],melgram.shape[2])
            X[load_count,:,0:use_len] = melgram[:,:,0:use_len]
            #X[load_count,:,:] = melgram
            Y[load_count,:] = this_Y
            paths.append(audio_path)
            load_count += 1
            if (load_count >= total_load):  # Abort loading files after last even multiple of batch size
                break
        if (load_count >= total_load):  # Second break needed to get out of loop over classes
            break
    print("")
    if ( load_count != total_load ):  # check to make sure we loaded everything we thought we would
        raise Exception("Loaded "+str(load_count)+" files but was expecting "+str(total_load) )
    X, Y, paths = shuffle_XY_paths(X,Y,paths)  # mix up classes, & files within classes
    return X, Y, paths, class_names
|
20,884 | 618418822584de286338a389b8200ebbe5db47d5 | import json
import pandas as pd
import numpy as np
from tqdm import tqdm
def read_csv(path, strict_mode=True):
    """
    :param path: path to the csv file (sota data)
    :param strict_mode: fingerprint each row by the (weighted) sum of all its
        metric values; otherwise index every metric value individually.
    :return: (df, value_dict) where value_dict maps a metric fingerprint to the
        set of ids sharing it.
    """
    df = pd.read_csv(path, encoding='utf-8')
    metric_value = df['metric']
    # each 'metric' cell is a dict serialized with single quotes; convert to valid JSON first
    metric_value = [list(json.loads(i.replace('\'', '\"')).values()) for i in metric_value]
    metric_value = [[float(j) for j in i] for i in metric_value]
    id_list = list(df['id'])
    assert len(id_list) == len(metric_value)
    # id_dict = dict(zip(id_list, metric_value))
    value_dict = {}
    if strict_mode:
        for index, values in enumerate(metric_value):
            if len(values) > 1:
                # NOTE(review): the first metric is weighted by 1.5 before summing,
                # presumably to reduce accidental fingerprint collisions — confirm intent
                values[0] = values[0] * 1.5
                num = float(np.sum(values))
                if value_dict.get(num) is None:
                    value_dict[num] = set([id_list[index]])
                else:
                    value_dict[num].add(id_list[index])
            else:
                if value_dict.get(values[0]) is None:
                    value_dict[values[0]] = set([id_list[index]])
                else:
                    value_dict[values[0]].add(id_list[index])
    else:
        # loose mode: any single shared metric value groups two ids together
        for index, values in enumerate(metric_value):
            for value in values:
                if value_dict.get(value) is None:
                    value_dict[value] = set([id_list[index]])
                else:
                    value_dict[value].add(id_list[index])
    return df, value_dict
def similar_value():
    """Return groups of ids whose metric fingerprints collide AND that share a dataset name.

    :return: (filtered_list, df) — filtered_list is a list of id lists, one per
        duplicated dataset within a colliding group.
    """
    # filtering id set by dataset's name
    df, value_dict = read_csv('./data/Sota_Evaluations.csv')
    filtered_list = []
    for key, value in value_dict.items():
        if len(set(value)) > 1:
            df_data = df.loc[df['id'].isin(value)]
            data_list = df_data['dataset']
            if len(set(data_list)) == len(data_list):
                # all dataset names distinct -> collision is coincidental, skip
                continue
            else:
                df_dup = df_data[data_list.duplicated()]
                dup_list = list(set(df_dup["dataset"]))
                for dup in dup_list:
                    filtered_list.append(list(df_data.loc[df_data['dataset']==dup, 'id']))
    print(len(filtered_list))
    return filtered_list, df
# def similar_value():
# df, value_dict, metric_value = read_csv('./data/Sota_Evaluations.csv')
# final_id = []
# second_dict = value_dict.copy()
# print(len(second_dict))
#
# for id_one, value_one in tqdm(value_dict.items()):
# one_item = [id_one]
#
# second_dict.pop(id_one)
# for id_two, value_two in tqdm(second_dict.items()):
# value_two.extend(value_one)
# if len(set(value_two)) < len(value_two):
# one_item.append(id_two)
#
# if len(set(one_item)) > 1:
# final_id.append(list(set(one_item)))
# print(final_id)
# return final_id
if __name__ == "__main__":
    # Collect duplicate-suspect rows, back-fill paper metadata, and export.
    filtered_list, df = similar_value()
    # flatten the per-dataset duplicate groups into one flat list of ids
    filtered_list = [j for i in filtered_list for j in i]
    # BUG FIX: DataFrame.append was removed in pandas 2.0 — gather the matching
    # rows and concatenate once (also avoids O(n^2) re-copying)
    if filtered_list:
        df_re = pd.concat([df.loc[df['id'] == row_id] for row_id in filtered_list],
                          ignore_index=True)
    else:
        df_re = pd.DataFrame()
    df_co = pd.read_csv('./data/Done/completing_results.csv')
    paper_dict = dict(zip(df_co['id'], df_co['paper']))
    url_dict = dict(zip(df_co['id'], df_co['paperurl']))
    # back-fill paper title/url wherever completion data exists
    # (renamed loop variable: `id` shadowed the builtin)
    for row_id in df_re['id']:
        paper = paper_dict.get(row_id)
        if paper is not None:
            df_re.loc[df_re['id'] == row_id, 'paper'] = paper
            df_re.loc[df_re['id'] == row_id, 'paperurl'] = url_dict.get(row_id)
    df_re.to_csv('./data/sample2.csv', index=False, sep=',', encoding='utf-8')
20,885 | 755e4e7ec4667f84da2e7387aadb5ef0c8f45759 | import numpy as np
class DriftData:
    """Column-wise view over a whitespace-delimited drift-telemetry log.

    Column layout (0-based): 0-1 local velocity x/y, 2 angular velocity,
    3 sideslip, 4-13 next five waypoints as (x, y) pairs, 14 rpm,
    15 current steering direction.
    """
    def __init__(self, dataPath):
        # NOTE(review): no existence check — np.loadtxt raises if the file is missing
        self.data = np.loadtxt(dataPath)
        d = self.data
        self.localVelXY = (d[:, 0], d[:, 1])
        self.angularVel = d[:, 2]
        self.sideslip = d[:, 3]
        self.nextWP0 = (d[:, 4], d[:, 5])
        self.nextWP1 = (d[:, 6], d[:, 7])
        self.nextWP2 = (d[:, 8], d[:, 9])
        self.nextWP3 = (d[:, 10], d[:, 11])
        self.nextWP4 = (d[:, 12], d[:, 13])
        self.rpm = d[:, 14]
        self.currSteerDir = d[:, 15]
|
20,886 | be8099f8910a88d91bc30cbf7e8e2e9d6e06d49e | """Shell class."""
import argparse
import importlib
import os
import sys
from docknv.logger import Logger
from docknv.version import __version__
STANDARD_COMMANDS = (
"config",
"service",
"env",
"schema",
"images",
"user",
"scaffold",
"custom",
"machine",
)
class Shell(object):
    """Shell entry-point.

    Builds the top-level argparse parser, loads one sub-parser per command in
    STANDARD_COMMANDS from docknv.shell.handlers.<cmd>, and dispatches parsed
    arguments to the matching handler module.
    """
    def __init__(self):
        """Init."""
        self.parser = argparse.ArgumentParser(
            description="Docker w/ environments (docknv v.{0})".format(
                __version__
            )
        )
        self.parser.add_argument(
            "--version", action="version", version="%(prog)s " + __version__
        )
        self.parser.add_argument(
            "-v", "--verbose", action="store_true", help="verbose mode"
        )
        self.parser.add_argument(
            "-p", "--project", help="project path", default="."
        )
        self.parser.add_argument(
            "--dry-run", help="dry run", action="store_true"
        )
        self.subparsers = self.parser.add_subparsers(
            dest="command", metavar=""
        )
        self.post_parsers = []
        self.init_parsers()
    def register_post_parser(self, fct, cfg, ctx):
        """
        Register a new parser function.
        :param fct: Parser function (fn)
        :param cfg: Configuration (dict)
        :param ctx: Context (dict)
        """
        self.post_parsers.append((fct, cfg, ctx))
    def run(self, args):
        """
        Start and read command-line arguments.

        Prints help and exits with code 1 when no arguments are given.
        :param args: Arguments
        """
        args_count = len(args)
        if args_count == 0:
            self.parser.print_help()
            sys.exit(1)
        return self.parse_args(self.parser.parse_args(args))
    def init_parsers(self):
        """Initialize each parsers."""
        # each handler module registers its own sub-parser via _init()
        for cmd in STANDARD_COMMANDS:
            module = importlib.import_module("docknv.shell.handlers." + cmd)
            module._init(self.subparsers)
    def parse_args(self, args):
        """
        Parse command-line args.
        :param args: Arguments (iterable)
        """
        # Verbose mode activation
        if args.verbose:
            Logger.set_log_level("DEBUG")
        else:
            Logger.set_log_level("INFO")
        # Command detection
        if args.command is None:
            self.parser.print_help()
            sys.exit(1)
        # Command help detection: a command given without its required
        # sub-command (attribute '<cmd>_cmd' is None) prints that command's help
        if args.command is not None:
            cmd = args.command
            subcmd_name = "{0}_cmd".format(cmd)
            if cmd in self.subparsers.choices:
                subpar = self.subparsers.choices[cmd]
                if hasattr(args, subcmd_name):
                    if getattr(args, subcmd_name) is None:
                        subpar.print_help()
                        sys.exit(1)
        return handle_parsers(self, args)
def handle_parsers(shell, args):
    """
    Handle parsers.
    :param shell: Shell
    :param args: Arguments
    :rtype: Exit code (int)
    """
    # Exit code
    exit_code = 0
    # Absolute path
    args.project = os.path.abspath(args.project)
    # Dispatch to the handler module matching the chosen sub-command; each
    # handler's _handle() returns the process exit code.
    module = importlib.import_module("docknv.shell.handlers." + args.command)
    exit_code = module._handle(args)
    return exit_code
|
20,887 | 211779e22250c83638ef030b54c2e8b67c3f82dc | from applications.assets.models import Equipment
from applications.station.models import StationBusiness, Station, StationRental
from django.forms import ModelForm
class StationBusinessForm(ModelForm):
    """ModelForm for StationBusiness that pins the station choice to one station."""
    class Meta:
        model = StationBusiness
        fields = "__all__"
    def __init__(self, *args, **kwargs):
        # optional 'station_pk' kwarg restricts the station queryset to that station;
        # otherwise fall back to the bound instance's station (edit case)
        station_pk = kwargs.pop('station_pk', None)
        super(StationBusinessForm, self).__init__(*args, **kwargs)
        if station_pk is not None:
            self.fields['station'].queryset = Station.objects.filter(pk=station_pk)
        elif kwargs.get('instance', None) is not None:
            self.fields['station'].queryset = Station.objects.filter(pk=kwargs['instance'].station.pk)
        self.fields['business'].empty_label = None
        self.fields['station'].empty_label = None
        # NOTE(review): 'readonly' on a <select> is advisory only — browsers do
        # not enforce it; confirm server-side protection if tampering matters
        self.fields['station'].widget.attrs['readonly'] = 'readonly'
class StationRentalForm(ModelForm):
    """ModelForm for StationRental that pins station and equipment to the bound instance."""
    class Meta:
        model = StationRental
        fields = "__all__"
    def __init__(self, *args, **kwargs):
        super(StationRentalForm, self).__init__(*args, **kwargs)
        # when editing an existing rental, lock both foreign keys to the
        # instance's current values
        instance = kwargs.get('instance', None)
        if instance is not None:
            self.fields['station'].queryset = Station.objects.filter(pk=instance.station.pk)
            self.fields['equipment'].queryset = Equipment.objects.filter(pk=instance.equipment.pk)
            self.fields['station'].empty_label = None
            self.fields['equipment'].empty_label = None
            # NOTE(review): 'readonly' on <select> widgets is advisory only
            self.fields['station'].widget.attrs['readonly'] = 'readonly'
            self.fields['equipment'].widget.attrs['readonly'] = 'readonly'
|
20,888 | 7287d0bf1c3be77d57d3eba1518559878e6b6f34 | from distutils.core import setup
import py2exe
# Build single-file Windows executables for the two Excel helper scripts with py2exe.
setup(
    # NOTE(review): console entries usually end in '.py' — confirm these resolve
    console=['extract_excel_sheet','write_excel'],
    zipfile=None,  # bundle the library archive into the exe itself
    options={
        'py2exe' : {
            'packages' : [ 'xlrd','xlwt' ],
            'dll_excludes':['w9xpopen.exe'],  # only needed on Win9x
            "compressed" : 1,    # compress the library archive
            "optimize" : 2,      # -OO level bytecode optimization
            "bundle_files" : 1   # bundle everything into a single executable
        }
    },
    install_requires=[
        'xlrd',
        'xlwt'
    ],
)
20,889 | 16c8f7d2ced75dec6b753392b81a5344f84a046e | from distutils.core import setup
# Packaging metadata for the 'rhc' distribution and its sub-packages.
setup(
    name='rhc',
    version='2.0',
    packages=[
        'rhc',
        'rhc/database',
        'rhc/micro_fsm',
        'rhc/fsm'
    ],
)
|
20,890 | 7213dcab62877c05689651928922188e248bab13 | '''
Module for handling generic connections with a Sumo deployment.
@author: Weimin Ma
@contact: U{weimin@sumologic.com<mailto:weimin@sumologic.com>}
@since: 2016-04-23
'''
__all__ = ['rest', 'service']
from .rest import RESTConnector
from .service import ServiceConnector
|
20,891 | 74dcf782c17d2d1617d89dba7c5cb431d8148c08 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright 2006 - 2015, Paul Beckingham, Federico Hernandez.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# http://www.opensource.org/licenses/mit-license.php
#
###############################################################################
import sys
import os
import unittest
# Ensure python finds the local simpletap module
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from basetest import Task, TestCase
class TestBefore(TestCase):
    """Exercise the date filters start.before / start.after against two tasks."""
    @classmethod
    def setUpClass(cls):
        """Executed once before any test in the class"""
        cls.t = Task()
        # two tasks with start dates a few months apart, used by every filter below
        cls.t('add foo entry:2008-12-22 start:2008-12-22')
        cls.t('add bar entry:2009-04-17 start:2009-04-17')
    def test_correctly_recorded_start(self):
        """Verify start dates properly recorded"""
        code, out, err = self.t("_get 1.start")
        self.assertEqual(out, "2008-12-22T00:00:00\n")
        code, out, err = self.t("_get 2.start")
        self.assertEqual(out, "2009-04-17T00:00:00\n")
    def test_before_none(self):
        """Verify start.before:2008-12-01 yields nothing"""
        code, out, err = self.t("start.before:2008-12-01 _ids")
        self.assertNotIn("1", out)
        self.assertNotIn("2", out)
    def test_after_none(self):
        """Verify start.after:2009-05-01 yields nothing"""
        code, out, err = self.t("start.after:2009-05-01 _ids")
        self.assertNotIn("1", out)
        self.assertNotIn("2", out)
    def test_before_a(self):
        """Verify start.before:2009-01-01 yields '1'"""
        code, out, err = self.t("start.before:2009-01-01 _ids")
        self.assertIn("1", out)
        self.assertNotIn("2", out)
    def test_before_b(self):
        """Verify start.before:2009-05-01 yields '1' and '2'"""
        code, out, err = self.t("start.before:2009-05-01 _ids")
        self.assertIn("1", out)
        self.assertIn("2", out)
    def test_after_a(self):
        """Verify start.after:2008-12-01 yields '1' and '2'"""
        code, out, err = self.t("start.after:2008-12-01 _ids")
        self.assertIn("1", out)
        self.assertIn("2", out)
    def test_after_b(self):
        """Verify start.after:2009-01-01 yields '2'"""
        code, out, err = self.t("start.after:2009-01-01 _ids")
        self.assertNotIn("1", out)
        self.assertIn("2", out)
if __name__ == "__main__":
    # Run with TAP output for the project's test harness.
    from simpletap import TAPTestRunner
    unittest.main(testRunner=TAPTestRunner())
# vim: ai sts=4 et sw=4 ft=python
|
20,892 | cdfa5dc1a0aa7c807f8703bac94f83e6f65bbd22 | import threading
from concurrent import futures
import logging
import time
# Log record format. NOTE(review): '%(process)s' appears twice — presumably one
# occurrence was meant to be another field (e.g. '%(thread)d'); TODO confirm.
FORMAT = '%(asctime)-15s\t %(process)s %(threadName)s %(process)s %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
20,893 | 62239c2eb4d3dfcc0032930922d6e31a2dcfb5a9 | # здесь будет считаться и выводиться с татистика по миру
import csv
class Measured_value:
    """One metric sampled from a bot attribute.

    Which attribute is read is given as a string; several of these are held by
    Specialization and processed uniformly.
    """
    def __init__(self, attribute, title):
        self.attribute = attribute  # bot attribute name to read
        self.title = title          # human-readable label for reports
        self.flush()
    def flush(self):
        """Reset the running counters.

        The essential output is the average; count and sum are intermediate
        totals kept only to compute it (count could also be recovered from
        Specialization's member list).
        """
        self.count = 0
        self.sum = 0
        self.avg = None
    def increment(self, bot):
        """Fold one bot's attribute value into the running totals."""
        self.count += 1
        self.sum += getattr(bot, self.attribute)
    def average(self):
        """Finalize the average (0 when no bots were counted)."""
        self.avg = self.sum / self.count if self.count > 0 else 0
    def display(self):
        """Return (title, average) for reporting."""
        return (self.title, self.avg)
class Specialization:
    '''
    Holds all bots that satisfy a given condition (one "stratum" of the
    population). The condition function is passed in as a parameter.
    '''
    # metric attribute name -> human-readable column title (Russian, used in CSV output)
    MEASURE_VALUES = {'all_consumed_protein': 'протеин', 'children': 'дети', 'moves': 'ходы'}
    def __init__(self, name, compare_function, condition):
        self.is_condition = compare_function # predicate that selects bots into this stratum
        self.condition = condition # extra arguments handed to the predicate
        self.name = name
        self.measur_vals = {}
        # one running-average accumulator per tracked metric
        for measure in self.MEASURE_VALUES:
            self.measur_vals[measure] = Measured_value(measure, self.MEASURE_VALUES[measure])
        self.members = []
    def flush(self):
        """Reset all metric accumulators and forget current members."""
        for i in self.measur_vals.values():
            i.flush()
        self.members = []
    def check_profile(self, bot):
        """True if *bot* belongs to this stratum."""
        return self.is_condition(bot, *self.condition)
    def check(self, bot):
        """Add *bot* to the stratum (and into every metric) if it qualifies."""
        if self.check_profile(bot):
            for v in self.measur_vals.values():
                v.increment(bot)
            self.members.append(bot)
    def get_averages(self):
        """Finalize the average of every tracked metric."""
        for v in self.measur_vals.values():
            v.average()
    def count(self):
        return len(self.members)
    def return_measured_tuple(self, attribute):
        """Return (title, average) for the metric named *attribute*."""
        return self.measur_vals[attribute].display()
# trait id -> bot attribute holding the protein gained from that strategy
traits = {1:'protein_predator', 2:'protein_plant', 3:'protein_mushroom'}
def profile(bot, prime_feature, multiplier=1):
    """True when the bot's primary trait exceeds multiplier * (sum of the others)."""
    prime = 0
    secondary = 0
    for trait_id, attr in traits.items():
        value = getattr(bot, attr)
        if trait_id == prime_feature:
            prime += value
        else:
            secondary += value
    return prime > multiplier * secondary
def unexpressed(bot, a):
    """True when no single trait outweighs the other two combined.

    Marks bots with no pronounced specialization. The second argument is a
    placeholder so the signature matches the other stratum predicates.
    """
    for main in traits:
        others = sum(getattr(bot, traits[rest]) for rest in traits if rest != main)
        if getattr(bot, traits[main]) > others:
            return False
    return True
def return_true(*a):
    """Catch-all predicate for the 'all bots' stratum: accepts anything, always True."""
    return True
#species =['all', 'predator', 'plant', 'mushroom', 'unspecialized']
species =['все', 'хищники', 'растения', 'грибы', 'без специализации']
class Statistics:
    """Collects per-cycle world statistics across population strata and writes CSV files."""
    MEASURE_PERIOD = 200
    # NOTE(review): opened at class-definition (import) time — a module-level side effect
    period_stat = open('bot_periodic_statistics.csv', 'w', encoding='UTF16')
    # (predicate, arguments) per stratum, in the same order as the `species` labels
    CONDITIONS = [[return_true, [None]],
                  [profile, [1, 1]],
                  [profile, [2, 1]],
                  [profile, [3, 1]],
                  [unexpressed, [None]]]
    def __init__(self):
        self.strates = []
        for i in range(len(Statistics.CONDITIONS)):
            self.strates.append(Specialization(species[i], *Statistics.CONDITIONS[i]))
        self.all_bots = []
        self.write_header()
    def add_to_measure_pool(self, bot):
        self.all_bots.append(bot)
    def flush_measure_pool(self):
        """Drop the bot pool and reset every stratum's accumulators."""
        self.all_bots = []
        for strate in self.strates:
            strate.flush()
    def align_bots(self):
        # sort the pool by bot id for stable reporting
        self.all_bots = sorted(self.all_bots, key = lambda x: x.id)
    def sample_size(self):
        return len(self.all_bots)
    def bot_stat(self, some_bots_list, number=None):
        '''
        :param some_bots_list: a list of bots sorted by some criterion
        :param number: how many bots to display (top-n); defaults to all
        :return: nothing; prints info for each selected bot
        '''
        if not number:
            number = len(some_bots_list)
        print('====================================')
        for i in range(-1, -1-number, -1): # walk the list from the end, in reverse
            some_bots_list[i].bot_info()
    def write_all_bot_statistics(self, cycle):
        # per-bot dump for this cycle; column headers are Russian by design
        header = "Номер бота\tСделано шагов\tПорождено потомков\tПоглощено протеина\tИз фотосинтеза\tИз хищничества\tдобыто естественно\n"
        file_name = './zoutput/bot_statistics_%s.csv' % cycle
        bot_record = open(file_name, 'w', encoding='UTF16')
        bot_record.write(header)
        for bot in self.all_bots:
            bot_record.write(bot.bot_info_string() + '\n')
        bot_record.close()
    def count(self, cycle):
        '''
        Main entry point for computing one cycle's statistics.
        '''
        # classify every pooled bot into each stratum it matches
        for bot in self.all_bots:
            for strata in self.strates:
                strata.check(bot)
        for strata in self.strates:
            strata.get_averages()
        self.write_all_bot_statistics(cycle)
        self.write_strates(cycle)
        self.flush_measure_pool()
    def write_header(self):
        string = 'цикл'
        # one column with the member count of each stratum
        for strat in self.strates:
            string += '\t{}'.format(strat.name)
        # then, per metric, one average-value column per stratum (hence two loops)
        for v in Specialization.MEASURE_VALUES:
            for strat in self.strates:
                string += '\t{} {}'.format(strat.name, strat.return_measured_tuple(v)[0])
        string += '\n'
        self.period_stat.write(string)
    def write_strates(self, cycle):
        string = '%5d' % cycle
        # member count of each stratum
        for strat in self.strates:
            string += '\t{}'.format(strat.count())
        for v in Specialization.MEASURE_VALUES:
            for strat in self.strates:
                string += '\t{}'.format(strat.return_measured_tuple(v)[1])
        string +='\n'
        # decimal comma for a Russian-locale spreadsheet
        self.period_stat.write(string.replace('.', ','))
|
20,894 | 767995917adfed242215e907713f3e4d4b9c9e5c | #!/usr/bin/python
# -*- coding:UTF-8 -*-
from selenium import webdriver
import unittest
import time
class Login(unittest.TestCase):
    """Smoke tests for the 55haitao login page via Selenium/Firefox."""
    def setUp(self):
        """Open a fresh browser on the login page before each test."""
        self.driver = webdriver.Firefox()
        url = "http://my.dev.55haitao.com/login"
        self.driver.get(url)
        self.driver.implicitly_wait(30)
    def login(self, name, psw):
        """Fill in the credentials and submit the login form."""
        self.driver.find_element_by_id("username").send_keys(name)
        self.driver.find_element_by_id("pwd_show").clear()
        time.sleep(4)  # wait for the password widget to settle
        self.driver.find_element_by_id("pwd").click()
        self.driver.find_element_by_id("pwd").send_keys(psw)
        self.driver.find_element_by_id("loginbtn").click()
        time.sleep(3)  # allow the post-login redirect to complete
    def is_login_success(self):
        """Return True when the logged-in user name appears in the page header."""
        try:
            text = self.driver.find_element_by_xpath("//*[@class='header-login-name']/a").text
            print("a")
            print(text)
            return True
        except Exception:
            # BUG FIX: narrowed from a bare `except:` that also swallowed
            # SystemExit / KeyboardInterrupt; any WebDriver lookup failure
            # still means "not logged in"
            return False
    def test_01(self):
        self.login("18000000001","pppppp")
        result = self.is_login_success()
        print(result)
        self.assertTrue(result)
    def test_02(self):
        self.login("18000000003", "pppppp")
        result = self.is_login_success()
        print(result)
        self.assertTrue(result)
    def tearDown(self):
        """Always close the browser, pass or fail."""
        self.driver.quit()
if __name__=="__main__":
    unittest.main()
|
20,895 | 4c41aca2e18ce9f3e9832f60f8fe1a599a22ffad | def main():
reg_dict = {0: 0, 1: 0, 2: 6, 3: 0, 4: 0, 5: 0}
pointer = 6
seen = []
while True:
if pointer == 28:
if reg_dict[3] in seen:
print(seen[-1])
break
else:
seen.append(reg_dict[3])
pointer = 6
elif pointer == 6:
reg_dict[4] = reg_dict[3] | 65536
reg_dict[3] = 2176960
pointer = 8
elif pointer == 8:
reg_dict[1] = reg_dict[4] % 256
reg_dict[3] = (((reg_dict[3] + reg_dict[1]) % 16777216)*65899) % 16777216
if 256 > reg_dict[4]:
pointer = 28
else:
pointer = 17
elif pointer == 17:
reg_dict[1] = 0
pointer = 18
else: # pointer == 18
reg_dict[5] = (1 + reg_dict[1])*256
if reg_dict[5] > reg_dict[4]:
reg_dict[4] = reg_dict[1]
pointer = 8
else:
reg_dict[1] += 1
if __name__ == "__main__":
main()
|
20,896 | 053052562ba563c13af8c2f3cb6ffa9c8769d4e1 | """Token definitions"""
import sys
from typing import NamedTuple
# Public API of this module: token-type constants, the operator lookup
# tables, and the Token tuple itself.
__all__ = (
    "TOKEN_ILLEGAL",
    "TOKEN_INITIAL",
    "TOKEN_EOF",
    "TOKEN_TAG",
    "TOKEN_EXPRESSION",
    "TOKEN_STATEMENT",
    "TOKEN_LITERAL",
    "TOKEN_IDENTIFIER",
    "TOKEN_STRING",
    "TOKEN_INTEGER",
    "TOKEN_FLOAT",
    "TOKEN_EMPTY",
    "TOKEN_NIL",
    "TOKEN_NULL",
    "TOKEN_BLANK",
    "TOKEN_WITH",
    "TOKEN_FOR",
    "TOKEN_AS",
    "TOKEN_BY",
    "TOKEN_NEGATIVE",
    "TOKEN_TRUE",
    "TOKEN_FALSE",
    "TOKEN_CONTAINS",
    "TOKEN_IN",
    "TOKEN_LPAREN",
    "TOKEN_RPAREN",
    "TOKEN_RANGE",
    "TOKEN_LIMIT",
    "TOKEN_OFFSET",
    "TOKEN_REVERSED",
    "TOKEN_COLS",
    "TOKEN_PIPE",
    "TOKEN_COLON",
    "TOKEN_COMMA",
    "TOKEN_DOT",
    "TOKEN_LBRACKET",
    "TOKEN_RBRACKET",
    "TOKEN_ASSIGN",
    "TOKEN_AND",
    "TOKEN_OR",
    "TOKEN_EQ",
    "TOKEN_NE",
    "TOKEN_LG",
    "TOKEN_LT",
    "TOKEN_GT",
    "TOKEN_LE",
    "TOKEN_GE",
    "operators",
    "reverse_operators",
    "Token",
)

# All token-type names are interned so that type comparisons hit the
# string-identity fast path in dict lookups and `==` checks.

# Lexer bookkeeping / template structure tokens.
TOKEN_ILLEGAL = sys.intern("illegal")
TOKEN_INITIAL = sys.intern("initial")
TOKEN_EOF = sys.intern("eof")
TOKEN_TAG = sys.intern("tag")
TOKEN_EXPRESSION = sys.intern("expression")
TOKEN_STATEMENT = sys.intern("statement")
TOKEN_LITERAL = sys.intern("literal")

# Literal / value tokens.
TOKEN_IDENTIFIER = sys.intern("identifier")
TOKEN_STRING = sys.intern("string")
TOKEN_INTEGER = sys.intern("integer")
TOKEN_FLOAT = sys.intern("float")
TOKEN_EMPTY = sys.intern("empty")
TOKEN_NIL = sys.intern("nil")
TOKEN_NULL = sys.intern("null")
TOKEN_BLANK = sys.intern("blank")

# "include" and "render" keywords
TOKEN_WITH = sys.intern("with")
TOKEN_FOR = sys.intern("for")
TOKEN_AS = sys.intern("as")

# "paginate" keywords
TOKEN_BY = sys.intern("by")

TOKEN_NEGATIVE = sys.intern("negative")
TOKEN_TRUE = sys.intern("true")
TOKEN_FALSE = sys.intern("false")

# String, array and hash membership
TOKEN_CONTAINS = sys.intern("contains")

# Looping symbols and keywords. Use by `for` and `tablerow` tags.
TOKEN_IN = sys.intern("in")
TOKEN_LPAREN = sys.intern("lparen")
TOKEN_RPAREN = sys.intern("rparen")
TOKEN_RANGE = sys.intern("range")
TOKEN_LIMIT = sys.intern("limit")
TOKEN_OFFSET = sys.intern("offset")
TOKEN_REVERSED = sys.intern("reversed")

# Tablerow specific argument
TOKEN_COLS = sys.intern("cols")

# Comparison symbols and logic operators for `if` and `unless` tags.
TOKEN_EQ = sys.intern("eq")
TOKEN_NE = sys.intern("ne")
TOKEN_LG = sys.intern("ltgt")
TOKEN_LT = sys.intern("lt")
TOKEN_GT = sys.intern("gt")
TOKEN_LE = sys.intern("le")
TOKEN_GE = sys.intern("ge")
TOKEN_AND = sys.intern("and")
TOKEN_OR = sys.intern("or")

# Filter symbols
TOKEN_PIPE = sys.intern("pipe")
TOKEN_COLON = sys.intern("colon")
TOKEN_COMMA = sys.intern("comma")

# Identifier symbols
TOKEN_DOT = sys.intern("dot")
TOKEN_LBRACKET = sys.intern("lbracket")
TOKEN_RBRACKET = sys.intern("rbracket")

# Assignment
TOKEN_ASSIGN = sys.intern("assign")

# Source text -> token type for punctuation/operator lexemes.  Note "<>"
# maps to its own type (TOKEN_LG), distinct from "!=" (TOKEN_NE).
operators = {
    "==": TOKEN_EQ,
    "!=": TOKEN_NE,
    "<>": TOKEN_LG,
    "<": TOKEN_LT,
    ">": TOKEN_GT,
    "<=": TOKEN_LE,
    ">=": TOKEN_GE,
    "|": TOKEN_PIPE,
    ":": TOKEN_COLON,
    ",": TOKEN_COMMA,
    ".": TOKEN_DOT,
    "-": TOKEN_NEGATIVE,
    "(": TOKEN_LPAREN,
    ")": TOKEN_RPAREN,
    "..": TOKEN_RANGE,
    "[": TOKEN_LBRACKET,
    "]": TOKEN_RBRACKET,
    "=": TOKEN_ASSIGN,
}

# Token type -> source text, for error messages and round-tripping.
reverse_operators = {v: k for k, v in operators.items()}
class Token(NamedTuple):
    """A single lexer token: source line number, interned type name and
    the raw lexeme value."""

    linenum: int  # 1-based line in the template source
    type: str     # one of the interned TOKEN_* constants
    value: str    # raw text of the token

    def test(self, typ: str) -> bool:
        """Return True when this token's type matches *typ*."""
        return typ == self.type

    def istag(self, name: str) -> bool:
        """Return True when this token is a tag token whose value is *name*."""
        if self.type != TOKEN_TAG:
            return False
        return self.value == name
|
20,897 | 1158434c06da5224c551f5ec325ae66079ed7a24 | import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
from PIL import Image
import sys
import pickle
""" Reading images from filenames list """
def readImage(filenames, image_name, SHAPE):
    """Load every image in *filenames* with matplotlib, showing a one-line
    progress indicator scaled by SHAPE[0]*SHAPE[1] expected images.

    Returns the list of image arrays in input order.
    """
    expected = SHAPE[0] * SHAPE[1]
    data = []
    for fname in filenames:
        data.append(plt.imread(fname))
        # Fraction of the expected grid loaded so far drives the dots and
        # the percentage readout.
        frac = len(data) / expected
        sys.stdout.write(
            "\r" + "Loading " + image_name + "'s images"
            + "." * int(frac * 10)
            + " " + str(int(frac * 100)) + " %"
        )
    return data
""" Filenames contain important information for camera position, those information may be represented by
filename = "out_00_00_-859.738525_1022.898743", where 00_00_ represents the position of lens,
and -859.73 represents y position, and 1022.89 represents x position. We observed that all picture in the first
row must be going from left position to right position, while y position does not guarantee to always have exactly the same
pattern. Therefore, we must determine whether y position goes from top -> bottom or bottom -> top.
"""
def determine_file_processing_order(filenames, x_max, y_max, dirpath):
    """Inspect three corner filenames of an x_max-by-y_max capture grid and
    report the traversal direction of the y and x coordinates.

    Returns a pair of booleans:
      (first row's y > last row's y,  first image's x > first row's last x).
    Raises ValueError if the sampled files are not .png.
    """
    first_image = filenames[0] #out_00_00_
    last_row_first_image = filenames[(x_max - 1) * y_max] #out_16_00_
    last_col_first_row = filenames[x_max - 1] #out_00_16
    # Strip "<dirpath>" plus the 10-char "out_RR_CC_" prefix, leaving
    # "<y>_<x>.png".  Assumes dirpath length + 10 lands exactly after the
    # lens-position prefix — TODO confirm whether dirpath carries a
    # trailing separator.
    first_image = first_image[len(dirpath) + 10:]
    last_row_first_image = last_row_first_image[len(dirpath) + 10:]
    last_col_first_row = last_col_first_row[len(dirpath) + 10:]
    if first_image[-4:] != ".png" or last_row_first_image[-4:] != ".png":
        raise ValueError("Non png files are not supported")
    # NOTE(review): ".png" is 4 characters but 5 are stripped here, which
    # also drops the last digit of the x coordinate.  The `>` comparisons
    # below are unaffected in practice, but verify this is intentional.
    first_image = first_image[:-5]
    last_row_first_image = last_row_first_image[:-5]
    last_col_first_row = last_col_first_row[:-5]
    # Index of the "_" separating "<y>" from "<x>" in each remaining stem.
    f_ = find_char(first_image)
    l_ = find_char(last_row_first_image)
    c_ = find_char(last_col_first_row)
    first_val = float(first_image[:f_])
    last_val = float(last_row_first_image[:l_])
    first_x = float(first_image[f_+1:])
    last_x = float(last_col_first_row[c_+1:])
    return first_val > last_val, first_x > last_x
def write_to_a_file(image_name, img_lst):
    """Pickle *img_lst* to ../docs/<image_name> (overwrites any existing file)."""
    out_path = "../docs/" + image_name
    with open(out_path, "wb") as out_file:
        pickle.dump(img_lst, out_file)
def read_from_a_file(image_name):
    """Unpickle and return the object stored at ../docs/<image_name>.

    The original pre-initialized a throwaway `res = []` before the load;
    the value was always overwritten, so it is returned directly now.
    """
    with open("../docs/" + image_name, "rb") as in_file:
        return pickle.load(in_file)
def find_char(filename):
    """Return the index of the first underscore in *filename*.

    Raises ValueError if there is no underscore.  Uses str.find instead of
    the original hand-rolled character loop.
    """
    idx = filename.find("_")
    if idx == -1:
        raise ValueError("String value can't be parsed")
    return idx
""" In case of going from right to left, we need extra handling. """
def reorder_filename(filenames, x_max, y_max):
    """Return *filenames* with each row of y_max entries reversed, for
    grids captured right-to-left."""
    reordered = []
    for row in range(x_max):
        start = row * y_max
        reordered.extend(reversed(filenames[start:start + y_max]))
    return reordered
""" Image position operations, nop if reach boundary """
def going_up(cur_pos, SHAPE):
    """Move one step up (y+1); no-op at the top boundary y == SHAPE[1]-1."""
    col, row = cur_pos
    if row < SHAPE[1] - 1:
        return (col, row + 1)
    return cur_pos
def going_down(cur_pos, SHAPE):
    """Move one step down (y-1); no-op at the bottom boundary y == 0."""
    col, row = cur_pos
    if row > 0:
        return (col, row - 1)
    return cur_pos
def going_left(cur_pos, SHAPE):
    """Move one step left (x-1); no-op at the left boundary x == 0."""
    col, row = cur_pos
    if col > 0:
        return (col - 1, row)
    return cur_pos
def going_right(cur_pos, SHAPE):
    """Move one step right (x+1); no-op at the right boundary x == SHAPE[0]-1."""
    col, row = cur_pos
    if col < SHAPE[0] - 1:
        return (col + 1, row)
    return cur_pos
|
20,898 | 5903a81b2f8e5bc6b3c3275cadc5d68e6670d533 | #!/usr/bin/env python
import os
import sys
import subprocess
import argparse
import re
import logging
import operator
#--------------------------------------------------------------
# Global defines
#--------------------------------------------------------------
_TOOLCHIAN_PREFIX_ = "arm-melis-eabi-"
_TOOLCHIAN_GCC_ = "arm-melis-eabi-gcc"
_TOOLCHAIN_NM_ = "arm-melis-eabi-nm"
_TOOLCHAIN_NM_OPT_ = "-nlCS"
_TOOLCHAIN_SIZE_ = "arm-melis-eabi-size"
_TOOLCHAIN_SIZE_OPT_ = "-Axt"
_TOOLCHAIN_OBJDUMP_ = "arm-melis-eabi-objdump"
_TOOLCHAIN_OBJDUMP_OPT_ = "-D"
_TOOLCHAIN_ADDR2LINE_ = "arm-melis-eabi-addr2line"
_TOOLCHAIN_ADDR2LINE_OPT_ = "-pfiCe"
_TOOLCHAIN_READELF_ = "arm-melis-eabi-readelf"
_TOOLCHAIN_READELF_OPT_ = "-h"
_CRASH_LOG_ = "crash_log"
MATCH_ADDR = re.compile(r'0x[0-9a-f]{1,8}', re.IGNORECASE)
_ARCH_TYPE_ARM_ = "ARM"
g_arch = [_ARCH_TYPE_ARM_]
g_arch_toolchain = {
'arm' : 'arm-melis-eabi-gcc',
}
g_mm_leak_bt_cnt = 10
#--------------------------------------------------------------
# Environment and dependency check
#--------------------------------------------------------------
def get_arch_from_elf(elf_file):
    """Return the architecture name ("ARM", ...) read from an ELF header.

    Runs `arm-melis-eabi-readelf -h` on *elf_file* and scans the "Machine"
    line for any name listed in g_arch.  Returns "" when elf_file is falsy
    or no known architecture is found.  (Python 2 script: uses the print
    statement and str output from check_output.)
    """
    if not elf_file:
        return ""
    arch_info = subprocess.check_output(
        [_TOOLCHAIN_READELF_, _TOOLCHAIN_READELF_OPT_, elf_file])
    for line in arch_info.splitlines():
        if 'Machine' in line:
            temp = line.split()
            for arch in g_arch:
                # Match whole whitespace-separated fields, not substrings.
                if arch in temp:
                    print "arch : " + arch
                    return arch
    return ""
#--------------------------------------------------------------
# class Core_Dump
#--------------------------------------------------------------
class Core_Dump(object):
"""docstring for Core_Dump"""
def __init__(self, crash_log, elf_file, toolchain_path):
super(Core_Dump, self).__init__()
self.crash_log = crash_log
self.elf_file = elf_file.name
self.toolchain_path = toolchain_path
self.parse_addr_list = []
self.sp = ""
self.parse_step = 0
self.task_info = []
self.crash_type = "task"
self.arch = _ARCH_TYPE_ARM_
self.exc_num = 0;
self.arm_exc_reg = {}
self.xtensa_exc_reg = {}
# mm info parse
self.caller_list = []
self.caller_dictory = []
# mm leak parse
self.mm_leak_list = []
self.mm_leak_dictory = []
# print flag
self.print_flag = 0
self.check_env()
def find_pc_addr(self, pc_addr):
try:
pc_trans = subprocess.check_output([_TOOLCHAIN_ADDR2LINE_, _TOOLCHAIN_ADDR2LINE_OPT_, self.elf_file, pc_addr])
except Exception as err:
logging.exception(err)
else:
if not "?? ??:0" in pc_trans:
print pc_trans
else:
print "addr invalid"
def get_pc_addr(self, pc_addr):
try:
pc_trans = subprocess.check_output([_TOOLCHAIN_ADDR2LINE_, _TOOLCHAIN_ADDR2LINE_OPT_, self.elf_file, pc_addr])
except Exception as err:
logging.exception(err)
else:
if not "?? ??:0" in pc_trans:
return pc_trans
else:
return "invalid"
def get_value_form_line(self, line, index):
val_list = re.findall(MATCH_ADDR, line)
if val_list:
if index > len(val_list):
return ""
return val_list[index]
else:
return ""
def prase_addr(self, line, index):
addr = self.get_value_form_line(line, index)
if addr:
#print line
self.parse_addr_list.append(addr)
self.find_pc_addr(addr)
def check_env(self):
global _TOOLCHIAN_GCC_
global _TOOLCHAIN_ADDR2LINE_
if sys.version_info.major > 2:
error = """
This parser tools do not support Python Version 3 and above.
Python {py_version} detected.
""".format(py_version=sys.version_info)
print error
sys.exit(1)
'''
cmd = _TOOLCHIAN_GCC_;
if os.name == 'nt':
cmd = "where " + cmd
else:
cmd = "which " + cmd
retcode = subprocess.call(cmd, shell=True)
if retcode:
if not self.toolchain_path:
error = """
Can not find toolchian "{magic}" path
Please set PATH by:
export PATH=$PATH: ../build/compiler/../bin/
or:
use "-p" point to absolute toolchain path, ex:
python coredump.py {log} {elf} -p {path}
""".format(magic=_TOOLCHIAN_GCC_, log=self.crash_log.name, elf=self.elf_file, path="../build/compiler/../bin/")
print error
sys.exit(1)
else:
if not str(self.toolchain_path).endswith('/'):
self.toolchain_path = self.toolchain_path + '/'
_TOOLCHAIN_ADDR2LINE_ = self.toolchain_path + _TOOLCHAIN_ADDR2LINE_
'''
def open_print_line(self):
self.print_flag = 1
def close_print_line(self):
self.print_flag = 0
def get_print_status(self):
return self.print_flag
def show(self):
log_lines = iter(self.crash_log.read().splitlines())
for line in log_lines:
if self.get_print_status() == 1:
print line
#begin to parse one line
if "backtrace" in line:
#print line
if "interrupt" in line:
self.crash_type = "interrupt"
elif "task" in line:
self.crash_type = "task"
else:
self.prase_addr(line, 0)
else:
pass
#--------------------------------------------------------------
# Main
#--------------------------------------------------------------
def main():
    """CLI entry point: parse arguments, run the crash-log parser, then
    close the files argparse opened."""
    arg_parser = argparse.ArgumentParser(description='Melis crash log core dump')
    # Both positional arguments are opened by argparse itself.
    arg_parser.add_argument(metavar='CRASH LOG', type=argparse.FileType('rb', 0),
                            dest='crash_log', help='path to crash log file')
    arg_parser.add_argument(metavar='ELF FILE', type=argparse.FileType('rb', 0),
                            dest='elf_file', help='elf file of application')
    arg_parser.add_argument('-p', '--path', help="absolute path of build/compiler/../bin", default='')
    args = arg_parser.parse_args()

    # Run the parser over the supplied log/ELF pair.
    dump = Core_Dump(args.crash_log, args.elf_file, args.path)
    dump.show()

    # Close whatever argparse opened.
    for handle in (args.crash_log, args.elf_file):
        if handle:
            handle.close()
if __name__ == "__main__":
main()
|
20,899 | cee594d93f94fe3ec2b34f1f8703653eed2aa17f | from typing import List
import pytorch_lightning as pl
import torch as th
import torch.nn.functional as F
from adabelief_pytorch import AdaBelief
from lifelines.utils import concordance_index
from sklearn.metrics import r2_score
from utility.layers import MLP_IC, GraphNet
class EFA_DTI_Module(pl.LightningModule):
    """Drug-target interaction regressor.

    Encodes the molecule graph (GraphNet), the molecular fingerprint and
    the ProtTrans protein embedding (MLP_IC each), concatenates the three
    embeddings and regresses affinity through an output MLP.  Trained with
    MSE; validation logs MSE, concordance index and R^2.

    Fixes vs. the previous revision:
      * validation_step unpacked sharing_step's (yhat, y) as (y, yhat),
        swapping predictions and targets in the CI/R^2 metrics.
      * configure_optimizers eagerly built BOTH schedulers; constructing
        OneCycleLR mutates the optimizer's lr, corrupting the Lambda path.
    """

    def __init__(
        self,
        mol_dim: int = 196,
        mol_n_layers: int = 6,
        mol_n_heads: int = 6,
        mol_attn: str = "norm",
        act: str = "relu",
        attn_dropout: float = 0.1,
        dropout: float = 0.3,
        graph_norm_type: str = "gn",
        # NOTE(review): mutable list defaults are shared across instances;
        # harmless here because they are only read, but worth confirming.
        fp_dims: List = [2048, 512, 128],
        prottrans_dims: List = [2048, 1024, 512],
        output_dims: List = [2048, 512, 1],
        graph_pool: str = "deepset",
        lr: float = 2e-3,
        lr_anneal_epochs: int = 200,
        weight_decay: float = 1e-2,
        eps: float = 1e-16,
        scheduler: str = "OneCycle",
    ):
        super(EFA_DTI_Module, self).__init__()
        self.save_hyperparameters()
        # Molecule graph encoder; per-head qk/v dims derived from mol_dim.
        self.mol_enc = GraphNet(
            features=mol_dim,
            qk_dim=int(mol_dim // mol_n_heads),
            v_dim=max(64, int(mol_dim // mol_n_heads)),
            n_layers=mol_n_layers,
            n_heads=mol_n_heads,
            dropout=attn_dropout,
            act=act,
            attn_weight_norm=mol_attn,
            norm_type=graph_norm_type,
            pool_type=graph_pool,
        )
        self.fingerprint_enc = MLP_IC(*fp_dims, dropout=dropout, act=act)
        self.prottrans_enc = MLP_IC(*prottrans_dims, dropout=dropout, act=act)
        # Concatenated embedding width: graph pooling doubles mol_dim
        # unless "deepset" pooling is used.
        outd = (
            mol_dim * (1 if graph_pool == "deepset" else 2)
            + fp_dims[-1]
            + prottrans_dims[-1]
        )
        self.output = MLP_IC(outd, *output_dims, dropout=dropout, act=act)

    def forward(self, g, fp, pt):
        """Return affinity predictions for (graph, fingerprint, protein)."""
        g_emb = self.mol_enc(g)
        fp_emb = self.fingerprint_enc(fp)
        pt_emb = self.prottrans_enc(pt)
        yhat = self.output(th.cat([g_emb, fp_emb, pt_emb], -1)).squeeze()
        return yhat

    def sharing_step(self, batch, _=None):
        """Run a (g, fp, pt, y) batch through the model.

        Returns (yhat, y) — prediction first, target second.
        """
        y = batch[-1]
        g, fp, pt, _ = batch
        yhat = self(g, fp, pt)
        return yhat, y

    def training_step(self, batch, _=None):
        yhat, y = self.sharing_step(batch)
        mse = F.mse_loss(yhat, y)
        self.log("train_mse", mse)
        return mse

    def validation_step(self, batch, _=None):
        # BUG FIX: was `y, yhat = self.sharing_step(batch)`, which swapped
        # predictions and targets in every validation metric downstream
        # (MSE is symmetric, but concordance_index and r2_score are not).
        yhat, y = self.sharing_step(batch)
        return {"yhat": yhat, "y": y}

    def validation_epoch_end(self, outputs):
        """Aggregate the epoch's predictions/targets and log metrics."""
        yhats = []
        ys = []
        for o in outputs:
            yhats.append(o["yhat"])
            ys.append(o["y"])
        yhat = th.cat(yhats).detach().cpu()
        y = th.cat(ys).detach().cpu()
        self.log_dict(
            {
                "valid_mse": th.as_tensor(F.mse_loss(yhat, y), device=self.device),
                "valid_ci": th.as_tensor(
                    concordance_index(y, yhat), device=self.device
                ),
                "valid_r2": th.as_tensor(r2_score(y, yhat), device=self.device),
            }
        )

    def configure_optimizers(self):
        """AdaBelief optimizer plus the scheduler named by hparams.scheduler."""
        optimizer = AdaBelief(
            self.parameters(),
            lr=self.hparams.lr,
            weight_decay=float(self.hparams.weight_decay),
            eps=float(self.hparams.eps),
        )
        # Build ONLY the requested scheduler.  Instantiating OneCycleLR
        # overwrites the optimizer's lr at construction time, so eagerly
        # creating both (as before) clobbered the "Lambda" configuration.
        scheduler_factories = {
            "Lambda": lambda: th.optim.lr_scheduler.LambdaLR(
                optimizer,
                lr_lambda=lambda epoch: max(
                    1e-7, 1 - epoch / self.hparams.lr_anneal_epochs
                ),
            ),
            # NOTE(review): steps_per_epoch is fed the TOTAL step count
            # from num_training_steps — confirm this matches the intended
            # OneCycle horizon.
            "OneCycle": lambda: th.optim.lr_scheduler.OneCycleLR(
                optimizer,
                max_lr=0.01,
                steps_per_epoch=self.num_training_steps,
                epochs=self.hparams.lr_anneal_epochs,
                anneal_strategy="cos",
            ),
        }
        scheduler = {
            "scheduler": scheduler_factories[self.hparams.scheduler](),
            "reduce_on_plateau": False,
            "interval": "epoch",
            "frequency": 1,
        }
        return [optimizer], [scheduler]

    @property
    def num_training_steps(self) -> int:
        """Total training steps inferred from datamodule and devices."""
        # NOTE(review): newer Lightning defaults max_steps to -1 (truthy);
        # this guard assumes the 0/None default — confirm the PL version.
        if self.trainer.max_steps:
            return self.trainer.max_steps
        limit_batches = self.trainer.limit_train_batches
        batches = len(self.train_dataloader())
        batches = (
            min(batches, limit_batches)
            if isinstance(limit_batches, int)
            else int(limit_batches * batches)
        )
        num_devices = max(1, self.trainer.num_gpus, self.trainer.num_processes)
        if self.trainer.tpu_cores:
            num_devices = max(num_devices, self.trainer.tpu_cores)
        effective_accum = self.trainer.accumulate_grad_batches * num_devices
        return (batches // effective_accum) * self.trainer.max_epochs
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.