index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
989,100 | 86db50227faf7b96599890c0129d6caaa5cf56b5 | """Code for making plots from a completed run."""
__author__ = 'harrigan'
import logging
from os.path import join as pjoin
import os
import itertools
import pickle
import pandas as pd
import numpy as np
from IPython.parallel import Client
from .run import NoParallelView
log = logging.getLogger(__name__)
class PlotMaker():
    """From a completed run, produce plots.

    :param run: The run object
    :param parallel: Whether to use IPython parallel api
    :param load_dir: Directory the run's pickled convergence files are
        loaded from; also the root used to build plot output paths.
    """

    def __init__(self, run, parallel=True, load_dir='.'):
        self.run = run
        self.load_dir = load_dir
        # Set up optional parallelization
        if parallel:
            try:
                c = Client()
                self.lbv = c.load_balanced_view()
                self.lbv.block = True
            except FileNotFoundError as e:
                # No running ipcluster profile found: leave lbv unset (None).
                log.error("Could not connect to parallel engine: %s", e)
                self.lbv = None
        else:
            self.lbv = NoParallelView()

    def make_plots(self):
        """Make plots for all rounds (one frame per round, via lbv.map)."""
        n_rounds = self.run.n_rounds
        log.info('Making %d frames', n_rounds)
        args = [self._get_for_parallel(i) for i in range(n_rounds)]
        self.lbv.map(_plot_helper, args)

    def _get_for_parallel(self, round_i, rel=True):
        """Create a tuple of arguments for parallel helper.

        Returns (last convergence object of the round, run params,
        absolute output filename for the plot).
        """
        file = self.run.config.file
        out_fn = file.plot_fn(round_i, rel=rel)
        out_fn = pjoin(self.load_dir, out_fn)
        out_fn = os.path.abspath(out_fn)
        return self.load_convergence(round_i)[-1], self.run.params, out_fn

    def load_convergence(self, round_i, rel=True):
        """Load (unpickle) a convergence object for a particular round."""
        file = self.run.config.file
        conv_fn = "{}.pickl".format(file.conv_fn(round_i, rel=rel))
        conv_fn = pjoin(self.load_dir, conv_fn)
        with open(conv_fn, 'rb') as conv_f:
            converge = pickle.load(conv_f)
        return converge

    def load_convergences(self):
        """Load all convergences"""
        return [self.load_convergence(i) for i in range(self.run.n_rounds)]

    def convergence_dataframe(self):
        """Get a dataframe of convergences over time.

        Index is the cumulative step count (spt * round + substep);
        columns are the round index and the per-substep converged flag.
        """
        round_is = range(self.run.n_rounds)
        substeps = self.run.params.subbuild_uptos
        coords = np.array(list(itertools.product(round_is, substeps)))
        steps = self.run.params.spt * coords[:, 0] + coords[:, 1]
        conv_vals = np.asarray(
            [[c.converged for c in cs] for cs in self.load_convergences()]
        ).reshape(-1)
        df = pd.DataFrame(dict(
            round_i=coords[:, 0], steps=steps, converged=conv_vals
        )).set_index('steps')
        return df

    def find_first_convergence(self, window=4, cutoff=0.5):
        """Use a rolling average to find step and round of first convergence.

        NOTE(review): pd.rolling_mean was removed in modern pandas; this
        file targets a legacy stack (see the IPython.parallel import).
        The modern equivalent is conv_df['converged'].rolling(window).mean().
        """
        conv_df = self.convergence_dataframe()
        rolling_df = pd.rolling_mean(conv_df['converged'], window).fillna(0)
        steps = (rolling_df >= cutoff).argmax()
        rounds = conv_df['round_i'].loc[steps] + 1
        return steps, rounds
def _plot_helper(args):
    """Render and save one plot; single-tuple signature so it can be mapped.

    *args* is (convergence object, run params, output filename).
    """
    convergence, run_params, out_fn = args
    convergence.plot_and_save(run_params, None, out_fn)
def find_convergence_from_filename(run_fn):
    """From a filename, return convergence data suitable for pandas

    Unpickles the run object at *run_fn*, builds a serial PlotMaker
    rooted at the run file's directory, and returns a flat dict of run
    identifiers plus the first-convergence step and round.

    Use this from IPython.parallel map
    """
    with open(run_fn, 'rb') as run_f:
        run = pickle.load(run_f)
    pm = PlotMaker(run, load_dir=os.path.dirname(run_fn), parallel=False)
    steps, rounds = pm.find_first_convergence()
    return dict(run_fn=run_fn, run_id=run.params.run_id,
                spt=run.params.spt, tpr=run.params.tpr,
                steps=steps, rounds=rounds)
989,101 | eb3e297b6142fbd925dfd7a86d46dafb318f3063 | from flask import Flask
from electronics.electronics import electronics
from sports.sports import sports
from cellphone.cellphone import cellphone
from movies.movies import movies
from flask_cors import CORS
# Flask application wiring: one blueprint per product category, each
# mounted under its own URL prefix.
app = Flask(__name__)
app.register_blueprint(electronics,url_prefix="/electronics")
app.register_blueprint(sports,url_prefix="/sports")
app.register_blueprint(cellphone,url_prefix="/cellphone")
app.register_blueprint(movies,url_prefix="/movies")
# Enable CORS for all routes (frontend presumably served from another origin).
CORS(app)
# NOTE(review): not used in this module -- presumably read by the
# blueprints as a recommendation count; confirm before removing.
nb_closest_images = 5


@app.route('/', methods=['GET'])
def hello():
    """Landing / health-check route."""
    return "<h1>Welcome</h1>"


if __name__ == '__main__':
    app.run()
|
989,102 | c370f3bf00b93b940df13b5434e91afc28768b89 | import requests
import csv
# Geocode each place name in places.txt via the Google Maps Geocoding
# API and print "lat lng" one pair per line.
inputfile = open('places.txt','r')
#outputfile = csv.writer(open('geocoded-placelist.txt','w'))
for row in inputfile:
    row = row.rstrip()
    url = 'http://maps.googleapis.com/maps/api/geocode/json'
    mysensor = 'false'
    payload = {'address':row, 'sensor':mysensor}
    r = requests.get(url, params=payload)
    # NOTE(review): assumes every place geocodes successfully; an empty
    # 'results' list (bad name, quota error) raises IndexError -- verify.
    json = r.json()
    #print(json[results])
    # take the first (best) match returned by the API
    lat = json['results'][0]['geometry']['location']['lat']
    lng = json['results'][0]['geometry']['location']['lng']
    print(lat,lng)
    #newrow = [row,lat,lng]
    #outputfile.writerow(newrow)
989,103 | 794967bf4cb8aa2c849498bb8903d6209e3b4cdc | import traceback
from cloudshell.cp.aws.models.aws_ec2_cloud_provider_resource_model import AWSEc2CloudProviderResourceModel
from cloudshell.cp.core.models import CleanupNetwork
class CleanupSandboxInfraOperation(object):
    """Tears down the AWS artifacts created for a reservation sandbox:
    key pairs (S3 + EC2), all instances, and the reservation VPC with
    its dependencies (gateways, security groups, subnets, peerings,
    custom route tables, traffic-mirror elements)."""

    def __init__(self, vpc_service, key_pair_service, route_table_service, traffic_mirror_service):
        """
        :param vpc_service: VPC Service
        :type vpc_service: cloudshell.cp.aws.domain.services.ec2.vpc.VPCService
        :param key_pair_service: Security Group Service
        :type key_pair_service: cloudshell.cp.aws.domain.services.ec2.keypair.KeyPairService
        :param route_table_service:
        :type route_table_service: cloudshell.cp.aws.domain.services.ec2.route_table.RouteTablesService
        :param cloudshell.cp.aws.domain.services.ec2.mirroring.TrafficMirrorService traffic_mirror_service:
        """
        self.vpc_service = vpc_service
        self.key_pair_service = key_pair_service
        self.route_table_service = route_table_service
        self.traffic_mirror_service = traffic_mirror_service

    def cleanup(self, ec2_client, ec2_session, s3_session, aws_ec2_data_model, reservation_id, actions, logger):
        """
        :param ec2_client:
        :param ec2_session:
        :param s3_session:
        :param AWSEc2CloudProviderResourceModel aws_ec2_data_model: The AWS EC2 data model
        :param str reservation_id:
        :param list[NetworkAction] actions:
        :param logging.Logger logger:
        :return: CleanupNetwork result object.  Any failure during
            teardown is captured into result.success/result.errorMessage
            rather than raised (only a missing *actions* list raises).
        """
        if not actions:
            raise ValueError("No cleanup action was found")
        result = CleanupNetwork()
        result.actionId = actions[0].actionId
        result.success = True
        try:
            # need to remove the keypair before we try to find the VPC
            self._remove_keypair(aws_ec2_data_model, ec2_session, logger, reservation_id, s3_session)
            vpc = self.vpc_service.find_vpc_for_reservation(ec2_session, reservation_id)
            if not vpc:
                raise ValueError('No VPC was created for this reservation')
            logger.info("Deleting all instances")
            self.vpc_service.delete_all_instances(vpc)
            # Dependencies must be detached/deleted before the VPC itself.
            logger.info("Deleting vpc and removing dependencies")
            self.vpc_service.remove_all_internet_gateways(vpc)
            self.vpc_service.remove_all_security_groups(vpc, reservation_id)
            self.vpc_service.remove_all_subnets(vpc)
            self.vpc_service.remove_all_peering(vpc)
            self._delete_blackhole_routes_in_vpc_route_table(ec2_session, ec2_client, aws_ec2_data_model)
            self.vpc_service.remove_custom_route_tables(ec2_session, vpc)
            logger.info('Deleting traffic mirror elements')
            self.vpc_service.delete_traffic_mirror_elements(ec2_client, self.traffic_mirror_service, reservation_id,
                                                            logger)
            self.vpc_service.delete_vpc(vpc)
        except Exception as exc:
            logger.error("Error in cleanup connectivity. Error: {0}".format(traceback.format_exc()))
            result.success = False
            result.errorMessage = 'CleanupSandboxInfra ended with the error: {0}'.format(exc)
        return result

    def _remove_keypair(self, aws_ec2_data_model, ec2_session, logger, reservation_id, s3_session):
        # Remove the reservation's private key (pem) from S3, then the
        # key pair itself from EC2.
        logger.info("Removing private key (pem file) from s3")
        self.key_pair_service.remove_key_pair_for_reservation_in_s3(s3_session,
                                                                    aws_ec2_data_model.key_pairs_location,
                                                                    reservation_id)
        logger.info("Removing key pair from ec2")
        self.key_pair_service.remove_key_pair_for_reservation_in_ec2(ec2_session=ec2_session,
                                                                     reservation_id=reservation_id)

    def _delete_blackhole_routes_in_vpc_route_table(self, ec2_session, ec2_client, aws_ec2_data_model):
        # Purge blackhole routes from every route table of the management VPC.
        rts = self.route_table_service.get_all_route_tables(ec2_session=ec2_session,
                                                            vpc_id=aws_ec2_data_model.aws_management_vpc_id)
        for rt in rts:
            self.route_table_service.delete_blackhole_routes(rt, ec2_client)
|
989,104 | 52adab023c13fa4fcbdee50c174911d8e6d73eeb | def main():
line = input()
small = size = len(line)
for i in range(size):
x, y, moves = i, size-1, i
while x < y:
if line[x] != line[y]:
moves += 1
x += 1
y -= 1
small = min(small, moves)
print(small)
if __name__ == '__main__':
main()
|
989,105 | 9c84c9112f471a866078045482def623d617ee5c | import numpy as np
import numba
@numba.jit("f8(f8)",nopython=True)
def T_B( S ):
    """Total boron concentration (mol/kg-SW) from salinity *S* (psu)."""
    # Uppstrom, L., Deep-Sea Research 21:161-162, 1974:
    #return 4.157E-4 * S / 35.
    # Lee, Kim, Byrne, Millero, Feely, Yong-Ming Liu. 2010. Geochimica Et Cosmochimica Acta 74 (6)
    return 0.0004326 * S / 35
@numba.jit("f8(f8)",nopython=True)
def T_S( S ):
    """Total sulphate concentration (mol/kg-SW) from salinity *S* (psu)."""
    # Morris, A. W., and Riley, J. P., Deep-Sea Research 13:699-705, 1966:
    return (0.14 / 96.062) * (S / 1.80655 )
@numba.jit("f8(f8)",nopython=True)
def T_F( S) :
    """Total fluoride concentration (mol/kg-SW) from salinity *S* (psu)."""
    return 0.000067 * S / 18.9984 / 1.80655
def rho_pw( T ):
    """Density of pure water (kg/m**3) at S=0 and 1 atm.

    Fifth-order polynomial in Celsius from Zeebe p269.

    :param T: temperature in Kelvin
    :returns: density in kg/m**3
    """
    t_c = T - 273.15  # Celsius
    density = 999.842594
    density += 6.793952e-2 * t_c
    density -= 9.095290e-3 * t_c**2
    density += 1.001685e-4 * t_c**3
    density -= 1.120083e-6 * t_c**4
    density += 6.536332e-9 * t_c**5
    return density
def rho_sw( T, S ):
    """Density of sea water (kg/m**3) as a function of T and S.

    From Zeebe p270.  NB: the (T, S) argument order is deliberately
    reversed relative to Zeebe to match the K_*(T, S) equilibrium
    constants in this module.

    :param T: temperature in Kelvin
    :param S: salinity in psu
    :returns: density in kg/m**3
    """
    t_c = T - 273.15  # Celsius
    coef_a = 8.24493e-1 -4.0899e-3*t_c +7.6438e-5*t_c**2 \
             -8.2467e-7*t_c**3 +5.3875e-9*t_c**4
    coef_b = -5.72466e-3 +1.0227e-4*t_c -1.6546e-6*t_c**2
    coef_c = 4.83140e-4
    # pure-water density plus salinity corrections
    return rho_pw(T) + coef_a*S + coef_b*S**1.5 + coef_c*S**2
#@numba.jit("f8(f8,f8)",nopython=True)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def K0_O2( T, S ):
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
''' Equlibrium constant for O2 solubilty in water, Henry's Law
THIS IS FOR STANDARD ATMOSPHERIC COMPOSITION - NEAC
and 100% WATER VAPOUR CONTENT C^{sa}
sol gas
[O2] = K0 fCO2
fO2 : fugacity, or approx. the partial pressure of O2A
From Weiss 1970
the units are:
[O2] : mol / kg_soln
fO2 : atm
From: Battino, Rettich, Tominaga
The Solubility of oxygen and Ozone in Liquids
J. Phys. Chem. Ref. Data Vol 12, No. 2 1983
Eq 19 with Table 4 column 3
'''
method='W'
if( method=='W'):
A1= -1282.8704; A2= 36619.96
A3= 223.1396; A4= -0.354707
B1= 5.957e-3; B2=-3.7353; B3= 3.68e-6
K0_O2 = A1 + A2/T +A3*np.log(T) + A4*T + S*(B1+B2/T) + B3*S**2
K0_O2 = np.exp( K0_O2 )
# convert from NEAC, 20.94% oxygen to 1atm and from uM to M
K0_O2 = K0_O2 / 0.2094 * 1E-6
#! from
#! Oxygen solubility in sea water
#! limnol. Oceanography 37(6) 1992 1307
#! Garcia and Gordon
#double precision,parameter :: &
if( method=='G'):
A0=5.80818; A1=3.20684; A2=4.11890; A3=4.93845; A4=1.01567; A5=1.41575
B0=-7.01211E-3; B1=-7.25958E-3; B2=7.93334E-3; B3=-5.54491E-3
C0= -1.32412E-7
#
#double precision :: Ts
#
Ts = np.log((298.15-(T-273.15))/(273.15+(T-273.15)))
K0_O2 = A0 + A1*Ts + A2*Ts**2 + A3*Ts**3 + A4*Ts**4 + A5*Ts**5 \
+S*( B0 + B1*Ts + B2*Ts**2 + B3*Ts**3 ) \
+C0*S**2
K0_O2 = np.exp( K0_O2 )
# convert from NEAC, 20.94% oxygen to 1atm and from uM to M
K0_O2 = K0_O2 / 0.2094 * 1E-6
return K0_O2
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def K0_CO2( T, S ):
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# T in Kelvin
# Equlibrium constant for CO2 solubilty in water, Henry's Law
# sol gas
#
# [CO2] = K0 fCO2
#
# fCO2 : fugacity, or approx. the partial pressure of CO2
# Zeebe eq A.3.6 pg 256
# K0 from Weiss 1974
#
# the units are
# CO2 : mol / kg_soln
# fCO2 : atm
K0_CO2 = 9345.17/T - 60.2409 + 23.3585*np.log(T/100.) \
+ S*( 0.023517 - 0.00023656*T + 0.0047036*(T/100.)**2 )
K0_CO2 = np.exp( K0_CO2 )
return K0_CO2
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def CO2_fugacity_const( T ):
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# fCO2 = fugacity_const * pCOS
#
# from Zeebe p 65
p=101325. # pressure in Pa, 1atm = 101325Pa
R=8.314 # Gas constant
B = (-1636.75 + 12.0408*T - 3.27957e-2*T**2 \
+3.16528e-5*T**3 ) * 1.e-6
delta = ( 57.7 - 0.118*T) * 1.e-6
c = np.exp( p*(B+2.*delta)/(R*T ) )
return c
@numba.jit("f8(f8,f8,i4)",nopython=True)
def K1_H2CO3( T, S, const=10 ):
    """First dissociation constant of carbonic acid.

    :param T: temperature in Kelvin
    :param S: salinity in psu
    :param const: selects the published constant set -- 8 uses an
        exp(ln K) form (S-independent), 10 (default) a pK form in T and S.
    NOTE(review): any other value of `const` leaves the result unbound
    and raises -- verify callers only pass 8 or 10.
    """
    if( const== 8 ):
        K1_H2CO3 = np.exp(290.9097 - 14554.21 / T - 45.0575 * np.log(T))
    if( const==10 ):
        K1_H2CO3 = 3633.86/T - 61.2172 + 9.6777*np.log(T) - 0.011555*S + 0.0001152*S**2
        K1_H2CO3 = 10**(-K1_H2CO3)  # pK -> K
    return K1_H2CO3
@numba.jit("f8(f8,f8,i4)",nopython=True)
def K2_H2CO3( T, S, const=10 ):
    """Second dissociation constant of carbonic acid.

    Same *const* convention as K1_H2CO3: 8 = exp(ln K) form,
    10 (default) = pK form in T and S.
    NOTE(review): any other value of `const` leaves the result unbound
    and raises -- verify callers only pass 8 or 10.
    """
    if( const== 8 ):
        K2_H2CO3 = np.exp(207.6548 - 11843.79 / T - 33.6485 * np.log(T))
    if( const==10 ):
        K2_H2CO3 = 471.8/T + 25.9290 - 3.16967*np.log(T) - 0.01781*S + 0.0001122*S**2
        K2_H2CO3 = 10**(-K2_H2CO3)  # pK -> K
    return K2_H2CO3
@numba.jit("f8(f8,f8)",nopython=True)
def K_S( T, S ):
    ''' The equlibrium constant for Bisulphate ion
    K_SO4 [HSO4-] = [H+]_F [SO4--]
    Zeebe p259
    pH scale is free scale , pH_F
    This is used to convert
    pH_F <-> pH_T <-> pH_SWS
    'free' Hansson seawater scale '''
    # Dickson, A. G., J. Chemical Thermodynamics, 22:113-127, 1990"
    # ionic strength from salinity
    I = 19.924*S \
        / (1000. - 1.005*S )
    logT = np.log(T)
    K_S = -4276.1/T + 141.328 - 23.093*logT \
        + (-13856./T + 324.57 - 47.986*logT ) * np.sqrt(I) \
        + ( 35474./T - 771.54 + 114.723*logT ) * I \
        -2698./T * I**1.5 + 1776./T * I**2
    # the (1 - 0.001005 S) factor converts mol/kg-H2O -> mol/kg-SW
    K_S = np.exp(K_S) * (1 - 0.001005 * S)
    return K_S
@numba.jit("f8(f8,f8)",nopython=True)
def K_F( T, S ):
    ''' equilibrium constant for
    [HF] K_F = [H+] [F-]
    from Zeebe p 260
    pH scale is Hansson, pH_T

    Active formulation: Perez & Fraga 1987 (valid S=10-40, T=9-33 oC).
    NOTE(review): I, TS and KS below are computed but unused by the
    active formulation -- leftovers of the commented Dickson & Riley
    alternative.
    '''
    I = 19.924*S \
        /(1000. - 1.005*S)
    TS = T_S(S)
    KS = K_S(T,S)
    ## Dickson, A. G. and Riley, J. P., Marine Chemistry 7:89-99, 1979:
    #K_F = 1590.2/T - 12.641 + 1.525*np.sqrt(I)
    # # this term converts
    # # 'free' -> Hansson
    #K_F = np.exp(K_F) * (1.0 - 0.001005*S)* (1.+ TS/KS)
    # Perez,F.F. and Fraga, F., Marine Chemistry 21(2):161-168, 1987
    #S =10-40 T=9-33 oC
    lnKF = -874. / T - 0.111 * np.sqrt(S)+ 9.68
    lnKF = -lnKF
    K_F = np.exp(lnKF)
    return K_F
@numba.jit("f8(f8,f8)",nopython=True)
def K_NH3( T, S ):
    """Dissociation constant of the ammonium ion (NH4+ <-> NH3 + H+).

    pK polynomial in T (Kelvin) and S (psu); result on the total pH
    scale, converted to mol/kg-SW.
    """
    PKNH4expCW = 9.244605 - 2729.33 * (1. / 298.15 - 1. / T)
    PKNH4expCW = PKNH4expCW + (0.04203362 - 11.24742 / T) * S**0.25
    PKNH4expCW = PKNH4expCW + (-13.6416 + 1.176949 * T**0.5 - 0.02860785 * T + 545.4834 / T) * S ** 0.5
    PKNH4expCW = PKNH4expCW + (-0.1462507 + 0.0090226468 * T ** 0.5 - 0.0001471361 * T + 10.5425 / T) * S ** 1.5
    PKNH4expCW = PKNH4expCW + (0.004669309 - 0.0001691742 * T ** 0.5 - 0.5677934 / T) * S ** 2
    PKNH4expCW = PKNH4expCW + (-2.354039E-05 + 0.009698623 / T) * S ** 2.5
    KNH4 = 10.**(-PKNH4expCW) # this is on the total pH scale in mol/kg-H2O
    KNH4 = KNH4 * (1. - 0.001005 * S) # convert to mol/kg-SW
    return KNH4
@numba.jit("f8(f8,f8)",nopython=True)
def K_H2S( T,S ):
    """First dissociation constant of hydrogen sulphide (H2S <-> H+ + HS-).

    ln K fit in T (Kelvin) and S (psu).
    """
    lnKH2S = 225.838 - 13275.3/T - 34.6435*np.log(T)+ 0.3449*np.sqrt(S) - 0.0274*S
    KH2S = np.exp(lnKH2S)
    return KH2S
@numba.jit("f8(f8,f8)",nopython=True)
def K1_P( T, S ):
    """First dissociation constant of phosphoric acid (H3PO4 <-> H+ + H2PO4-)."""
    K1_P = -4576.752/T +115.525 -18.453*np.log(T) \
        +(-106.736/T+0.69171)*np.sqrt(S) + (-0.65643/T-0.01844)*S
    K1_P = np.exp(K1_P)
    return K1_P
@numba.jit("f8(f8,f8)",nopython=True)
def K2_P( T, S ):
    """Second dissociation constant of phosphoric acid (H2PO4- <-> H+ + HPO4--)."""
    K2_P = -8814.715/T + 172.1033 -27.927*np.log(T) \
        +(-160.34/T+1.35661)*np.sqrt(S) + (0.37355/T-0.05778)*S
    K2_P = np.exp(K2_P)
    return K2_P
@numba.jit("f8(f8,f8)",nopython=True)
def K3_P( T, S ):
    """Third dissociation constant of phosphoric acid (HPO4-- <-> H+ + PO4---)."""
    K3_P = -3070.75/T -18.126 + ( 17.27039/T + 2.81197 )*np.sqrt(S) \
        + (-44.994846/T - 0.09984)*S
    K3_P = np.exp(K3_P)
    return K3_P
@numba.jit("f8(f8,f8)",nopython=True)
def K_W( T, S ):
    """Ion product of water in sea water: K_W = [H+][OH-]."""
    logT = np.log(T)
    K_W = 148.9802 - 13847.26/T - 23.6521 * logT \
        +(118.67/T - 5.977 + 1.0495*logT)*np.sqrt(S) - 0.01615*S
    K_W = np.exp(K_W)
    return K_W
@numba.jit("f8(f8,f8)",nopython=True)
def K_Si( T, S ):
    """Dissociation constant of silicic acid (Si(OH)4 <-> H+ + SiO(OH)3-).

    Fit in ionic strength I; converted from mol/kg-H2O to mol/kg-SW.
    """
    # ionic strength from salinity
    I = 19.924*S \
        /(1000. - 1.005*S)
    # K_Si = -8904.2/T + 117.385 -19.334*np.log(T) \
    K_Si = -8904.2/T + 117.4 -19.334*np.log(T) \
        + ( 3.5913 - 458.79 /T )*np.sqrt(I) + ( 188.74/T - 1.5998)*I \
        + ( 0.07871 - 12.1652/T )*I**2
    # the (1 - 0.001005 S) factor converts mol/kg-H2O -> mol/kg-SW
    K_Si = np.exp(K_Si)*(1.0-0.001005*S)
    return K_Si
@numba.jit("f8(f8,f8)",nopython=True)
def K_B( T, S ) :
    """Dissociation constant of boric acid (B(OH)3 <-> H+ + B(OH)4-).

    Dickson 1990 fit; the trailing factor converts the result from the
    Hansson (total) scale pH_T to the seawater scale pH_SWS.
    """
    # Dickson, A. G., Deep-Sea Research 37:755-766, 1990:
    sqrtS = np.sqrt(S)
    K_B = ( -8966.90 - 2890.53*sqrtS - 77.942*S + 1.728*S*sqrtS \
        -0.0996*S**2 ) / T \
        +148.0248 + 137.1942*sqrtS + 1.62142*S \
        -(24.4344 + 25.085*sqrtS + 0.2474*S)*np.log(T) \
        +0.053105*sqrtS*T
    #! Needed for converting from Hansson pH_T -> seawater pH_SWS
    TS = T_S( S ) ; KS = K_S(T,S) # pH_F
    TF = T_F( S ) ; KF = K_F(T,S) # pH_T
    K_B = np.exp( K_B ) * (1. + TS/KS)/( 1. + TS/KS + TF/KF )
    return K_B
@numba.jit("f8( f8,f8,f8, f8,f8,f8,f8, f8,f8, i4,f8,f8, i4,i4)", nopython=True)
def CC_solve_pH_arr( S,T,P, TP,TSi,TNH3,TH2S, TA,DIC, const,K1_f,K2_f, pHi,scale):
    """Solve the carbonate system for pH given TA and DIC.

    :param S: salinity (psu)
    :param T: temperature (Kelvin)
    :param P: pressure (not used in this routine)
    :param TP, TSi, TNH3, TH2S: total phosphate / silicate / ammonia /
        sulphide (mol/kg)
    :param TA: total alkalinity (mol/kg)
    :param DIC: dissolved inorganic carbon (mol/kg)
    :param const: carbonate-constant set passed on to K1_H2CO3/K2_H2CO3
    :param K1_f, K2_f: multiplicative tweak factors applied to K1/K2
    :param pHi: initial pH guess -- currently ignored (see `if(True)`)
    :param scale: 1 converts the returned pH from total to free scale
    :returns: pH (total scale unless scale==1)

    Strategy: bracket the alkalinity-residual root by bisection over
    pH 3..12, then polish with Newton-Raphson using the analytic
    derivative Dobj.
    """
    #! Needed for converting from Hansson pH_T -> seawater pH_SWS
    TS = T_S( S ) ; KS = K_S(T,S) # pH_F
    TF = T_F( S ) ; KF = K_F(T,S) # pH_T
    SWS_2_T = (1. + TS/KS)/( 1. + TS/KS + TF/KF )
    Free_2_T = 1. + TS/KS
    KW = K_W( T,S) # pH_T
    KB = K_B( T,S )/SWS_2_T
    TB = T_B(S)
    K1 = K1_H2CO3( T,S, const=const) *K1_f#/ SWS_2_T
    K2 = K2_H2CO3( T,S, const=const) *K2_f#/ SWS_2_T
    K1P = K1_P(T,S)#/SWS_2_T
    K2P = K2_P(T,S)#/SWS_2_T
    K3P = K3_P(T,S)#/SWS_2_T
    KSi = K_Si(T,S)#/SWS_2_T
    KNH3 = K_NH3(T,S)
    KH2S = K_H2S(T,S)

    def obj( pH ):
        # Alkalinity residual (scaled to uM): zero when pH is consistent
        # with the given TA and DIC.
        h = 10**(-pH)
        h_free = h/Free_2_T
        y = DIC*(K1*h+2.*K1*K2)/(h*h+K1*h+K1*K2) \
            - h_free + KW/h \
            - TA \
            +TB /(1.+h/KB) \
            +TP*(K1P*K2P*h+2*K1P*K2P*K3P-h**3)/(h**3+K1P*h**2+K1P*K2P*h+K1P*K2P*K3P) \
            -TF /(1.+KF/h_free) \
            +TSi/(1.+h/KSi) \
            +TNH3/(1.+h/KNH3) \
            +TH2S /(1.+h/KH2S) \
            -TS /(1.+KS/h_free)
        y = y*1e6
        return y

    def Dobj( pH ):
        # Analytic derivative d(obj)/d(pH), via chain rule through
        # h = 10**-pH (final factor -ln(10)*h).
        h = 10**(-pH)
        dy = DIC*(K1 +2*K1*K2)/(h**2+K1*h+K1*K2) \
            -DIC*(K1*h+2*K1*K2)/(h**2+K1*h+K1*K2)**2*(2*h+K1) \
            -TB *1./(1+h/KB)**2 / KB \
            -KW/h**2 - 1./Free_2_T \
            +TP*(K1P*K2P -3.*h**2)/(h**3+K1P*h**2+K1P*K2P*h+K1P*K2P*K3P) \
            -TP*(K1P*K2P*h+2*K1P*K2P*K3P-h**3)/(h**3+K1P*h**2+K1P*K2P*h+K1P*K2P*K3P)**2\
            *(3.*h**2+2.*K1P*h+K1P*K2P) \
            -TF /(h+KF)**2 * KF \
            -TSi *KSi /(KSi +h)**2 \
            -TNH3*KNH3/(KNH3+h)**2 \
            -TH2S*KH2S/(KH2S+h)**2 \
            -TS /(h+KS*Free_2_T)**2*KS*Free_2_T
        dy =dy*1e6 * (-np.log(10.)*10**(-pH))
        return dy

    #if( pHi is None ):
    if( True ):
        # find a rough range by bisection
        h0 = 12.0
        h1 = 7.0
        h2 = 3.0
        for i in range(10):
            h1 = (h0+h2)/2.0
            f0 = obj(h0)
            f1 = obj(h1)
            f2 = obj(h2)
            if( ( f0<0. and f1>0. and f2>0. ) or \
                ( f0>0. and f1<0. and f2<0. ) ):
                h2 = h1
            elif( ( f0<0. and f1<0. and f2>0. ) or \
                  ( f0>0. and f1>0. and f2<0. ) ):
                h0 = h1
            else:
                break
        pH0=h1
    else:
        pH0=pHi
    # from scipy.optimize import newton
    # pH_opt=newton( obj,pH0, tol=1e-6 )
    # pH_opt=newton( obj,pH0,fprime=Dobj, tol=1e-6 )
    # pH0 = pH_opt
    # Newton-Raphson polish from the bisection estimate.
    for i in range(100):
        f0 = obj( pH0 )
        df0 = Dobj( pH0 )
        pH1 = pH0 - f0/df0
        if( (abs(f0)<1e-8) and (abs(pH0-pH1)<0.01 )):
            break
        pH0 = pH1
    # optional conversion total -> free pH scale
    if( scale==1): pH0=pH0 +np.log10( Free_2_T)
    return pH0
def CO2sys( data_in, const, scale='tot', K1_f=1.0, K2_f=1.0 ):
    """Carbonate-system speciation for an array of samples.

    :param data_in: 2-D array-like, one row per sample, columns
        [S, T(degC), P, TP, TSi, TNH3, TH2S, TA, DIC], with the last six
        in umol/kg (converted to mol/kg internally for the pH solve).
    :param const: carbonate-constant set forwarded to K1_H2CO3/K2_H2CO3.
    :param scale: output pH scale, 'tot' (default) or 'free'.
    :param K1_f, K2_f: multiplicative tweak factors on K1/K2.
    :returns: structured numpy array of speciation fields per sample;
        species concentrations are in the same (umol) units as the input.
        NOTE(review): fields 'fCO2', 'AB' and 'AHS2' are declared but
        never filled (left 0) -- verify downstream use.
    """
    nd,nr = np.shape(data_in)
    names=('pH','fCO2','pCO2','HCO3','CO3','CO2','AB','OH','AP','ASi','ANH3','AHS2',
        'H3PO4','H2PO4','HPO4','PO4', 'NH4','NH3','SiO(OH)3')
    outp= np.zeros( nd, dtype={'names':names,'formats':tuple('f8' for i in range(len(names)))} )
    scalei=0
    if( scale in ['tot',0] ): scalei=0
    pH0=7.
    for i,data in enumerate(data_in):
        S = data[0]
        TK = data[1]+273.15
        P = data[2]
        # umol/kg -> mol/kg for the solver
        TP,TSi,TNH3,TH2S, TA,DIC = data[3:9]*1e-6
        # previous sample's pH is passed as the initial guess
        pH = CC_solve_pH_arr( S,TK,P, TP,TSi,TNH3,TH2S, TA,DIC, const,K1_f,K2_f,pH0, 0 )
        pH0=pH
        TS = T_S( S ) ; KS = K_S(TK,S) # pH_F
        TF = T_F( S ) ; KF = K_F(TK,S) # pH_T
        SWS_2_T = (1. + TS/KS)/( 1. + TS/KS + TF/KF )
        Free_2_T = 1. + TS/KS
        K1 = K1_H2CO3( TK,S, const=const)*K1_f
        K2 = K2_H2CO3( TK,S, const=const)*K2_f
        KW = K_W( TK,S) # pH_T
        KB = K_B( TK,S )/SWS_2_T
        TB = T_B(S)
        K1P = K1_P(TK,S)#/SWS_2_T
        K2P = K2_P(TK,S)#/SWS_2_T
        K3P = K3_P(TK,S)#/SWS_2_T
        KSi = K_Si(TK,S)#/SWS_2_T
        KNH3= K_NH3(TK,S)
        # re-unpack WITHOUT the 1e-6: species below stay in input (umol) units
        TP,TSi,TNH3,TH2S, TA,DIC = data[3:9]
        outp['pH'][i]=pH
        if( scale=='free'): outp['pH'][i]=pH +np.log10( Free_2_T)
        H = 10**(-pH)
        H2 = H*H
        H3 = H2*H
        # carbonate speciation from DIC
        denom = (H2+K1*H+K1*K2)
        CO2 = DIC*H2 /denom
        outp[ 'CO2'][i] = CO2
        outp[ 'HCO3'][i] = DIC*H *K1 /denom
        outp[ 'CO3'][i] = DIC *K1*K2/denom
        outp[ 'OH'][i] = KW/H
        outp['pCO2'][i] = CO2 / K0_CO2(TK,S) / CO2_fugacity_const(TK)
        # phosphate speciation
        denom = H3 + K1P*H2 + K1P*K2P*H + K1P*K2P*K3P
        outp['H3PO4'][i] = TP/denom*H3
        outp['H2PO4'][i] = TP/denom*H2*K1P
        outp[ 'HPO4'][i] = TP/denom*H *K1P*K2P
        outp[ 'PO4'][i] = TP/denom *K1P*K2P*K3P
        outp[ 'AP'][i] = +outp[ 'HPO4'][i] + 2*outp[ 'PO4'][i] -outp['H3PO4'][i]
        # silicate
        denom = (1.+H/KSi)
        outp['SiO(OH)3'][i] = TSi/denom
        outp['ASi'] [i] = TSi/denom
        # ammonia
        denom = (KNH3+H)
        outp[ 'NH3'][i] = TNH3/denom *KNH3
        outp[ 'NH4'][i] = TNH3/denom * H
        outp['ANH3'][i] = outp[ 'NH3'][i]
    return outp
@numba.jit("f8( f8, f8, f8,f8, i4, optional(f8), optional(f8), optional(f8),optional(f8),optional(f8),f8,f8,optional(f8),optional(i4))")
def CC_solve_pH( DIC, TA, T,S, const, TP=0., TSi=0, TB=None, TS=None, TF=None, \
    K1_f=1.0, K2_f=1.0, pHi=None, scale=0 ):
    """Solve for pH from DIC and TA (mol/kg); scalar variant without the
    NH3/H2S alkalinity terms of CC_solve_pH_arr.

    :param pHi: optional initial pH guess; bisection bracketing is used
        when it is None.
    :returns: pH on the total scale (the `scale` argument is unused here).

    NOTE(review): TS and TF are unconditionally recomputed just below,
    so the TS/TF arguments and their `is None` guards are dead code.
    NOTE(review): the TSi term in Dobj (`+TSi/(KSi+h/KSi)**2`) disagrees
    with the analytic derivative used in CC_solve_pH_arr
    (`-TSi*KSi/(KSi+h)**2`); Dobj only steers Newton's method, so this
    affects convergence rather than the root -- verify.
    """
    #! Needed for converting from Hansson pH_T -> seawater pH_SWS
    TS = T_S( S ) ; KS = K_S(T,S) # pH_F
    TF = T_F( S ) ; KF = K_F(T,S) # pH_T
    if( TS is None ): TS = T_S(S)
    if( TF is None ): TF = T_F(S)
    SWS_2_T = (1. + TS/KS)/( 1. + TS/KS + TF/KF )
    Free_2_T = 1. + TS/KS
    K1 = K1_H2CO3( T,S, const=const) *K1_f#/ SWS_2_T
    K2 = K2_H2CO3( T,S, const=const) *K2_f#/ SWS_2_T
    KW = K_W( T,S) # pH_T
    KB = K_B( T,S )/SWS_2_T
    if( TB is None ): TB = T_B(S)
    K1P = K1_P(T,S)#/SWS_2_T
    K2P = K2_P(T,S)#/SWS_2_T
    K3P = K3_P(T,S)#/SWS_2_T
    KSi = K_Si(T,S)#/SWS_2_T

    #@numba.jit("f8(f8)",nopython=True)
    def obj( pH ):
        # Alkalinity residual (scaled to uM); zero at the solution pH.
        h = 10**(-pH)
        h_free = h/Free_2_T
        y = DIC*(K1*h+2.*K1*K2)/(h*h+K1*h+K1*K2) \
            - h_free + KW/h \
            - TA \
            +TB /(1.+h/KB) \
            +TP*(K1P*K2P*h+2*K1P*K2P*K3P-h**3)/(h**3+K1P*h**2+K1P*K2P*h+K1P*K2P*K3P) \
            -TF /(1.+KF/h_free) \
            +TSi/(1.+h/KSi) \
            -TS /(1.+KS/h_free)
        y = y*1e6
        return y

    #@numba.jit("f8(f8)",nopython=True)
    def Dobj( pH ):
        # Derivative of obj with respect to pH (chain rule through h).
        h = 10**(-pH)
        dy = DIC*(K1 +2*K1*K2)/(h**2+K1*h+K1*K2) \
            -DIC*(K1*h+2*K1*K2)/(h**2+K1*h+K1*K2)**2*(2*h+K1) \
            -TB *1./(1+h/KB)**2 / KB \
            -KW/h**2 - 1./Free_2_T \
            +TP*(K1P*K2P -3.*h**2)/(h**3+K1P*h**2+K1P*K2P*h+K1P*K2P*K3P) \
            -TP*(K1P*K2P*h+2*K1P*K2P*K3P-h**3)/(h**3+K1P*h**2+K1P*K2P*h+K1P*K2P*K3P)**2\
            *(3.*h**2+2.*K1P*h+K1P*K2P) \
            -TF /(h+KF)**2 * KF \
            +TSi /(KSi+h/KSi)**2 \
            -TS /(h+KS*Free_2_T)**2*KS*Free_2_T
        dy =dy*1e6 * (-np.log(10.)*10**(-pH))
        return dy

    if( pHi is None ):
        # find a rough range by bisection
        h0 = 12.0
        h1 = 7.0
        h2 = 3.0
        for i in range(8):
            h1 = (h0+h2)/2.0
            f0 = obj(h0)
            f1 = obj(h1)
            f2 = obj(h2)
            if( ( f0<0. and f1>0. and f2>0. ) or \
                ( f0>0. and f1<0. and f2<0. ) ):
                h2 = h1
            elif( ( f0<0. and f1<0. and f2>0. ) or \
                  ( f0>0. and f1>0. and f2<0. ) ):
                h0 = h1
            else:
                break
        pH0=h1
    else:
        pH0=pHi
    # from scipy.optimize import newton
    # pH_opt=newton( obj,pH0, tol=1e-6 )
    # pH_opt=newton( obj,pH0,fprime=Dobj, tol=1e-6 )
    # pH0 = pH_opt
    # Newton-Raphson polish
    for i in range(100):
        f0 = obj( pH0 )
        df0 = Dobj( pH0 )
        pH1 = pH0 - f0/df0
        if( abs(f0)<1e-6 ): break
        pH0 = pH1
    return pH0
def CC_solve( DIC, TA, T,S, const=10, TP=0., TSi=0, TB=None, TS=None, TF=None, \
    K1_f=1.0, K2_f=1.0, pHi=None, scale=0 ):
    """Solve the carbonate system for pH and speciation from DIC and TA.

    :param DIC: dissolved inorganic carbon (mol/kg)
    :param TA: total alkalinity (mol/kg)
    :param T: temperature (Kelvin); :param S: salinity (psu)
    :param const: carbonate-constant set forwarded to K1_H2CO3/K2_H2CO3
    :param TP, TSi: total phosphate / silicate (mol/kg)
    :param TB: total boron; computed from S when None
    :param K1_f, K2_f: multiplicative tweak factors on K1/K2
    :param pHi: optional initial pH guess; bisection bracketing when None
    :param scale: 'free' converts the returned pH to the free scale
    :returns: (pH, dict of species concentrations)

    NOTE(review): TS and TF are unconditionally recomputed just below,
    so the TS/TF arguments are effectively ignored (behavior preserved).
    """
    #! Needed for converting from Hansson pH_T -> seawater pH_SWS
    TS = T_S( S ) ; KS = K_S(T,S) # pH_F
    TF = T_F( S ) ; KF = K_F(T,S) # pH_T
    # FIX: was `== None` (PEP 8: identity comparison with None); both
    # guards are dead given the unconditional assignments above.
    if( TS is None ): TS = T_S(S)
    if( TF is None ): TF = T_F(S)
    SWS_2_T = (1. + TS/KS)/( 1. + TS/KS + TF/KF )
    Free_2_T = 1. + TS/KS
    K1 = K1_H2CO3( T,S, const=const) *K1_f#/ SWS_2_T
    K2 = K2_H2CO3( T,S, const=const) *K2_f#/ SWS_2_T
    KW = K_W( T,S) # pH_T
    KB = K_B( T,S )/SWS_2_T
    if( TB is None ): TB = T_B(S)
    K1P = K1_P(T,S)#/SWS_2_T
    K2P = K2_P(T,S)#/SWS_2_T
    K3P = K3_P(T,S)#/SWS_2_T
    KSi = K_Si(T,S)#/SWS_2_T

    def obj( pH ):
        # Alkalinity residual (scaled to uM); zero at the solution pH.
        h = 10**(-pH)
        h_free = h/Free_2_T
        y = DIC*(K1*h+2.*K1*K2)/(h*h+K1*h+K1*K2) \
            - h_free + KW/h \
            - TA \
            +TB /(1.+h/KB) \
            +TP*(K1P*K2P*h+2*K1P*K2P*K3P-h**3)/(h**3+K1P*h**2+K1P*K2P*h+K1P*K2P*K3P) \
            -TF /(1.+KF/h_free) \
            +TSi/(1.+h/KSi) \
            -TS /(1.+KS/h_free)
        y = y*1e6
        return y

    def Dobj( pH ):
        # Analytic derivative of obj (chain rule through h = 10**-pH).
        h = 10**(-pH)
        dy = DIC*(K1 +2*K1*K2)/(h**2+K1*h+K1*K2) \
            -DIC*(K1*h+2*K1*K2)/(h**2+K1*h+K1*K2)**2*(2*h+K1) \
            -TB *1./(1+h/KB)**2 / KB \
            -KW/h**2 - 1./Free_2_T \
            +TP*(K1P*K2P -3.*h**2)/(h**3+K1P*h**2+K1P*K2P*h+K1P*K2P*K3P) \
            -TP*(K1P*K2P*h+2*K1P*K2P*K3P-h**3)/(h**3+K1P*h**2+K1P*K2P*h+K1P*K2P*K3P)**2\
            *(3.*h**2+2.*K1P*h+K1P*K2P) \
            -TF /(h+KF)**2 * KF \
            -TSi *KSi /(KSi +h)**2 \
            -TS /(h+KS*Free_2_T)**2*KS*Free_2_T
        # FIX: the TSi term was `+TSi /(KSi+h/KSi)**2` -- wrong sign and
        # form; d/dh [TSi/(1+h/KSi)] = -TSi*KSi/(KSi+h)**2, matching
        # CC_solve_pH_arr.  Only Newton convergence was affected.
        dy =dy*1e6 * (-np.log(10.)*10**(-pH))
        return dy

    if( pHi is None ):
        # find a rough range by bisection
        h0 = 12.0
        h1 = 7.0
        h2 = 3.0
        for i in range(8):
            h1 = (h0+h2)/2.0
            f0 = obj(h0)
            f1 = obj(h1)
            f2 = obj(h2)
            if( ( f0<0. and f1>0. and f2>0. ) or \
                ( f0>0. and f1<0. and f2<0. ) ):
                h2 = h1
            elif( ( f0<0. and f1<0. and f2>0. ) or \
                  ( f0>0. and f1>0. and f2<0. ) ):
                h0 = h1
            else:
                break
        pH0=h1
    else:
        pH0=pHi
    # Newton-Raphson polish
    for i in range(100):
        f0 = obj( pH0 )
        df0 = Dobj( pH0 )
        pH1 = pH0 - f0/df0
        if( abs(f0)<1e-6 ): break
        pH0 = pH1
    # speciation at the solved pH
    H = 10**(-pH0)
    H2 = H*H
    H3 = H*H*H
    outp={}
    denom = (H2+K1*H+K1*K2)
    CO2 = DIC*H2 /denom
    outp[ 'CO2'] = CO2
    outp[ 'HCO3'] = DIC*H *K1 /denom
    outp[ 'CO3'] = DIC *K1*K2/denom
    outp[ 'OH'] = KW/H
    outp['pCO2'] = CO2/K0_CO2(T,S) / CO2_fugacity_const(T)
    denom = ( H3 + K1P*H2+ H*K1P*K2P+K1P*K2P*K3P )
    outp['H3PO4'] = TP*H3 /denom
    outp['H2PO4'] = TP*H2*K1P /denom
    outp[ 'HPO4'] = TP*H *K1P*K2P /denom
    outp[ 'PO4'] = TP *K1P*K2P*K3P/denom
    outp[ 'AP'] = outp['HPO4'] +2*outp['PO4'] -outp['H3PO4']
    outp[ 'ASi'] = TSi*KSi/(KSi+H)
    # FIX: was `pH0 = pH0/free_2_T` -- `free_2_T` is undefined (NameError)
    # and a scale change is additive in pH; use +log10(Free_2_T) as in
    # CC_solve_pH_arr (scale==1 branch) and CO2sys (scale=='free').
    if( scale=='free' ): pH0 = pH0 + np.log10(Free_2_T)
    return pH0,outp
|
989,106 | 112d962c7d410d9d8dad682873b45cf0b94b326f | from django.urls import path
from . import views
# URL routes for the finance app: list/add/delete endpoints for record
# types and finance records, plus a bulk clean endpoint.
app_name = 'finance'

urlpatterns = [
    path('typelist', views.typelist , name= 'typelist'),
    path('typeadd', views.typeadd , name= 'typeadd'),
    path('typedel', views.typedel , name= 'typedel'),
    path('recordlist', views.recordlist , name= 'recordlist'),
    path('recordadd', views.recordadd , name= 'recordadd'),
    path('recorddel', views.recorddel , name= 'recorddel'),
    path('recordclean', views.recordclean , name= 'recordclean'),
]
989,107 | 18f449add89a2869b2d7b4893e0f97de08f88360 | import xlsxwriter
import numpy as np
# In-place value iteration for a 4-state random-walk chain, writing the
# evolving state values to a worksheet after each sweep.  State 3 is
# terminal; policy bias (epsilon / probability E) controls the chance of
# moving right vs left.
# NOTE(review): source indentation was lost upstream; the loop nesting
# below is reconstructed from the variable usage -- verify against the
# original.  Also note Workbook('data') writes a file with no .xlsx
# extension, and stateValue carries over between experiments (no reset).
S=[0,1,2,3]
stateValue=[0.5,0.5,0.5,0.5]
reward=-1
epsilon=0.1
startstate=1  # unused
run=range(500)
book=xlsxwriter.Workbook('data')
sheet=book.add_worksheet('data')
k=0
# header row: one column per state
for i in S:
    sheet.write(k,i,'状态'+str(i))
k+=1
for i in run:#(1-epsilon/2)right
    for s in S:
        if s==3:
            break  # terminal state: no update, end the sweep
        elif s==1:
            stateValue[s]=(1-epsilon/2)*(reward+stateValue[s-1])+(epsilon/2)*(reward+stateValue[s+1])
        elif s==0:
            stateValue[s]=(1-epsilon/2)*(reward+stateValue[s+1])+(epsilon/2)*(reward+stateValue[s])
        elif s==2:
            stateValue[s] = (1 - epsilon / 2) * (reward + stateValue[s + 1]) + (epsilon / 2) * (reward + stateValue[s-1])
        sheet.write(k,s,stateValue[s])
    k=k+1
print(stateValue)
# second experiment: mostly-left policy
for i in S:
    sheet.write(k,i,'状态'+str(i))
k+=1
for i in run:#(1-epsilon/2)left
    for s in S:
        if s==3:
            break
        elif s==1:
            stateValue[s]=(1-epsilon/2)*(reward+stateValue[s+1])+(epsilon/2)*(reward+stateValue[s-1])
        elif s==0:
            stateValue[s]=(1-epsilon/2)*(reward+stateValue[s])+(epsilon/2)*(reward+stateValue[s+1])
        elif s==2:
            stateValue[s] = (1 - epsilon / 2) * (reward + stateValue[s - 1]) + (epsilon / 2) * (reward + stateValue[s+1])
        sheet.write(k,s,stateValue[s])
    k+=1
print(stateValue)
# third experiment: sweep the right-move probability E and record the
# converged value of state 0 for each E.
sheet.write(k,0,'probobilityOfright')
sheet.write(k,1,'状态'+str(0))
probobility=np.arange(0.02,0.99,0.01) #right
k+=1
for E in probobility:
    for i in run:#(epsilon)right
        for s in S:
            if s==3:
                break
            elif s==1:
                stateValue[s]=(E)*(reward+stateValue[s-1])+(1-E)*(reward+stateValue[s+1])
            elif s==0:
                stateValue[s]=(E)*(reward+stateValue[s+1])+(1-E)*(reward+stateValue[s])
            elif s==2:
                stateValue[s] = (E) * (reward + stateValue[s + 1]) + (1-E) * (reward + stateValue[s-1])
    sheet.write(k,0,E)
    sheet.write(k,1,stateValue[0])
    k=k+1
book.close()
|
989,108 | 8e7d741106194e73a73462d19a11f0c78bbd51a0 | import csv
from datetime import datetime
from pathlib import Path
from typing import List, Dict, Optional, Any, Iterator
import numpy as np
# listの値の型変換
def list_val_type_conv(data: List[str]) -> List[Any]:
    """Coerce a list of raw CSV cell strings into typed Python values.

    Conversion rules, in order:
      * values already typed int/float pass through unchanged
      * numeric strings -> float
      * 'True'/'true' -> True, 'False'/'false' -> False
      * '' -> None
      * strings with non-digit characters -> ISO-8601 timestamp when they
        parse as '%Y-%m-%d ...' or '%Y/%m/%d ...' datetimes (assumed +0900
        / JST), otherwise kept as the raw string

    Args:
        data: one CSV record's cell values.

    Returns:
        A new list with the converted values.
    """
    numbers = {str(i) for i in range(10)}
    result: List[Any] = []
    for value in data:
        # Already-typed numbers pass straight through.
        if type(value) in (int, float):
            result.append(value)
            continue
        try:
            result.append(float(value))
            continue
        except ValueError:
            pass
        if value in ('True', 'False', 'true', 'false'):
            # BUG FIX: bool(value) is True for EVERY non-empty string, so
            # 'False'/'false' used to convert to True.
            result.append(value in ('True', 'true'))
        elif value == '':
            result.append(None)
        elif set(value) - numbers:
            try:
                if '-' in value:
                    ts = datetime.strptime(value + '+0900', '%Y-%m-%d %H:%M:%S.%f%z').isoformat()
                    result.append(ts)
                elif '/' in value:
                    ts = datetime.strptime(value + '+0900', '%Y/%m/%d %H:%M:%S.%f%z').isoformat()
                    result.append(ts)
                else:
                    # BUG FIX: non-date strings (no '-' and no '/') were
                    # silently dropped from the result.
                    result.append(value)
            except ValueError:
                result.append(value)
        # else: digit-only strings always parse as float above, so this
        # branch is unreachable in practice (mirrors the old behavior).
    return result
# echonetliteのcsvをlineProtocolに変換
def elcsv_lp_generator(csvfile: str) -> Iterator[List[Dict[str, Any]]]:
    """Stream an ECHONET Lite CSV file as InfluxDB line-protocol dicts.

    Column 0 is the record timestamp; the remaining columns are channel
    readings.  A 'ch_total' field summing the numeric channel values is
    appended to every record.

    Args:
        csvfile: path to the CSV file (first row is the header).

    Yields:
        One-element list containing the line-protocol dict for each row.
    """
    with open(csvfile) as f:
        reader = csv.reader(f)
        header = next(reader)
        header.append('ch_total')
        for record in reader:
            record = list_val_type_conv(record)
            ch_total = 0
            for num in record[1:]:
                # BUG FIX: the old test `a or b and c` parsed as
                # `a or (b and c)`, and `num != np.nan` is always True
                # (NaN compares unequal to everything, itself included),
                # so NaN values were never actually excluded.  The
                # `num == num` check is False exactly for NaN.
                if type(num) in (int, float) and num == num:
                    ch_total += num
            record.append(ch_total)
            line_protocol = [{
                'measurement': 'echonet',
                'time': record[0],
                'fields': {k: v for k, v in zip(header[1:], record[1:])}
            }]
            yield line_protocol
def el_socket_lp(columns: List[str], record: List[Any]) -> List[Dict[str, Any]]:
    """Convert one socket-received ECHONET record to line protocol.

    Same shape as elcsv_lp_generator, but for a single in-memory record:
    column 0 is the timestamp and a 'ch_total' sum field is appended.

    Args:
        columns: column names (not modified).
        record: one record's raw values.

    Returns:
        One-element list containing the line-protocol dict.
    """
    # BUG FIX: `columns.append(...)` mutated the caller's list, growing it
    # by one 'ch_total' entry on every call; build a copy instead.
    columns = columns + ['ch_total']
    record = list_val_type_conv(record)
    ch_total = 0
    for num in record[1:]:
        # BUG FIX: same precedence / NaN-comparison bug as in
        # elcsv_lp_generator; `num == num` is False exactly for NaN.
        if type(num) in (int, float) and num == num:
            ch_total += num
    record.append(ch_total)
    line_protocol = [{
        'measurement': 'echonet',
        'time': record[0],
        'fields': {k: v for k, v in zip(columns[1:], record[1:])}
    }]
    return line_protocol
|
989,109 | 083b69c0a875cc0fc54f3032e340fe08be9cbbf2 | # Generated by Django 3.1.7 on 2021-02-25 08:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration: creates the Logintable and Register tables.

    Auto-generated by Django 3.1.7.  Applied migrations must stay stable;
    schema changes belong in a new migration, not edits here.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Logintable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=300)),
                # NOTE(review): password is a plain CharField -- confirm the
                # application hashes it before save (nothing here enforces it).
                ('password', models.CharField(max_length=300)),
                ('type', models.CharField(max_length=200)),
            ],
            options={
                'db_table': 'Logintable',
            },
        ),
        migrations.CreateModel(
            name='Register',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('dob', models.DateField()),
                ('phone', models.CharField(max_length=200)),
                ('password', models.CharField(max_length=200)),
                # 'image' stores a path/URL string, not the binary image.
                ('image', models.CharField(max_length=200)),
                ('type', models.CharField(max_length=200)),
            ],
            options={
                'db_table': 'Register',
            },
        ),
    ]
|
989,110 | c9651f3fe786cea782ceb8efd5147a6c905f42fe | #!/usr/bin/env python
"""Main experimental script. Randomly generates dataset and trains NTP model on dataset n times, and reports aggregate results."""
import os
import argparse
import random
import re
from collections import OrderedDict
import datetime
import tensorflow as tf
import numpy as np
from sklearn import metrics
from ntp.modules.train import train_model
from ntp.modules.eval import prop_rules, weighted_prop_rules, weighted_precision, confidence_accuracy
from ntp.modules.generate import gen_simple, gen_relationships, write_data, write_relationships, write_simple_templates, gen_test_kb, gen_constant_dict, count_active
from ntp.util.util_data import load_conf
from ntp.util.util_kb import load_from_list, load_from_file, relationship_id_to_symbol
if __name__ == '__main__':
    # TF 1.x: enable eager mode so the training/eval code can inspect
    # tensor values directly (contrib-summary API below is TF 1.x only).
    tf.enable_eager_execution()
    parser = argparse.ArgumentParser()
    parser.add_argument('-conf_path', default="conf_synth/algexp.conf")
    args = parser.parse_args()
    path = args.conf_path
    conf = load_conf(path)
    # ---- dataset / rule-generation hyper-parameters from the conf file ----
    n_pred = conf["experiment"]["n_pred"]
    n_constants = conf["experiment"]["n_constants"]
    n_rel = conf["experiment"]["n_rel"]
    body_predicates = conf["experiment"]["n_body"]
    order = conf["experiment"]["order"]
    n_rules = conf["experiment"]["n_rules"]
    p_normal = conf["experiment"]["p_normal"]
    p_relationship = conf["experiment"]["p_relationship"]
    # Seed both RNGs so the per-run derived seeds are reproducible.
    base_seed = conf["experiment"]["base_seed"]
    random.seed(base_seed)
    np.random.seed(base_seed)
    n_runs = conf["experiment"]["n_runs"]
    base_dir = conf["logging"]["log_dir"] + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + "/"
    summary_writer = tf.contrib.summary.create_file_writer(base_dir + conf["experiment"]["name"])
    # Metrics accumulated across runs; test-set metrics only when test==True.
    # NOTE(review): `== True` assumes the conf loader yields a real bool,
    # not the string "True" -- confirm against load_conf.
    eval_history = OrderedDict()
    eval_keys = ["prop_rules", "active_facts"]
    if conf["experiment"]["test"] == True:
        eval_keys.extend(["MRR", "randomMRR", "fact-roc-auc"])
    for key in eval_keys:
        eval_history[key] = list()
    auc_helper_list = list()
    for i in range(n_runs):
        print("Run: " + str(i))
        # Each run gets its own log dir and a fresh derived training seed.
        conf["logging"]["log_dir"] = base_dir + "run" + str(i) + "/"
        conf["training"]["seed"] = np.random.randint(100)
        # Randomly generate this run's dataset: relationships, facts, templates.
        relationships = gen_relationships(n_pred, n_rel, body_predicates=body_predicates)
        symbol_relationships = relationship_id_to_symbol(relationships)
        train_data = gen_simple(n_pred, relationships, p_normal, p_relationship, n_constants, order=order)
        train_list = write_data(train_data)
        rules_list = write_simple_templates(n_rules, body_predicates=body_predicates, order=order)
        if conf["experiment"]["test"] == True:
            # Carve a held-out test KB out of the training facts.
            test_kb, train_list = gen_test_kb(train_list, conf["experiment"]["n_test"], conf["experiment"]["test_active_only"], relationships)
        else:
            test_kb = None
        kb = load_from_list(train_list)
        templates = load_from_list(rules_list, rule_template=True)
        rules, confidences, eval_dict = train_model(kb, templates, conf, relationships=symbol_relationships, test_kb=test_kb)
        print(relationships)
        # Dataset statistics: how many facts are 'active' under the rules.
        constant_dict = gen_constant_dict(train_list)
        eval_dict["active_facts"] = count_active(constant_dict, relationships)
        eval_dict["active_ratio"] = eval_dict["active_facts"] / len(train_list)
        # Record only the metrics we pre-registered in eval_history.
        for key, value in eval_dict.items():
            if key in eval_history:
                print(key, value)
                eval_history[key].append(value)
        auc_helper_list.append(eval_dict["auc_helper"])
        print(eval_dict["auc_helper"])
    print(conf["model"], conf["experiment"])
    # Aggregate: log mean of each metric, print mean and standard error.
    with summary_writer.as_default(), tf.contrib.summary.always_record_summaries():
        for key, value in eval_history.items():
            tf.contrib.summary.scalar(key, np.mean(eval_history[key]), step=0)
            print("average " + key + ":", str(np.mean(value)) + " (" + str(np.std(value)/np.sqrt(n_runs)) + ")")
    # Pool the per-run (target, score) pairs and compute one PR-AUC.
    targets, scores = [], []
    for run_tuple in auc_helper_list:
        targets.extend(run_tuple[0])
        scores.extend(run_tuple[1])
    if all(elem == targets[0] for elem in targets):
        # Degenerate case: a single class present; AUC is defined as 1.0 here.
        pr_auc = 1.0
    else:
        pr_auc = metrics.average_precision_score(targets, scores)
    with summary_writer.as_default(), tf.contrib.summary.always_record_summaries():
        tf.contrib.summary.scalar("rule-pr-auc", pr_auc, step=0)
    print("rule-pr-auc: " + str(pr_auc))
|
989,111 | bd2e9c0dc19b155f047bf684bdb5d1b50ed39d08 | import os
import re
RX_LETTER = re.compile(r"^([a-zA-Z]):")
RX_DOLLAR_VAR = re.compile(r"\$([A-Za-z][A-Z,a-z0-9_]+)")
def _expand_context(path, context):
    """Substitute $VAR tokens in *path* using values from *context*.

    Tokens with no matching (non-None) context entry are left untouched.
    Returns *path* unchanged when context is empty or None.
    """
    if not context:
        return path
    expanded = path
    for token in RX_DOLLAR_VAR.finditer(path):
        name = token.group(1)
        value = context.get(name)
        if value is not None:
            expanded = expanded.replace("${}".format(name), value)
    return expanded
def _normalize_dots(components):
currentdir = "."
parentdir = ".."
result = []
for c in components:
if c == currentdir:
pass
elif c == parentdir:
if not len(result):
raise ValueError("Can't resolve components due to '..' overflow:")
del result[-1]
else:
result.append(c)
return result
class Path(object):
    """A path normalized into an optional drive letter plus components.

    Accepts either a pre-split component list or a string (with optional
    $VAR context expansion plus env-var/user expansion), and can re-emit
    the path with POSIX, Windows, or native separators.

    NOTE(review): defining __eq__ without __hash__ leaves instances
    unhashable in Python 3 -- confirm Paths are never used as dict/set keys.
    """

    def __init__(self, path, **kw):
        """Initialize a generic absolute path.

        If path is a list, then each element will be a component of the path.
        If it's a string then expand context variables.
        Also expand, env vars and user unless explicitly told not to with the
        no_expand option.

        Raises:
            ValueError: on an empty path, '..' underflow, or a stray ':'.
        """
        if not path:
            raise ValueError("Empty path")
        if isinstance(path, list):
            ipath = path[:]
            # A leading 'X:' component becomes the drive letter.
            self._drive_letter = ipath.pop(0)[0] if RX_LETTER.match(ipath[0]) else None
            self._components = _normalize_dots(ipath)
            # List input is treated as absolute by construction.
            self._absolute = True
        else:
            context = kw.get("context")
            if context:
                path = _expand_context(path, context)
            if not kw.get("no_expand", False):
                path = os.path.expanduser(os.path.expandvars(path))
            match = RX_LETTER.match(path)
            self._drive_letter = match.group(1) if match else None
            remainder = re.sub(RX_LETTER, "", path)
            self._absolute = remainder[0] in ["/", "\\"]
            if ":" in remainder:
                raise ValueError("Bad characters in path '{}'".format(remainder))
            # Split on either separator style and drop empty segments.
            self._components = _normalize_dots(
                [s for s in re.split("/|\\\\", remainder) if s]
            )
        self._depth = len(self._components)

    def _construct_path(self, sep, with_drive_letter=True):
        """Reconstruct path for given path sep."""
        result = sep.join(self._components)
        if self._absolute:
            result = "{}{}".format(sep, result)
        if with_drive_letter and self._drive_letter:
            result = "{}:{}".format(self._drive_letter, result)
        return result

    def posix_path(self, **kw):
        """Path with forward slashes. Can include drive letter."""
        with_drive_letter = kw.get("with_drive", True)
        return self._construct_path("/", with_drive_letter)

    def windows_path(self, **kw):
        """Path with back slashes. Can include drive letter."""
        with_drive_letter = kw.get("with_drive", True)
        return self._construct_path("\\", with_drive_letter)

    def os_path(self, **kw):
        """Path with slashes for current os. Can include drive letter."""
        with_drive = kw.get("with_drive", True)
        if os.name == "nt":
            return self.windows_path(with_drive=with_drive)
        return self.posix_path(with_drive=with_drive)

    def startswith(self, path):
        """True when this path's POSIX form starts with *path*'s POSIX form."""
        return self.posix_path().startswith(path.posix_path())

    def endswith(self, suffix):
        """True when the POSIX form ends with the string *suffix*."""
        return self.posix_path().endswith(suffix)

    def __len__(self):
        return len(self.posix_path())

    def __eq__(self, rhs):
        # BUG FIX: comparing against a non-Path used to *raise*
        # NotImplementedError.  The equality protocol expects the
        # NotImplemented sentinel to be returned instead, so that
        # `path == "foo"` evaluates to False rather than crashing.
        if not isinstance(rhs, Path):
            return NotImplemented
        return self.posix_path() == rhs.posix_path()

    def __ne__(self, rhs):
        result = self.__eq__(rhs)
        if result is NotImplemented:
            return result
        return not result

    @property
    def depth(self):
        """Number of path components (drive letter excluded)."""
        return self._depth

    @property
    def drive_letter(self):
        """Drive letter without the colon, or '' when absent."""
        return self._drive_letter or ""

    @property
    def absolute(self):
        return self._absolute

    @property
    def relative(self):
        return not self._absolute

    @property
    def components(self):
        return self._components or []

    @property
    def all_components(self):
        """Components including a leading 'X:' entry when a drive is set."""
        if self.drive_letter:
            return ["{}:".format(self.drive_letter)] + self.components
        else:
            return self.components

    @property
    def tail(self):
        """Last component (basename), or None for the bare root."""
        return self._components[-1] if self._components else None
|
989,112 | 30ac9f930d7d47b33bb2b6383c3e36601e378545 | #!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from PyInstaller.utils.hooks import collect_data_files
# Collect onefuzz's bundled data files so PyInstaller packages them
# alongside the frozen executable.
datas = collect_data_files("onefuzz")
|
989,113 | 9e53b92395755b64037d4bf26120b7dd50ebda30 | #coding:utf-8
import binascii,base64,pyDes,random
class DES(object):
    """Triple-DES (CBC mode, PKCS5 padding) wrapper around pyDes.

    iv and key are hex-encoded strings; ciphertext is base64 text.
    NOTE: this file is Python 2 (`print` statements in the main guard);
    base64.encodestring/decodestring were removed in Python 3.9.
    """
    def __init__(self, iv, key):
        # iv: hex string decoded to 8 raw bytes; key: hex string decoded
        # to 24 raw bytes for triple-DES.
        self.iv = iv
        self.key = key
    def encrypt(self, data):
        """Encrypt *data* (bytes) and return base64-encoded ciphertext."""
        iv = binascii.unhexlify(self.iv)
        key = binascii.unhexlify(self.key)
        k = pyDes.triple_des(key, pyDes.CBC, iv, pad=None, padmode=pyDes.PAD_PKCS5)
        d = k.encrypt(data)
        d = base64.encodestring(d)
        return d
    def decrypt(self, data):
        """Decrypt base64 *data*; returns '' on any failure.

        NOTE(review): the bare except swallows every error (bad base64,
        wrong key, bad padding, even KeyboardInterrupt) -- consider
        narrowing to the expected exception types.
        """
        iv = binascii.unhexlify(self.iv)
        key = binascii.unhexlify(self.key)
        k = pyDes.triple_des(key, pyDes.CBC, iv, pad=None, padmode=pyDes.PAD_PKCS5)
        try:
            data = base64.decodestring(data)
            d = k.decrypt(data)
        except:
            d = ''
        return d
def encrypt(s):
    """Encrypt the text *s* with the fixed triple-DES iv/key.

    Returns:
        (base64 ciphertext, iv, key) -- iv/key are returned so the caller
        can hand them to decrypt().

    SECURITY: iv and key are hard-coded constants, so every deployment
    shares the same key material -- flagging for review.
    """
    # Dead code removed: two random.randint(...) results were assigned to
    # iv/key and then immediately overwritten by the constants below, so
    # the randomness never affected the output.
    iv = '1598539867510225'
    key = '269512710688838203243831550735328182008578810396'
    obj = DES(iv, key)
    encryptdata = obj.encrypt(s.encode('utf-8')).strip()
    return encryptdata, iv, key
def decrypt(s, iv, key):
    """Decrypt base64 ciphertext *s* with *iv*/*key* and return the text.

    BUG FIX: the old body overwrote all three parameters with hard-coded
    debug values ('0V/hDHWDQbg=' and the fixed iv/key), so whatever the
    caller passed in was silently ignored; the arguments are now honored.
    """
    des = DES(iv, key)
    decryptdata = des.decrypt(s)
    return decryptdata.decode('utf-8')
if __name__ == '__main__':
    # Round-trip smoke test.  Python 2 print statements: this script will
    # not run under Python 3 as-is.
    encryptdata,iv,key = encrypt('test')
    print encryptdata
    print decrypt(encryptdata,iv,key)
|
989,114 | 1ddc3543eaabc2504f5e7c9854afec99abf24938 | # -*- coding: utf-8 -*-
# Author : Bikmamatov Ildar
# Module : django_ext
# Description : Helper functions for the websync.py module
import os, json, re, sys, subprocess, time, traceback
import asyncio, yaml
#sys.path.insert(0, os.getcwd())
import libftp as aioftp
from .dict import *
def loadJsonFromString(data):
    """Parse JSON text, tolerating a trailing comma before '}' or ']'.

    Strict JSON rejects trailing commas, a common hand-edit artifact;
    the old code only stripped the exact two-character sequence ',}' --
    this also handles intervening whitespace and ',]'.

    NOTE: the comma-stripping regex does not understand string literals,
    so a literal ",}" INSIDE a JSON string would still be mangled (same
    limitation as before).
    """
    data = data.strip()
    # Drop trailing commas such as '{"a": 1,}' or '[1, 2, ]'.
    data = re.sub(r',\s*([}\]])', r'\1', data)
    json_data = json.loads(data)
    return json_data
def loadJsonFromFile(file_name):
    """Read *file_name* as UTF-8 and parse it via loadJsonFromString.

    BUG FIX: the old body opened `path`, a name that does not exist in
    this function, raising NameError on every call; it now uses the
    `file_name` parameter.  A `with` block also guarantees the handle is
    closed even when reading fails.
    """
    with open(file_name, 'rb') as fh:
        data = fh.read().decode('utf-8')
    return loadJsonFromString(data)
def loadYamlFromFile(path):
    """Read *path* as UTF-8, normalize tabs to spaces, and parse as YAML.

    Uses yaml.safe_load: a bare yaml.load(data) is deprecated and can
    execute arbitrary Python when fed an untrusted document.
    NOTE(review): safe_load rejects custom YAML tags -- confirm none of
    the config files rely on them.
    """
    with open(path, 'rb') as fh:
        data = fh.read().decode('utf-8')
    # YAML forbids tabs in indentation; replace them defensively.
    data = re.sub(r'\t', ' ', data)
    cfg = yaml.safe_load(data)
    return cfg
def getFtpParamsByHost(cfg, host):
    """Return the FTP connection settings block for *host*, or None."""
    ftp_section = xarr(cfg, 'ftp', None, TypeObject)
    return xarr(ftp_section, str(host), None, TypeObject)
def getProjectParams(cfg, project):
    """Return the configuration block for *project*, or None."""
    all_projects = xarr(cfg, 'projects', None, TypeObject)
    return xarr(all_projects, str(project), None, TypeObject)
def getProjectFtpParamsByHost(cfg, project, host):
    """Return the per-project FTP settings for *host*, or None."""
    ftp_block = xarr(getProjectParams(cfg, project), 'ftp', None, TypeObject)
    return xarr(ftp_block, host, None, TypeObject)
# ---- FTP operations ----
async def connect(host, port, username, password):
    """Open an aioftp client connection to *host* and log in.

    NOTE(review): the *port* argument is accepted but never passed to
    ftp.connect(), so the client always uses the library's default port.
    Confirm whether `await ftp.connect(host, port)` was intended -- note
    the caller's default of '22' (the SSH port) is also suspicious for FTP,
    so honoring the port blindly could break working setups.
    """
    ftp = aioftp.Client()
    await ftp.connect(host)
    await ftp.login(username, password)
    return ftp
class BaseDownloadClass:
    """Holds the connection/config state shared by download back-ends."""

    def __init__(self, *args, **kwargs):
        # Every setting starts unset; setConfig()/run() fill them in later.
        self.config = None
        self.host = None
        self.port = None
        self.username = None
        self.password = None
        self.exclude = None
        self.localPath = None
        self.downloadPath = None

    def setConfig(self, config):
        """Attach the parsed configuration object."""
        self.config = config
class DownloadFtp(BaseDownloadClass):
    """Recursively mirrors a remote FTP tree into self.localPath.

    One lister task walks the remote tree and feeds a queue; several
    worker tasks consume the queue concurrently.
    NOTE(review): __init__ does not call super().__init__(), so the
    attributes declared in BaseDownloadClass only exist after run()
    assigns them -- confirm that is intentional.
    """
    def __init__(self, *args, **kwargs):
        # isStop is set when the tree walk finishes (or fails); workers
        # then drain the remaining queue entries and exit.
        self.isStop = asyncio.Event()
        self.isStop.clear()
        self.future = None
        self.loop = None
        self.downloadQueue = asyncio.Queue()
        self.downloadTasksCount=0

    async def listTreeRecursive(self, ftp, path):
        """Walk *path* depth-first, queueing ('file'|'symlink', path) entries."""
        list_dir=[]
        print ("Listing folder " + ConsoleColors.OKBLUE + path + ConsoleColors.ENDC)
        # Mirror the remote directory structure locally before queueing files.
        tmp_path = str(path)[len(self.downloadPath):]
        upload_path = joinPath(self.localPath, tmp_path)
        mkdir(upload_path, isFile=False)
        # NOTE: the loop variable deliberately reuses (shadows) the `path`
        # parameter; `type` also shadows the builtin.
        for path, info in (await ftp.list(path, recursive=False)):
            path = str(path)
            type = info.get('type')
            #print (path)
            #print (info)
            if type == 'dir':
                list_dir.append(path)
            elif type == 'file':
                await self.downloadQueue.put( ('file',path) )
            elif 'symlink' in type:
                await self.downloadQueue.put( ('symlink',path) )
                pass
        # Recurse only after the current listing has been fully consumed.
        for path in list_dir:
            await self.listTreeRecursive(ftp, path)

    async def listTree(self, path):
        """Top-level tree walk; signals isStop so the workers can finish."""
        try:
            ftp = await connect(self.host, self.port, self.username, self.password)
            await self.listTreeRecursive(ftp, path)
            await ftp.quit()
        except Exception as e:
            traceback.print_exc()
        # Signal the workers whether the walk succeeded or failed.
        self.isStop.set()

    async def downloadFiles(self):
        """Worker: pull (type, path) entries off the queue until drained."""
        try:
            ftp = await connect(self.host, self.port, self.username, self.password)
            # Assign this worker a 1-based index for log prefixes.
            self.downloadTasksCount = self.downloadTasksCount + 1
            currentIndex = self.downloadTasksCount;
            while True:
                try:
                    (type, download_path) = self.downloadQueue.get_nowait()
                    if type == 'file':
                        # Map the remote path onto the local mirror root.
                        path = str(download_path)[len(self.downloadPath):]
                        upload_path = joinPath(self.localPath, path)
                        print ('['+str(currentIndex)+"] Download file " + ConsoleColors.OKGREEN + download_path + ConsoleColors.ENDC)
                        # NOTE(review): the actual transfer is commented out,
                        # so this currently only logs (a dry run) -- confirm.
                        #await ftp.download(download_path, upload_path, write_into=True)
                    elif type == 'symlink':
                        # Symlinks are only inspected, not recreated locally.
                        info = await ftp.ls_stat(download_path)
                        print (info)
                        info = await ftp.stat(download_path)
                        print (info)
                        pass
                except asyncio.QueueEmpty:
                    #print (self.isStop.is_set())
                    # Empty queue AND lister finished -> this worker is done;
                    # otherwise wait for more entries to arrive.
                    if self.isStop.is_set():
                        break;
                    await asyncio.sleep(1)
                except Exception as e:
                    # Per-item failures are logged and the worker keeps going.
                    print ('['+str(currentIndex)+'] Error: ' + str(e))
                    traceback.print_exc()
            #print ('exit from downloadFiles')
            await ftp.quit()
            pass
        except Exception as e:
            traceback.print_exc()

    async def mainLoop(self):
        """Run one lister plus five download workers to completion."""
        tasks=[]
        tasks.append(asyncio.ensure_future(self.listTree(self.downloadPath)))
        for i in range(0,5):
            tasks.append(asyncio.ensure_future(self.downloadFiles()))
        await asyncio.wait(tasks)

    async def gotResult(self, future):
        """Callback from the disabled run_forever experiment below; unused."""
        #future.result()
        await asyncio.sleep(2.0)
        self.loop.stop()

    def run(self, project, host):
        """Entry point: resolve config for (project, host) and mirror the tree.

        Returns:
            1 on completion, 0 when a required setting is missing.
        """
        ftp_params = getFtpParamsByHost(self.config, host)
        project_params = getProjectParams(self.config, project)
        project_ftp_params = getProjectFtpParamsByHost(self.config, project, host)
        self.host = xarr(ftp_params, 'host', None, TypeString)
        # NOTE(review): default port '22' is the SSH port, not FTP (21) --
        # confirm; connect() currently ignores the port in any case.
        self.port = xarr(ftp_params, 'port', '22', TypeString)
        self.username = xarr(ftp_params, 'user', None, TypeString)
        self.password = xarr(ftp_params, 'pass', None, TypeString)
        self.exclude = xarr(project_params,'exclude',None,TypeArray)
        self.localPath = xarr(project_params,'local_path',None,TypeString)
        self.downloadPath = xarr(project_ftp_params, 'download', None, TypeString)
        if self.host == None or self.username == None or self.password == None:
            print ('Ftp params for host ' + host + ' is not set ')
            return 0
        if self.localPath == None:
            print ('Local path for project ' + project + ' is not set ')
            return 0
        if self.downloadPath == None:
            print ('Ftp ' + host + ' for project ' + project + ' is not set ')
            return 0
        print ('Скачать по ФТП с сервера '+ConsoleColors.OKBLUE+self.host+ConsoleColors.ENDC+' из папки '+ConsoleColors.OKGREEN+self.downloadPath+ConsoleColors.ENDC+' в папку ' +ConsoleColors.WARNING+self.localPath+ConsoleColors.ENDC)
        # The interactive confirmation below is disabled; always proceed.
        #if query_yes_no("Вы действительно хотите скачать с FTP данные?", "no"):
        if True:
            self.loop = asyncio.get_event_loop()
            self.loop.run_until_complete(self.mainLoop())
            # Dead experiment with run_forever/Future callbacks, kept as-is:
            """
            #asyncio.wait(tasks)
            self.future = asyncio.Future()
            self.future.add_done_callback(self.gotResult)
            tasks = [
                asyncio.ensure_future(self.downloadFiles()),
                asyncio.ensure_future(self.mainLoop())
            ]
            #asyncio.wait(tasks)
            try:
                self.loop.run_forever()
            finally:
                self.loop.close()
            #loop.run_until_complete(self.main_loop())
            """
            print ('End!')
            pass
        return 1
|
989,115 | aa3040585b40f6bfe86456410587754b5ffb2069 | from System import *
from System.Collections.Specialized import *
from System.IO import *
from System.Text import *
from Deadline.Scripting import *
from DeadlineUI.Controls.Scripting.DeadlineScriptDialog import DeadlineScriptDialog
# For Integration UI
import imp
import os
imp.load_source( 'IntegrationUI', RepositoryUtils.GetRepositoryFilePath( "submission/Integration/Main/IntegrationUI.py", True ) )
import IntegrationUI
########################################################################
## Globals
########################################################################
scriptDialog = None
settings = None
ProjectManagementOptions = ["Shotgun","FTrack"]
DraftRequested = False
########################################################################
## Main Function Called By Deadline
########################################################################
def __main__( *args ):
    """Build and show the Deadline submission dialog for Vue scenes.

    Called by Deadline's script host.  Optional positional args pre-fill
    the dialog: scene file, job name, frame count, Vue version, build.
    """
    global scriptDialog
    global settings
    global integration_dialog

    scriptDialog = DeadlineScriptDialog()
    scriptDialog.SetTitle( "Submit Vue Job To Deadline" )
    scriptDialog.SetIcon( scriptDialog.GetIcon( 'Vue' ) )

    # ---- Job Options tab: job description fields ----
    scriptDialog.AddTabControl("Tabs", 0, 0)
    scriptDialog.AddTabPage("Job Options")
    scriptDialog.AddGrid()
    scriptDialog.AddControlToGrid( "Separator1", "SeparatorControl", "Job Description", 0, 0, colSpan=2 )
    scriptDialog.AddControlToGrid( "NameLabel", "LabelControl", "Job Name", 1, 0, "The name of your job. This is optional, and if left blank, it will default to 'Untitled'.", False )
    scriptDialog.AddControlToGrid( "NameBox", "TextControl", "Untitled", 1, 1 )
    scriptDialog.AddControlToGrid( "CommentLabel", "LabelControl", "Comment", 2, 0, "A simple description of your job. This is optional and can be left blank.", False )
    scriptDialog.AddControlToGrid( "CommentBox", "TextControl", "", 2, 1 )
    scriptDialog.AddControlToGrid( "DepartmentLabel", "LabelControl", "Department", 3, 0, "The department you belong to. This is optional and can be left blank.", False )
    scriptDialog.AddControlToGrid( "DepartmentBox", "TextControl", "", 3, 1 )
    scriptDialog.EndGrid()

    # ---- Job scheduling options (pools, priority, limits, dependencies) ----
    scriptDialog.AddGrid()
    scriptDialog.AddControlToGrid( "Separator2", "SeparatorControl", "Job Options", 0, 0, colSpan=3 )
    scriptDialog.AddControlToGrid( "PoolLabel", "LabelControl", "Pool", 1, 0, "The pool that your job will be submitted to.", False )
    scriptDialog.AddControlToGrid( "PoolBox", "PoolComboControl", "none", 1, 1 )
    scriptDialog.AddControlToGrid( "SecondaryPoolLabel", "LabelControl", "Secondary Pool", 2, 0, "The secondary pool lets you specify a Pool to use if the primary Pool does not have any available Slaves.", False )
    scriptDialog.AddControlToGrid( "SecondaryPoolBox", "SecondaryPoolComboControl", "", 2, 1 )
    scriptDialog.AddControlToGrid( "GroupLabel", "LabelControl", "Group", 3, 0, "The group that your job will be submitted to.", False )
    scriptDialog.AddControlToGrid( "GroupBox", "GroupComboControl", "none", 3, 1 )
    scriptDialog.AddControlToGrid( "PriorityLabel", "LabelControl", "Priority", 4, 0, "A job can have a numeric priority ranging from 0 to 100, where 0 is the lowest priority and 100 is the highest priority.", False )
    scriptDialog.AddRangeControlToGrid( "PriorityBox", "RangeControl", RepositoryUtils.GetMaximumPriority() / 2, 0, RepositoryUtils.GetMaximumPriority(), 0, 1, 4, 1 )
    scriptDialog.AddControlToGrid( "TaskTimeoutLabel", "LabelControl", "Task Timeout", 5, 0, "The number of minutes a slave has to render a task for this job before it requeues it. Specify 0 for no limit.", False )
    scriptDialog.AddRangeControlToGrid( "TaskTimeoutBox", "RangeControl", 0, 0, 1000000, 0, 1, 5, 1 )
    scriptDialog.AddSelectionControlToGrid( "AutoTimeoutBox", "CheckBoxControl", False, "Enable Auto Task Timeout", 5, 2, "If the Auto Task Timeout is properly configured in the Repository Options, then enabling this will allow a task timeout to be automatically calculated based on the render times of previous frames for the job. " )
    scriptDialog.AddControlToGrid( "ConcurrentTasksLabel", "LabelControl", "Concurrent Tasks", 6, 0, "The number of tasks that can render concurrently on a single slave. This is useful if the rendering application only uses one thread to render and your slaves have multiple CPUs.", False )
    scriptDialog.AddRangeControlToGrid( "ConcurrentTasksBox", "RangeControl", 1, 1, 16, 0, 1, 6, 1 )
    scriptDialog.AddSelectionControlToGrid( "LimitConcurrentTasksBox", "CheckBoxControl", True, "Limit Tasks To Slave's Task Limit", 6, 2, "If you limit the tasks to a slave's task limit, then by default, the slave won't dequeue more tasks then it has CPUs. This task limit can be overridden for individual slaves by an administrator." )
    scriptDialog.AddControlToGrid( "MachineLimitLabel", "LabelControl", "Machine Limit", 7, 0, "", False )
    scriptDialog.AddRangeControlToGrid( "MachineLimitBox", "RangeControl", 0, 0, 1000000, 0, 1, 7, 1 )
    scriptDialog.AddSelectionControlToGrid( "IsBlacklistBox", "CheckBoxControl", False, "Machine List Is A Blacklist", 7, 2, "" )
    scriptDialog.AddControlToGrid( "MachineListLabel", "LabelControl", "Machine List", 8, 0, "Use the Machine Limit to specify the maximum number of machines that can render your job at one time. Specify 0 for no limit.", False )
    scriptDialog.AddControlToGrid( "MachineListBox", "MachineListControl", "", 8, 1, colSpan=2 )
    scriptDialog.AddControlToGrid( "LimitGroupLabel", "LabelControl", "Limits", 9, 0, "The Limits that your job requires.", False )
    scriptDialog.AddControlToGrid( "LimitGroupBox", "LimitGroupControl", "", 9, 1, colSpan=2 )
    scriptDialog.AddControlToGrid( "DependencyLabel", "LabelControl", "Dependencies", 10, 0, "Specify existing jobs that this job will be dependent on. This job will not start until the specified dependencies finish rendering. ", False )
    scriptDialog.AddControlToGrid( "DependencyBox", "DependencyControl", "", 10, 1, colSpan=2 )
    scriptDialog.AddControlToGrid( "OnJobCompleteLabel", "LabelControl", "On Job Complete", 11, 0, "If desired, you can automatically archive or delete the job when it completes. ", False )
    scriptDialog.AddControlToGrid( "OnJobCompleteBox", "OnJobCompleteControl", "Nothing", 11, 1 )
    scriptDialog.AddSelectionControlToGrid( "SubmitSuspendedBox", "CheckBoxControl", False, "Submit Job As Suspended", 11, 2, "If enabled, the job will submit in the suspended state. This is useful if you don't want the job to start rendering right away. Just resume it from the Monitor when you want it to render. " )
    scriptDialog.EndGrid()

    # ---- Vue-specific options (scene, output, frames, version, build) ----
    scriptDialog.AddGrid()
    scriptDialog.AddControlToGrid( "Separator3", "SeparatorControl", "Vue Options", 0, 0, colSpan=3 )
    scriptDialog.AddControlToGrid( "SceneLabel", "LabelControl", "Vue File", 1, 0, "The Vue scene file to be rendered. ", False )
    scriptDialog.AddSelectionControlToGrid( "SceneBox", "MultiFileBrowserControl", "", "Vue Files (*.vue);;All Files (*)", 1, 1, colSpan=2 )
    scriptDialog.AddControlToGrid("OutputLabel","LabelControl","Output Folder",2, 0, "The folder where your output will be dumped (this should be a shared folder on the network). ", False)
    scriptDialog.AddSelectionControlToGrid("OutputBox","FolderBrowserControl", "","", 2, 1, colSpan=2)
    scriptDialog.AddControlToGrid( "FramesLabel", "LabelControl", "Frame List", 3, 0, "The list of frames to render.", False )
    scriptDialog.AddControlToGrid( "FramesBox", "TextControl", "", 3, 1 )
    overrideOutputBox = scriptDialog.AddSelectionControlToGrid("OverrideOutputBox","CheckBoxControl",False,"Override Output Path", 3, 2, "Whether or not to override the output path specified in the scene.")
    overrideOutputBox.ValueModified.connect(OverrideOutputBoxChanged)
    scriptDialog.AddControlToGrid( "ChunkSizeLabel", "LabelControl", "Frames Per Task", 4, 0, "This is the number of frames that will be rendered at a time for each job task. ", False )
    scriptDialog.AddRangeControlToGrid( "ChunkSizeBox", "RangeControl", 1, 1, 1000000, 0, 1, 4, 1 )
    scriptDialog.AddSelectionControlToGrid("SubmitSceneBox","CheckBoxControl",False,"Submit Vue Scene", 4, 2, "If this option is enabled, the scene file will be submitted with the job, and then copied locally to the slave machine during rendering.")
    scriptDialog.AddControlToGrid( "VersionLabel", "LabelControl", "Version", 5, 0, "The version of Vue to render with.", False )
    scriptDialog.AddComboControlToGrid( "VersionBox", "ComboControl", "2016", ("7","8","9","10","11","2014","2015","2016"), 5, 1 )
    animationBox = scriptDialog.AddSelectionControlToGrid("AnimationBox","CheckBoxControl",True,"Render Animation Sequence", 5, 2, "Whether or not to render the full animation instead of a single frame.")
    animationBox.ValueModified.connect(AnimationChanged)
    scriptDialog.AddControlToGrid( "BuildLabel", "LabelControl", "Build To Force", 6, 0, "You can force 32 or 64 bit rendering with this option.", False )
    scriptDialog.AddComboControlToGrid( "BuildBox", "ComboControl", "None", ("None","32bit","64bit"), 6, 1 )
    scriptDialog.EndGrid()
    scriptDialog.EndTabPage()

    # ---- Pipeline-integration tabs (Shotgun / FTrack) ----
    integration_dialog = IntegrationUI.IntegrationDialog()
    integration_dialog.AddIntegrationTabs( scriptDialog, "VueMonitor", DraftRequested, ProjectManagementOptions, failOnNoTabs=False )
    scriptDialog.EndTabControl()

    # ---- Submit / Close buttons ----
    scriptDialog.AddGrid()
    scriptDialog.AddHorizontalSpacerToGrid( "HSpacer1", 0, 0 )
    submitButton = scriptDialog.AddControlToGrid( "SubmitButton", "ButtonControl", "Submit", 0, 1, expand=False )
    submitButton.ValueModified.connect(SubmitButtonPressed)
    closeButton = scriptDialog.AddControlToGrid( "CloseButton", "ButtonControl", "Close", 0, 2, expand=False )
    closeButton.ValueModified.connect(scriptDialog.closeEvent)
    scriptDialog.EndGrid()

    # Sticky settings persisted between dialog sessions.
    #Application Box must be listed before version box or else the application changed event will change the version
    settings = ("DepartmentBox","CategoryBox","PoolBox","SecondaryPoolBox","GroupBox","PriorityBox","MachineLimitBox","IsBlacklistBox","MachineListBox","LimitGroupBox","SceneBox","OutputBox","OverrideOutputBox","AnimationBox","SubmitSceneBox","FramesBox","ChunkSizeBox","VersionBox","BuildBox")
    scriptDialog.LoadSettings( GetSettingsFilename(), settings )
    scriptDialog.EnabledStickySaving( settings, GetSettingsFilename() )

    # Sync dependent-control enabled states with the loaded values.
    AnimationChanged( None )
    OverrideOutputBoxChanged( None )

    # Pre-fill the dialog from positional args when launched from Vue.
    if len( args ) > 0:
        scriptDialog.SetValue( "SceneBox", args[0] )
    if len( args ) > 1:
        scriptDialog.SetValue( "NameBox", args[1] )
    if len( args ) > 2:
        # args[2] is a frame COUNT; convert to a 0-based inclusive range.
        scriptDialog.SetValue( "FramesBox", "0-" + str(int(args[2])-1) )
    if len( args ) > 3:
        scriptDialog.SetValue( "VersionBox", args[3] )
    if len( args ) > 4:
        scriptDialog.SetValue( "BuildBox", args[4] )

    scriptDialog.ShowDialog( len( args ) > 0 )
def AnimationChanged(*args):
    """Enable the frame-range controls only when an animation is requested."""
    is_animation = scriptDialog.GetValue("AnimationBox")
    scriptDialog.SetEnabled("FramesBox", is_animation)
    scriptDialog.SetEnabled("ChunkSizeBox", is_animation)
def OverrideOutputBoxChanged(*args):
    """Grey out the output-folder picker unless the override box is ticked."""
    override_enabled = scriptDialog.GetValue("OverrideOutputBox")
    for control_name in ("OutputLabel", "OutputBox"):
        scriptDialog.SetEnabled(control_name, override_enabled)
def GetSettingsFilename():
    """Return the per-user path of this submitter's sticky-settings INI file."""
    return Path.Combine( ClientUtils.GetUsersSettingsDirectory(), "VueSettings.ini" )
def SubmitButtonPressed(*args):
    """Validate the dialog inputs and submit one Deadline job per Vue file."""
    global scriptDialog
    # NOTE(review): `shotgunSettings` is never defined at module level here
    # (the sticky-settings global is `settings`); this looks like a leftover
    # declaration -- confirm and remove.
    global shotgunSettings

    # Check if vue files exist.
    sceneFiles = StringUtils.FromSemicolonSeparatedString( scriptDialog.GetValue( "SceneBox" ), False )
    if( len( sceneFiles ) == 0 ):
        scriptDialog.ShowMessageBox( "No vue file specified", "Error" )
        return

    for sceneFile in sceneFiles:
        if( not File.Exists( sceneFile ) ):
            scriptDialog.ShowMessageBox( "Vue file %s does not exist" % sceneFile, "Error" )
            return
        # Warn (not fail) when the scene is local but won't be shipped with the job.
        elif (not scriptDialog.GetValue("SubmitSceneBox") and PathUtils.IsPathLocal(sceneFile)):
            result = scriptDialog.ShowMessageBox( "Vue file %s is local. Are you sure you want to continue" % sceneFile, "Warning", ("Yes","No") )
            if(result=="No"):
                return

    # Check output path
    overrideOutput = bool(scriptDialog.GetValue( "OverrideOutputBox" ))
    outputPath = scriptDialog.GetValue( "OutputBox" ).strip()
    if overrideOutput:
        if len(outputPath) == 0:
            scriptDialog.ShowMessageBox( "Please specify an output Path.", "Error" )
            return
        # Check if output path is local
        if( PathUtils.IsPathLocal(outputPath) ):
            result = scriptDialog.ShowMessageBox( "The output path %s is local. Are you sure you want to continue?" % outputPath, "Warning", ("Yes","No") )
            if(result=="No"):
                return

    # Check if Integration options are valid.
    if not integration_dialog.CheckIntegrationSanity( ):
        return

    # Check if a valid frame range has been specified.
    frames = scriptDialog.GetValue( "FramesBox" )
    if( not FrameUtils.FrameRangeValid( str(frames) ) ):
        scriptDialog.ShowMessageBox( "Frame range %s is not valid" % str(frames), "Error" )
        return

    successes = 0
    failures = 0

    # Submit each scene file separately.
    for sceneFile in sceneFiles:
        jobName = scriptDialog.GetValue( "NameBox" )
        # Distinguish multi-file submissions by appending the filename.
        if len(sceneFiles) > 1:
            jobName = jobName + " [" + Path.GetFileName( sceneFile ) + "]"

        # Create job info file.
        jobInfoFilename = Path.Combine( ClientUtils.GetDeadlineTempPath(), "vue_job_info.job" )
        writer = StreamWriter( jobInfoFilename, False, Encoding.Unicode )
        writer.WriteLine( "Plugin=Vue" )
        writer.WriteLine( "Name=%s" % jobName )
        writer.WriteLine( "Comment=%s" % scriptDialog.GetValue( "CommentBox" ) )
        writer.WriteLine( "Department=%s" % scriptDialog.GetValue( "DepartmentBox" ) )
        writer.WriteLine( "Pool=%s" % scriptDialog.GetValue( "PoolBox" ) )
        writer.WriteLine( "SecondaryPool=%s" % scriptDialog.GetValue( "SecondaryPoolBox" ) )
        writer.WriteLine( "Group=%s" % scriptDialog.GetValue( "GroupBox" ) )
        writer.WriteLine( "Priority=%s" % scriptDialog.GetValue( "PriorityBox" ) )
        writer.WriteLine( "TaskTimeoutMinutes=%s" % scriptDialog.GetValue( "TaskTimeoutBox" ) )
        writer.WriteLine( "EnableAutoTimeout=%s" % scriptDialog.GetValue( "AutoTimeoutBox" ) )
        writer.WriteLine( "ConcurrentTasks=%s" % scriptDialog.GetValue( "ConcurrentTasksBox" ) )
        writer.WriteLine( "LimitConcurrentTasksToNumberOfCpus=%s" % scriptDialog.GetValue( "LimitConcurrentTasksBox" ) )
        writer.WriteLine( "MachineLimit=%s" % scriptDialog.GetValue( "MachineLimitBox" ) )
        # The machine list acts as either a black- or whitelist.
        if( bool(scriptDialog.GetValue( "IsBlacklistBox" )) ):
            writer.WriteLine( "Blacklist=%s" % scriptDialog.GetValue( "MachineListBox" ) )
        else:
            writer.WriteLine( "Whitelist=%s" % scriptDialog.GetValue( "MachineListBox" ) )
        writer.WriteLine( "LimitGroups=%s" % scriptDialog.GetValue( "LimitGroupBox" ) )
        writer.WriteLine( "JobDependencies=%s" % scriptDialog.GetValue( "DependencyBox" ) )
        writer.WriteLine( "OnJobComplete=%s" % scriptDialog.GetValue( "OnJobCompleteBox" ) )
        if overrideOutput:
            writer.WriteLine( "OutputFilename0=%s" % outputPath )
        if( bool(scriptDialog.GetValue( "SubmitSuspendedBox" )) ):
            writer.WriteLine( "InitialStatus=Suspended" )
        # Single-frame render when no animation sequence is requested.
        if(scriptDialog.GetValue("AnimationBox")):
            writer.WriteLine("Frames=" + str(frames))
            writer.WriteLine("ChunkSize=%s" % scriptDialog.GetValue( "ChunkSizeBox"))
        else:
            writer.WriteLine("Frames=0")
            writer.WriteLine("ChunkSize=1")

        #Shotgun
        extraKVPIndex = 0
        groupBatch = False
        if integration_dialog.IntegrationProcessingRequested():
            extraKVPIndex = integration_dialog.WriteIntegrationInfo( writer, extraKVPIndex )
            groupBatch = groupBatch or integration_dialog.IntegrationGroupBatchRequested()
        if groupBatch:
            writer.WriteLine( "BatchName=%s\n" % (jobName ) )
        writer.Close()

        # Create plugin info file.
        pluginInfoFilename = Path.Combine( ClientUtils.GetDeadlineTempPath(), "vue_plugin_info.job" )
        writer = StreamWriter( pluginInfoFilename, False, Encoding.Unicode )
        # When the scene is submitted with the job, the path is passed as an
        # auxiliary file instead of a SceneFile entry.
        if( not scriptDialog.GetValue("SubmitSceneBox") ):
            writer.WriteLine( "SceneFile=" + sceneFile )
        if overrideOutput:
            writer.WriteLine( "OutputPath=%s" % outputPath )
        writer.WriteLine( "OverrideOutputBox=%s" % overrideOutput )
        writer.WriteLine( "Version=%s" % scriptDialog.GetValue("VersionBox") )
        writer.WriteLine( "Build=%s" % scriptDialog.GetValue("BuildBox") )
        writer.WriteLine( "Animation=%s" % scriptDialog.GetValue("AnimationBox") )
        writer.Close()

        # Setup the command line arguments.
        arguments = StringCollection()
        arguments.Add( jobInfoFilename )
        arguments.Add( pluginInfoFilename )
        if scriptDialog.GetValue( "SubmitSceneBox" ):
            arguments.Add( sceneFile )

        # Single submission: show the full output; multiple: count results.
        if( len( sceneFiles ) == 1 ):
            results = ClientUtils.ExecuteCommandAndGetOutput( arguments )
            scriptDialog.ShowMessageBox( results, "Submission Results" )
        else:
            # Now submit the job.
            exitCode = ClientUtils.ExecuteCommand( arguments )
            if( exitCode == 0 ):
                successes = successes + 1
            else:
                failures = failures + 1

    if( len( sceneFiles ) > 1 ):
        scriptDialog.ShowMessageBox( "Jobs submitted successfully: %d\nJobs not submitted: %d" % (successes, failures), "Submission Results" )
|
989,116 | bfc201389201f18552f7bfea75976f8eca27a470 | # STRETCH: implement Linear Search
def linear_search(arr, target):
    """Scan arr left to right; return the index of the first match, or -1."""
    for index, value in enumerate(arr):
        if value == target:
            return index
    return -1  # target not present
# STRETCH: write an iterative implementation of Binary Search
def binary_search(arr, target):
    """Iterative binary search over a sorted (ascending) list.

    Returns the index of `target` in `arr`, or -1 if it is not present.

    Fixes over the original:
    - `mid` was computed as int(high/2), ignoring `low`, so the probe point
      was wrong for any non-zero `low`.
    - the loop (`while found == False`) never terminated when the target was
      absent: `found` was never set and `high = mid` / `low = mid` do not
      shrink the window, so the search hung forever.
    """
    low = 0
    high = len(arr) - 1  # inclusive bounds; empty arr gives high = -1
    while low <= high:
        mid = (low + high) // 2
        if arr[mid] == target:
            return mid
        elif arr[mid] > target:
            # Discard rhs (mid already checked, so exclude it).
            high = mid - 1
        else:
            # Discard lhs.
            low = mid + 1
    return -1  # not found
# STRETCH: write a recursive implementation of Binary Search
def binary_search_recursive(arr, target, low, high):
    """Recursive binary search over arr[low..high] (inclusive bounds).

    Returns the index of `target`, or -1 if absent. Call with
    (arr, target, 0, len(arr) - 1); an empty list gives high = -1 and
    returns -1 immediately.

    Fixes over the original: there was no `low > high` base case, and the
    recursive calls reused `middle` as the new bound, so a missing target
    recursed forever (RecursionError).
    """
    if low > high:
        return -1  # window exhausted: not found (also covers empty arr)
    middle = (low + high) // 2
    if arr[middle] == target:
        return middle
    elif arr[middle] > target:
        # Discard rhs, excluding the already-checked middle.
        return binary_search_recursive(arr, target, low, middle - 1)
    else:
        # Discard lhs.
        return binary_search_recursive(arr, target, middle + 1, high)
|
989,117 | f308676bae67b146eb0e9cda9f344dfb73238e13 | import sklearn
from sklearn import preprocessing, tree
from sklearn.tree import DecisionTreeClassifier
import imblearn #solve imbalance
from imblearn.over_sampling import SMOTE
import numpy as np
import pandas as pd
import random
from itertools import zip_longest
import storm
############### Constants ###############
# Change this path to the local CSE-CIC-IDS2018 dataset location.
database_location = "C:/Users/Arthu/Desktop/thesis_realtime_ids/csecicids2018-clean/"
DATA_0302 = database_location + "Friday-02-03-2018_TrafficForML_CICFlowMeter.csv"
DATA_0301 = database_location + "Thursday-01-03-2018_TrafficForML_CICFlowMeter.csv"
DATA_0228 = database_location + "Wednesday-28-02-2018_TrafficForML_CICFlowMeter.csv"
DATA_0223 = database_location + "Friday-23-02-2018_TrafficForML_CICFlowMeter.csv"
DATA_0222 = database_location + "Thursday-22-02-2018_TrafficForML_CICFlowMeter.csv"
DATA_0221 = database_location + "Wednesday-21-02-2018_TrafficForML_CICFlowMeter.csv"
DATA_0220 = database_location + "Tuesday-20-02-2018_TrafficForML_CICFlowMeter.csv"
DATA_0216 = database_location + "Friday-16-02-2018_TrafficForML_CICFlowMeter.csv"
DATA_0215 = database_location + "Thursday-15-02-2018_TrafficForML_CICFlowMeter.csv"
# BUG FIX: the original line ended with a stray "µ" character after the closing
# quote, which is a SyntaxError and prevented the whole module from importing.
DATA_0214 = database_location + "Wednesday-14-02-2018_TrafficForML_CICFlowMeter.csv"
# NOTE: DATA_0223 is deliberately absent here — BoltPython.initialize() reads it
# separately with its own skiprows/nrows — so `datasets` (9) and
# `dataset_names` (10) intentionally differ in length.
datasets = [DATA_0302, DATA_0301, DATA_0228, DATA_0222, DATA_0221, DATA_0220, DATA_0216, DATA_0215, DATA_0214]
dataset_names = ["DATA_0302", "DATA_0301", "DATA_0228", "DATA_0223", "DATA_0222", "DATA_0221", "DATA_0220", "DATA_0216",
                 "DATA_0215", "DATA_0214"]
# Relabel the 15 raw attack categories down to 6 coarse classes.
dict_category = {
    "Benign" : "Benign",
    "Bot" : "Bot", #02/03
    "Infilteration" : "Infilteration", #28/02,01/03
    "SQL Injection" : "SQL Injection", #22/02,23/02
    "Brute Force -Web" : "Brute Force", #22/02,23/02
    "Brute Force -XSS" : "Brute Force", #22/02,23/02
    "FTP-BruteForce" : "Brute Force", #14/02
    "SSH-Bruteforce" : "Brute Force", #14/02
    "DDOS attack-LOIC-UDP" : "DOS",#20/02,21/02
    "DDOS attack-HOIC" : "DOS", #21/02
    "DDoS attacks-LOIC-HTTP" : "DOS", #20/02
    "DoS attacks-SlowHTTPTest" : "DOS", #16/02
    "DoS attacks-Hulk" : "DOS", #16/02
    "DoS attacks-Slowloris" : "DOS", #15/02
    "DoS attacks-GoldenEye" : "DOS" #15/02
}
# Pick the desired positive target class (here: Bot vs everything else).
dict_binary = {
    "Benign" : 0,
    "Bot" : 1,
    "Infilteration" : 0,
    "SQL Injection" : 0,
    "Brute Force" : 0,
    "DOS" : 0
}
############### Hulp Functies ###############
def read_random(filename, sample_size):
    """Read a CSV into a DataFrame, optionally keeping a random sample of rows.

    With sample_size None the whole file is read; otherwise exactly
    `sample_size` data rows are kept (the header is never skipped, since the
    skip list is built over 1..n).

    Fix over the original: the line-counting `open(filename)` was never
    closed (resource leak); it now uses a `with` block.
    """
    if sample_size is None:
        return pd.read_csv(filename)
    with open(filename) as handle:
        n = sum(1 for _ in handle) - 1  # number of data rows (excludes header)
    # 0-indexed header row is never in the skip list.
    skip = sorted(random.sample(range(1, n + 1), n - sample_size))
    return pd.read_csv(filename, skiprows=skip)
def relabel_minorities(labels):
    """Map each raw attack label to its coarse category via dict_category.

    Returns the relabelled sequence as a numpy array.
    """
    coarse = [dict_category[label] for label in labels]
    return np.array(coarse)
def encode_to_binary_classification(y_train, y_test):
    """Encode train/test label sequences to 0/1 targets via dict_binary.

    Returns a (train_encoded, test_encoded) tuple. The literal label "end"
    is skipped, mirroring the original's zip_longest fill-value filter.
    """
    sentinel = "end"
    train_encoded = [dict_binary[label] for label in y_train if label != sentinel]
    test_encoded = [dict_binary[label] for label in y_test if label != sentinel]
    return (train_encoded, test_encoded)
############### Implementatie ###############
class BoltPython(storm.BasicBolt):
    """Storm bolt that trains a decision tree at startup and classifies
    incoming network-flow tuples as Bot (1) / not-Bot (0).
    """
    def initialize(self, conf, context):
        """One-time setup: sample the CSE-CIC-IDS2018 CSVs, SMOTE-balance the
        binary labels, and fit the DecisionTreeClassifier kept on self._DT.
        """
        self._conf = conf
        self._context = context
        storm.logInfo("Bolt starting...")
        # Read a 20k-row random sample from each dataset and concatenate.
        df = None
        df_next = None
        for i in datasets:
            if df is None:
                df = read_random(i,20000)
                df_next = df
            else:
                df_next = read_random(i,20000)
                df = pd.concat([df,df_next])
        # DATA_0223 is read separately with a fixed window instead of a
        # random sample (hence its absence from `datasets`).
        df_next = pd.read_csv(DATA_0223,skiprows=range(1,1500),nrows=20000)
        df = pd.concat([df, df_next])
        df = df.drop(df.columns[2],axis=1) #drop timestamp
        # Last column holds the label; map the 15 raw labels to 6 classes.
        labels = df.iloc[:,-1].values
        labels = relabel_minorities(labels)
        #Binary labels (Bot=1, everything else 0 per dict_binary)
        labels_to_binary = []
        for i in labels:
            labels_to_binary.append(dict_binary[i])
        # NOTE(review): unique_df is never used afterwards — dead code?
        unique_df = pd.DataFrame(data=labels[1:], columns=["Label"])
        y = labels_to_binary
        X = df.iloc[:,:-1]
        # Oversample the minority class to a 1:1 ratio, then fit the tree.
        oversample = SMOTE(sampling_strategy=1)
        x_train, y_train = oversample.fit_resample(X,y)
        DT = DecisionTreeClassifier()
        DT.fit(np.array(x_train), np.array(y_train))
        self._DT = DT
        storm.logInfo("Bolt ready...")
    def process(self, tuple):
        """Classify one CSV-formatted flow line and emit the prediction."""
        #Read spout tuple
        network_line = tuple.values[0]
        storm.logInfo("Processing tuple: " + network_line)
        features = np.array(network_line.split(','))
        # Drop columns 2 and 79 — presumably the timestamp and label columns
        # to mirror the training frame; TODO confirm against the CSV schema.
        features = np.delete(features,[2,79],None)
        #Decision tree prediction
        prediction = self._DT.predict(features.reshape(1,-1))
        #Convert prediction to json serializable list
        storm.emit(prediction.tolist())
BoltPython().run() |
989,118 | 480a5869e2ef0c60d077be64f6253b99f6b9180d | # Generated by Django 2.2.5 on 2019-10-14 23:22
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: default-order MovieInstance by due_date and
    add the `can_mark_returned` permission.
    """
    dependencies = [
        ('catalog', '0007_movieinstance_borrower'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='movieinstance',
            options={'ordering': ['due_date'], 'permissions': (('can_mark_returned', 'Set book as returned'),)},
        ),
    ]
|
989,119 | c8d2fdbaeb2106f8686210bc5ed3cd54c6669d3f | from functools import reduce
import math
def question(n):
    """Return the least common multiple of the integers 1..n."""
    lcm = 1
    for value in range(1, n + 1):
        lcm = (lcm * value) // math.gcd(lcm, value)
    return lcm


# Smallest number evenly divisible by every integer from 1 to 20.
print(question(20))
|
989,120 | 6127ede6dd0b02f51da12fd14c8f6c44882bf70d | import csv
import gzip
import os
import sys
import time
import datetime
sys.path.append("/fraud_model/Code/tools/csv_operations")
import csv_ops
from csv_ops import *
from multiprocessing import Pool
global work_dir
work_dir="/fraud_model/Data/Raw_Data/merged_data_w_tmxrc/"
def merge_all_data_sources( day_start, n_Days):
    """Merge the signal, payer-threatmetrix and payee-threatmetrix daily CSVs
    on payment_request_id for n_Days consecutive days starting at day_start.

    Writes signals_threatmetrix_payer_payee_<day>.csv.gz into work_dir and
    deletes the intermediate merge file. (Python 2 source — print statements.)
    """
    day=day_start#start date
    nDays=n_Days # number of days to process
    for iDay in range(nDays):
        file1="/fraud_model/Data/Raw_Data/signals/fraud_signal_flat_"+str(day)+".csv.gz"
        file2="/fraud_model/Data/Raw_Data/threatmetrix_payer_w_tmxrc/threatmetrix_payer_flat_"+str(day)+".csv.gz"
        file3="/fraud_model/Data/Raw_Data/threatmetrix_payee_w_tmxrc/threatmetrix_payee_flat_"+str(day)+".csv.gz"
        file_out=work_dir+"signals_threatmetrix_payer_payee_"+str(day)+".csv.gz"
        file_out_tmp= work_dir+"merge_tmp_"+str(day)+".csv.gz"
        key_list=['payment_request_id']
        # merge signal and payer threatmetrix
        t0=time.time()
        print "Merging signal and payer threatmetrix for "+str(day)
        csv_merge(file1, file2, key_list, file_out_tmp)
        print "Merging signal and payer threatmetrix done; time lapsed: ",time.time()-t0,'sec'
        # merge above results with payee threatmetrix
        print "Merge all three data sources for "+str(day)
        csv_merge(file_out_tmp, file3, key_list, file_out)
        print "Merge all three data sources done ; total time lapsed: ",time.time()-t0,'sec'
        # delete intermediate file
        # NOTE(review): shelling out to `rm` with string escaping is fragile;
        # os.remove(file_out_tmp) would be safer — confirm and switch.
        cmdout=os.system('rm '+file_out_tmp.replace(" ","\ "))
        # increment day by one
        day = day+datetime.timedelta(1)
def merge_all_data_sources_helper(arg):
    """Pool.map adapter: merge the data sources for a single day."""
    merge_all_data_sources(arg, 1)
# Last day of the period: taken from argv (year month day nDays) or defaulted.
if len(sys.argv) <=1: # if last day is not specified by stdin
    year=2015
    month=4
    day=30
    nDays = 30
else:
    year=int(sys.argv[1])
    month=int(sys.argv[2])
    day=int(sys.argv[3])
    nDays=int(sys.argv[4])
print "first day to merge:",year,'-',month,'-',day
nWorkers = 8
dayEnd = datetime.date(year, month, day)
# Prepare the date list to roll up, skipping dates whose merged output
# already exists in work_dir (idempotent reruns).
dateList = []
for i in range(nDays):
    dayToProcess=dayEnd-datetime.timedelta(i)
    if os.path.exists(work_dir + "signals_threatmetrix_payer_payee_"+str(dayToProcess)+".csv.gz"):
        print "signals_threatmetrix_payer_payee_"+str(dayToProcess)+".csv.gz"," already exits, skipping ..."
    else:
        print "signals_threatmetrix_payer_payee_"+str(dayToProcess)+".csv.gz"," rollup will be processed"
        dateList.append(dayToProcess)
# Fan the per-day merges out over a worker pool (one day per task).
pool = Pool(processes=nWorkers)
pool.map(merge_all_data_sources_helper, dateList)
|
989,121 | d1464fc7145345727a8fa443d7a06be813378803 | # Generated by Django 2.1.7 on 2019-03-08 10:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated migration: create `customer` and `seller`."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='customer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=25)),
                ('mobile_number', models.CharField(max_length=10)),
            ],
        ),
        migrations.CreateModel(
            name='seller',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=25)),
                # NOTE(review): max_length is not a valid IntegerField option;
                # Django's system checks flag it. Fix in the model plus a new
                # migration rather than editing this historical file.
                ('rate', models.IntegerField(max_length=25)),
                ('images', models.FileField(blank=True, null=True, upload_to='static/events/images')),
                ('city', models.CharField(max_length=25)),
            ],
        ),
    ]
|
989,122 | b60fb57b07399f31ed587e17c032bf7ec51642d7 | import pytesseract
try:
import Image
except ImportError:
from PIL import Image
def resolve(path):
    """OCR the image at `path`; return the stripped text, or "ERROR" on any failure."""
    try:
        image = Image.open(path)
        text = pytesseract.image_to_string(image)
        return str(text).strip()
    except Exception:
        return "ERROR"
|
989,123 | 08696a23c61c0bb61501d67fe77bba4a1d5767f2 | from preprocess import detect_image_counts, cut_image
import os
import cv2
pwd = os.getcwd()
image_index = 1
def cut_images_save(img, if_show_pre=False, if_show=False, img_name='', save_path=''):
    """Detect how many invoices `img` contains and save the result(s).

    Two invoices -> each crop is written as <index>.png; otherwise the whole
    image is written as <index>.jpg. File names come from the module-global
    `image_index`, which this function increments (side effect).
    """
    global image_index
    print('**********************开始处理图片*************************')
    print('img_name: ', img_name)
    # wrap == 2 signals a two-invoice page (see preprocess.detect_image_counts).
    wrap = detect_image_counts(img, if_show_pre, if_show, img_name)
    print('wrap: ', wrap)
    if wrap == 2:
        print('-------------此图片有两张发票----------------')
        imgs_list = cut_image(img)
        for img in imgs_list:
            cv2.imwrite(save_path + str(image_index) + '.png', img)
            image_index += 1
    else:
        cv2.imwrite(save_path + str(image_index) + '.jpg', img)
        image_index += 1
    print('***************************************处理图片完成*************************************')
print('***************************************处理图片完成*************************************')
if __name__ == '__main__':
    file_path = './pictures/2021_4_29.jpg'
    crops_save_path = './results/crops/'
    img = cv2.imread(file_path)
    # BUG FIX: the original positional call cut_images_save(img, False, 'ss',
    # crops_save_path) bound 'ss' to if_show and the crops directory to
    # img_name, leaving save_path='' so output landed in the CWD.
    cut_images_save(img, if_show_pre=False, if_show=False,
                    img_name='ss', save_path=crops_save_path)
|
989,124 | 6f5dfbf3e4464a2b737f2cb17e7052f1e785a57d | import asyncio
async def f():
    """Print the id of the task running this coroutine, then yield for 1 s."""
    current = asyncio.current_task()
    print("One", id(current))
    await asyncio.sleep(1)
def main():
    """Event-loop demo; the commented lines show common asyncio mistakes."""
    # a = f() # RuntimeWarning: coroutine 'f' was never awaited
    # loop = asyncio.get_running_loop() # RuntimeError: no running event loop
    loop = asyncio.get_event_loop()
    # loop.call_soon(f) # RuntimeWarning: coroutine 'f' was never awaited
    # NOTE: nothing is scheduled on the loop, so this blocks forever.
    loop.run_forever()
if __name__ == '__main__':
    main()
|
989,125 | a27f4bb4f1128146da234f8ca002ef31661c3a12 | from flask import Flask
import settings
from apps.article.view import article_bp
from apps.goods.view import goods_bp
from apps.user.view import user_bp
from exts import db
def create_app():
    """Application factory: build the Flask app, register the feature
    blueprints (user, article, goods) and bind the shared SQLAlchemy
    instance from `exts`.

    Template/static folders resolve relative to this package's parent dir.
    """
    app = Flask(__name__, template_folder='../templates', static_folder='../static')
    app.config.from_object(settings.DevelopmentConfig)
    app.register_blueprint(user_bp)
    app.register_blueprint(article_bp)
    app.register_blueprint(goods_bp)
    # Late binding lets `db` be imported elsewhere without a circular import.
    db.init_app(app)
    return app
|
989,126 | cc659c47b3d0c9c5e2177a0bb7c86b7d9f0e1e01 | from distutils.core import setup
# Package metadata for lbplot (sources live under src/, exposed as `lbplot`).
# NOTE(review): distutils is deprecated (removed in Python 3.12) — consider
# migrating to setuptools; confirm against the supported Python versions.
setup(
    name='lbplot',
    version='1.0',
    description='LBANN output plotting utilities.',
    author='Luke Jaffe',
    author_email='lukejaffe@users.noreply.github.com',
    packages=['lbplot'],
    package_dir={'lbplot': 'src'},
    scripts=['src/script/lbplot']
)
|
989,127 | da69598344ecb680d17c643105134ae9b25b391e | """
Dump out the cases data so we can see what is going on in there.
"""
import os, sys
import pandas as pd
import read_cases
if __name__ == "__main__":
    # Load the locally cached cases and dump the DataFrame for inspection.
    df = read_cases.read_local_cases()
    print(df)
|
989,128 | cf123e80d377ec6d7e8a5b6dac61a134ebcb82d3 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import os
import sys
import click
from flask_wtf import FlaskForm
from wtforms import SubmitField, TextAreaField,StringField,BooleanField,PasswordField
from wtforms.validators import DataRequired, Length
from flask import redirect, url_for, abort, render_template, flash,request
import pymysql
from flask_ckeditor import CKEditor
from flask_ckeditor import CKEditorField
from datetime import datetime
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from faker import Faker
from flask_wtf import CSRFProtect
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import LoginManager
from flask_login import UserMixin
from flask_login import login_user, logout_user, login_required, current_user
from urllib.parse import urlparse, urljoin
app = Flask(__name__)
# SQLite absolute-path URI needs three slashes on Windows, four elsewhere.
WIN = sys.platform.startswith('win')
if WIN:
    prefix = 'sqlite:///'
else:
    prefix = 'sqlite:////'
# Strip Jinja block whitespace so rendered HTML stays compact.
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
app.config['SECRET_KEY'] = os.getenv('SECRET_KEY', 'secret string')
app.config['SQLALCHEMY_DATABASE_URI'] = os.getenv('DATABASE_URL', prefix + os.path.join(app.root_path, 'data.db'))
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['CKEDITOR_SERVE_LOCAL'] = True
# Notes per page in the paginated index view.
app.config['NOTE_POST_PER_PAGE'] = 10
# Extension instances bound to this app.
db = SQLAlchemy(app)
ckeditor = CKEditor(app)
bootstrap = Bootstrap(app)
moment = Moment(app)
csrf = CSRFProtect(app)
login_manager = LoginManager(app)
class Note(db.Model):
    """A rich-text note: CKEditor HTML body, short title, creation time."""
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # HTML produced by the CKEditor field.
    body = db.Column(db.Text)
    title = db.Column(db.String(20))
    # Indexed because the index view orders by timestamp descending.
    timestamp = db.Column(db.DateTime, default=datetime.utcnow, index=True)
    def __repr__(self):
        return '<Note %r>' % self.body
@app.cli.command()
@click.option('--drop', is_flag=True, help='Create after drop.')
def initdb(drop):
    # `flask initdb [--drop]`: (re)create all tables; --drop wipes data first.
    if drop:
        db.drop_all()
    db.create_all()
    click.echo('Initialized database.')
@app.cli.command()
@click.option('--count', default=20, help='Quantity of messages, default is 20.')
def forge(count):
    # `flask forge [--count N]`: rebuild the schema and fill it with N fake
    # Chinese-locale notes. Destroys any existing data.
    db.drop_all()
    db.create_all()
    fake = Faker('zh_CN')
    click.echo('Working...')
    for i in range(count):
        note = Note(
            title=fake.name(),
            body=fake.sentence(),
            timestamp=fake.date_time_this_year()
        )
        db.session.add(note)
    # Single commit after the loop keeps this O(1) transactions.
    db.session.commit()
    click.echo('Created %d fake notes.' % count)
class NewNoteForm(FlaskForm):
    """Create-note form: required title (<=20 chars) and rich-text body."""
    title = StringField('Name', validators=[DataRequired(), Length(1, 20)])
    body = CKEditorField('Body', validators=[DataRequired()])
    submit = SubmitField('Save')
@app.route('/new', methods=['GET', 'POST'])
def new_note():
    """GET: show the create form; POST: persist the note and go to index."""
    form = NewNoteForm()
    if form.validate_on_submit():
        title = form.title.data
        body = form.body.data
        note = Note(body=body, title=title)
        db.session.add(note)
        db.session.commit()
        flash('Your note is saved.')
        return redirect(url_for('index'))
    return render_template('new_note.html', form=form)
@app.route('/')
def index():
    """Paginated note list, newest first.

    A DeleteNoteForm is passed so each row can render a CSRF-protected
    delete button.
    """
    form = DeleteNoteForm()
    page = request.args.get('page', 1, type=int)
    per_page = app.config['NOTE_POST_PER_PAGE']
    pagination = Note.query.order_by(Note.timestamp.desc()).paginate(page, per_page=per_page)
    notes = pagination.items
    return render_template('index.html', notes=notes, form=form, pagination=pagination)
class EditNoteForm(NewNoteForm):
    """Same fields as NewNoteForm; only the submit label differs."""
    submit = SubmitField('Update')
@app.route('/edit/<int:note_id>', methods=['GET', 'POST'])
@login_required
def edit_note(note_id):
    """GET: show the edit form pre-filled; POST: update and go to index."""
    form = EditNoteForm()
    # NOTE(review): query.get returns None for an unknown id, so the
    # attribute assignments below would raise (HTTP 500); get_or_404 would
    # give a cleaner 404 — confirm and switch.
    note = Note.query.get(note_id)
    if form.validate_on_submit():
        note.title = form.title.data
        note.body = form.body.data
        db.session.commit()
        flash('Your note is updated.')
        return redirect(url_for('index'))
    # GET (or failed validation): populate the form with current values.
    form.title.data = note.title
    form.body.data = note.body
    return render_template('edit_note.html', form=form)
class DeleteNoteForm(FlaskForm):
    """Submit-only form; exists to CSRF-protect the delete POST."""
    submit = SubmitField('Delete')
@app.route('/delete/<int:note_id>', methods=['POST'])
@login_required
def delete_note(note_id):
    """Delete a note; 400 if the CSRF form does not validate."""
    form = DeleteNoteForm()
    if form.validate_on_submit():
        # NOTE(review): query.get may return None for an unknown id, making
        # db.session.delete raise — consider get_or_404.
        note = Note.query.get(note_id)
        db.session.delete(note)
        db.session.commit()
        flash('Your note is deleted.')
    else:
        abort(400)
    return redirect(url_for('index'))
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page with the matching status code."""
    body = render_template('404.html')
    return body, 404
@app.errorhandler(500)
def internal_server_error(e):
    """Render the custom 500 page with the matching status code."""
    body = render_template('500.html')
    return body, 500
class Admin(db.Model,UserMixin):
    """Administrator account; the app queries it via .first(), effectively
    assuming a single row.
    """
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(20))
    # Werkzeug salted hash — the plain password is never stored.
    password_hash = db.Column(db.String(128))
    def set_password(self, password):
        """Hash and store the given plain-text password."""
        self.password_hash = generate_password_hash(password)
    def validate_password(self, password):
        """Return True if `password` matches the stored hash."""
        return check_password_hash(self.password_hash, password)
@app.cli.command()
@click.option('--username', prompt=True, help='The username used to login.')
@click.option('--password', prompt=True, hide_input=True,
              confirmation_prompt=True, help='The password used to login.')
def initadmin(username, password):
    # `flask initadmin`: create the admin account, or update the credentials
    # of the existing one (at most one Admin row is maintained).
    admin = Admin.query.first()
    if admin is not None:
        click.echo('The administrator already exists, updating...')
        admin.username = username
        admin.set_password(password)
    else:
        click.echo('Creating the temporary administrator account...')
        admin = Admin(
            username=username)
        admin.set_password(password)
        db.session.add(admin)
    db.session.commit()
    click.echo('Done.')
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login hook: resolve the session's user id to an Admin row."""
    return Admin.query.get(int(user_id))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Log the (single) admin in; already-authenticated users go to index."""
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = LoginForm()
    if form.validate_on_submit():
        username = form.username.data
        password = form.password.data
        remember = form.remember.data
        # Single-admin model: only the first Admin row is ever checked.
        admin = Admin.query.first()
        if admin:
            if username == admin.username and admin.validate_password(password):
                login_user(admin, remember)
                flash('Welcome back.', 'info')
                # Return to ?next=/referrer when safe (see redirect_back).
                return redirect_back()
            flash('Invalid username or password.')
        else:
            flash('No account.', 'warning')
    return render_template('login.html', form=form)
class LoginForm(FlaskForm):
    """Admin login form: username, password, remember-me."""
    username = StringField('Username', validators=[DataRequired(), Length(1, 20)])
    password = PasswordField('Password', validators=[DataRequired(), Length(1, 128)])
    remember = BooleanField('Remember me')
    submit = SubmitField('Log in')
@app.route('/logout')
@login_required
def logout():
    """End the session and return to the safe back-target."""
    logout_user()
    flash('Logout success.')
    return redirect_back()
# Flask-Login: endpoint anonymous users are redirected to, plus the flash
# message ("please log in") and its category shown on that redirect.
login_manager.login_view = 'login'
login_manager.login_message = '请登录'
login_manager.login_message_category = 'warning'
def is_safe_url(target):
    """True if `target` resolves to an http(s) URL on this app's own host.

    Guards redirect_back against open-redirect attacks.
    """
    host = urlparse(request.host_url)
    candidate = urlparse(urljoin(request.host_url, target))
    same_host = host.netloc == candidate.netloc
    return candidate.scheme in ('http', 'https') and same_host
def redirect_back(default='index', **kwargs):
    """Redirect to ?next= or the referrer when safe; fall back to `default`."""
    candidates = (request.args.get('next'), request.referrer)
    for target in candidates:
        if target and is_safe_url(target):
            return redirect(target)
    return redirect(url_for(default, **kwargs))
@app.context_processor
def make_template_context():
    """Inject the (single) Admin row into every template context."""
    return {'admin': Admin.query.first()}
|
989,129 | 6a4f9feb318c6820ddf70dab633ee3efdf42b5e3 | import os
import re
import time
import win32com.client
from datetime import datetime,timedelta
from reportMail import mailData, HtmlMailReport
import configparser
class CheckMailer:
    """Outlook (win32com) mail auditor: restricts Inbox/Sent Items to a
    recent window and builds an HTML report of which inbox mails were
    read/replied/forwarded (see constructReportData).
    """
    def __init__(self,daysOfReport:int=0,folderindex=6):
        """Connect to Outlook via MAPI and pre-filter the item collections.

        daysOfReport: look-back in days; 0 means "since midnight yesterday"
        (24h + the current hour). folderindex 6 is Outlook's olFolderInbox;
        5 below is olFolderSentMail.
        """
        self.outlook = win32com.client.Dispatch('Outlook.Application').GetNamespace("MAPI")
        self.inbox = self.outlook.GetDefaultFolder(folderindex)
        self.sendBox = self.outlook.GetDefaultFolder(5)
        self.messages = self.inbox.Items
        self.sendMessages = self.sendBox.Items
        # Newest first.
        self.messages.Sort("ReceivedTime", True)
        self.sendMessages.Sort("ReceivedTime", True)
        self.daysOfReport=daysOfReport
        self.totalHours = time.localtime().tm_hour
        self.totalHours += 24
        if daysOfReport !=0:
            received_dt = datetime.now() - timedelta(days=self.daysOfReport)
            received_dt = received_dt.strftime('%Y/%m/%d %H:%M %p')
            # Restrict() filters server-side on the ReceivedTime property.
            self.messages = self.messages.Restrict("[ReceivedTime] >= '" + received_dt + "'")
            self.sendMessages = self.sendMessages.Restrict("[ReceivedTime] >= '" + received_dt + "'")
        else:
            received_dt = datetime.now() - timedelta(hours=self.totalHours)
            received_dt = received_dt.strftime('%Y/%m/%d %H:%M %p')
            self.messages = self.messages.Restrict("[ReceivedTime] >= '" + received_dt + "'")
            self.sendMessages = self.sendMessages.Restrict("[ReceivedTime] >= '" + received_dt + "'")
    def getInBoxMesages(self):
        """Return the restricted inbox Items collection."""
        return self.messages
    def msgFoundInSentFolde(self,inboxMsgSub):
        """Look for an inbox subject inside Sent Items.

        Returns "YESFW::<time>" / "YESRE::<time>" / "YES::<time>" for the
        first matching sent mail (forward / reply / other), or "NO".
        """
        for i in self.sendMessages:
            sub=i.Subject
            if sub.find(inboxMsgSub) != -1:
                sentTime=i.ReceivedTime.strftime("%Y-%m-%d %H:%M:%S")
                if sub.find("FW:") != -1:
                    return "YESFW::" + str(sentTime)
                elif sub.find("RE:") != -1:
                    return "YESRE::" + str(sentTime)
                else:
                    return "YES::" + str(sentTime)
        return "NO"
    def testSendMsg(self,mailId='rsukanya@netapp.com'):
        """Debug helper: True if any inbox MailItem was sent by `mailId`.

        Falls off the end (returns None) when no match is found.
        """
        #Firstly, your code will fail if you have an item other than MailItem in the folder, such as ReportItem, MeetingItem, etc.
        # You need to check the Class property. Secondly, you need to check the sender email address type and use the
        # SenderEmailAddress only for the "SMTP" address type
        foundMailId=''
        for i in self.messages:
            if i.Class == 43:  # 43 == olMail (MailItem)
                if i.SenderEmailType == "EX":
                    if i.Sender.GetExchangeUser() is not None:
                        print(i.Sender.GetExchangeUser().PrimarySmtpAddress)
                        if i.Sender.GetExchangeUser().PrimarySmtpAddress == mailId:
                            return True
                    else:
                        print(i.Sender.GetExchangeDistributionList().PrimarySmtpAddress)
                        if i.Sender.GetExchangeDistributionList().PrimarySmtpAddress == mailId:
                            return True
                else:
                    print(i.SenderEmailAddress)
                    if str(i.SenderEmailAddress) == mailId:
                        return True
    def getInBox(self):
        """Return the inbox folder object."""
        return self.inbox
    def getInboxItem(self):
        """Return the UNfiltered inbox Items (no Restrict applied)."""
        return self.inbox.Items
    def showInboxFolders(self):
        """Print the names of the inbox's sub-folders."""
        inBoxFolder=self.getInBox()
        [print(ib) for ib in inBoxFolder.Folders]
    def showTotalMessages(self):
        """Print the restricted message count, then each received time."""
        print("Total Messages in inbox Folders ",len(self.messages))
        self.showMesages()
    def showMesages(self):
        """Print ReceivedTime for each message via GetFirst/GetNext."""
        # NOTE(review): the duplicated GetFirst() call looks accidental, and
        # the bare `except: continue` can spin forever because GetNext() is
        # never advanced on error — confirm and tighten.
        msg= self.messages.GetFirst()
        msg = self.messages.GetFirst()
        while msg:
            try:
                print(msg.ReceivedTime)
                msg = self.messages.GetNext()
            except:
                continue
    def isSendersEmailIdMatching(self,msg,mailId):
        """True if `msg` is a MailItem whose sender address equals `mailId`.

        Handles Exchange ("EX") senders (user or distribution list) and
        plain SMTP senders separately, as the header comment explains.
        """
        #Firstly, your code will fail if you have an item other than MailItem in the folder, such as ReportItem, MeetingItem, etc.
        # You need to check the Class property. Secondly, you need to check the sender email address type and use the
        # SenderEmailAddress only for the "SMTP" address type
        foundMailId=''
        if msg.Class == 43:  # olMail
            if msg.SenderEmailType == "EX":
                if msg.Sender.GetExchangeUser() is not None:
                    if msg.Sender.GetExchangeUser().PrimarySmtpAddress == mailId:
                        print("1", msg.Sender.GetExchangeUser().PrimarySmtpAddress)
                        return True
                else:
                    if msg.Sender.GetExchangeDistributionList().PrimarySmtpAddress == mailId:
                        print("2", msg.Sender.GetExchangeDistributionList().PrimarySmtpAddress)
                        return True
            else:
                if str(msg.SenderEmailAddress) == mailId:
                    print("3",msg.SenderEmailAddress)
                    return True
        return False
    #chk, chk.totalHours, chk.daysOfReport
    def constructReportData(self):
        """Build the HTML report rows.

        For each inbox message: save it as ./email/<subject>.msg, build an
        HTML link (red for the configured highlight sender, yellow for
        FORM 16 / Action Required / Case# subjects), record read/replied
        state, then hand the rows to HtmlMailReport and open it in the
        browser. Config comes from project.cfg [HIGHLIGHT_MAIL].
        """
        ll =[]
        i:int=1
        #os.getcwd() + '\\mail\\'
        config = configparser.ConfigParser()
        config.read('project.cfg')
        # mailId = config.get('HIGHLIGHT_MAIL', 'mail_id')
        mailId=config['HIGHLIGHT_MAIL']['mail_id']
        highLightColor=config['HIGHLIGHT_MAIL']['color']
        os.makedirs("email",mode=0o777,exist_ok=True)
        mesages=self.getInBoxMesages()
        for msg in mesages:
            md = mailData()
            i+=1
            md.Id=str(i)
            # File-safe name: strip everything but alphanumerics.
            name = str(msg.Subject)
            name = re.sub('[^A-Za-z0-9]+', '', name) + '.msg'
            if self.isSendersEmailIdMatching(msg,mailId):
                # Highlight-sender mails get a red link.
                s = "style=\"" + "color: red\""
                subStr = "<p><a href=" + "email\\" + name + " " + s + ">"
                subStr += str(msg.Subject)
                subStr += "</a></p>"
                print(" hara mohan")
            elif str(msg.Subject).find("FORM 16") != -1 or str(msg.Subject).find("Action Required") != -1 or \
                str(msg.Subject).find("Case#") != -1 :
                # s="style=\"" + "color: #73E600\""
                s="style=\"" + "color: #CCCC00\""
                subStr = "<p><a href=" + "email\\" + name +" "+ s+">"
                subStr += str(msg.Subject)
                subStr += "</a></p>"
            else:
                subStr = "<p><a href=" + "email\\" + name +">"
                subStr += str(msg.Subject)
                subStr += "</a></p>"
            md.MailSub=subStr
            md.MailRead="NO" if msg.UnRead else "YES"
            md.MailRecvdTm=msg.ReceivedTime.strftime("%Y-%m-%d %H:%M:%S")
            md.MailReply=self.msgFoundInSentFolde(msg.Subject)
            # md.MailRemark= "Please reply if required" if md.MailReply=="NO" else "You have replied to this mail"
            # MailReply is "YESxx::<time>" or "NO"; index 1 is the timestamp.
            getReplyTime= md.MailReply.split("::")
            try:
                timeStr=str(getReplyTime[1])
                print(" INDIA : ",timeStr)
            except IndexError:
                timeStr=''
                pass
            if md.MailReply.find("YESFW::")!= -1:
                md.MailRemark ="You have forwarded this mail on "+timeStr if len(timeStr)> 0 else ''
            elif md.MailReply.find("YESRE") != -1:
                # md.MailRemark="You have replied to this mail"
                ss="<p style=\"" + "color:#87F717\"" +">"
                mystr = ss
                # NOTE(review): ternary precedence — the else branch is
                # ('' + "</p>"), so the closing </p> is dropped whenever
                # timeStr is non-empty; confirm intent.
                mystr += "You have replied to this mail on "+timeStr if len(timeStr)> 0 else ''+"</p>"
                md.MailRemark=mystr
                print(md.MailRemark)
            elif md.MailReply.find("YES::") !=-1:
                md.MailRemark = "you have taken action on this mail on " +timeStr if len(timeStr)> 0 else ''
            else:
                md.MailRemark ="Please reply if required"
            # Collapse the detailed reply marker to a plain YES/NO flag.
            md.MailReply = "YES" if md.MailReply.find("YES") != -1 else "NO"
            try:
                msg.SaveAs(os.getcwd() + '\\email\\' + name)
            except Exception as e:
                print("error when saving the attachment:" + str(e))
            ll.append(md)
        hTblObj=HtmlMailReport(ll,self.totalHours,self.daysOfReport)
        hTblObj.writeReport()
        hTblObj.openInBrowser()
def main():
    """Build the mail report over the default window (~last day)."""
    chk=CheckMailer()
    # chk.testSendMsg()
    chk.constructReportData()
    # chk.showInboxFolders()
    # chk.showTotalMessages()
    # chk.showMesages()
    # chk.testSendMsg()
if __name__=="__main__":
    main()
989,130 | f7fee61832a6d458f3c1d10e50179d3604fbc3ff | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on: Mar 2020
@author: celiacailloux
Updated: Sep 17 2020
Experiment name: Example PEIS SINGLE
"""
#from ECLabData import ECLabDataCV, ECLabDataCP
from ECLabImportCAData import ECLabDataCA
from ECLabImportZIRData import ECLabDataZIR
from ECLabImportCIData import ECLabDataCI
from ECLabImportPEISData import ECLabDataPEIS
import PlottingFunctions as pltF
from PickleFunctions import save_as_pickle, get_saved_pickle
import matplotlib.pyplot as plt
import numpy as np
# Step 1: experiment selection and plot-axis settings. (The bare string
# literals below are the original author's section markers — they are
# expression statements, kept as-is.)
'_____________ Step 1: Choose Experiment_____________________________________ '
# False -> load the pickled PEIS objects instead of re-importing EC-Lab data.
importECLabData = False
" change "
# Reference-electrode position: 'T' (top) or 'C' (center); picks the Ru line.
RE_position = 'T'
" Bode "
characteristic_frequency = 120e3
x_major_locator = 50e3
y1_major_locator = 5
x_minor_locator = 5
y1_minor_locator = 5
x_min, x_max = 50e3, 150e3
y1_min, y1_max = 25, 40
" Nyquist "
ReZ_min, ReZ_max = 20, 40
# ImZ_min, ImZ_max = 20, 50
ReZ_major_loc, ReZ_minor_loc= 10, 5
ImZ_major_loc, ImZ_minor_loc= 5, 5
plot_Bodeplot = False
plot_Nyquist = True
save_plot = True
" change "
# Raw string: network share with spaces/backslashes — do not escape.
exp_dir = \
r'O:\list-SurfCat\setups\307-059-largeCO2MEA\Data\Celia - EC largeCO2MEA data\EC-lab\20200916 Cu3 CO2R Staircase'
' ____________ Step 2: Import Data __________________________________________ '
# Step 2: either import the raw EC-Lab PEIS files and pickle them, or reload
# the previously pickled objects (fast path).
if importECLabData:
    """ import PEISs """
    PEIS1 = ECLabDataPEIS(file_folder = exp_dir,
                          pH = 6.8,
                          reference_electrode_potential = 'Ag/AgCl',
                          label = 'CO$_2$',
                          A_electrode = 1, #cm2
                          uncompensated_R = 85,
                          )
    PEISs = [PEIS1]
    save_as_pickle(pkl_data = PEISs, pkl_name = 'PEISs')
else:
    PEISs = get_saved_pickle(pkl_name = 'PEISs')
    PEIS1 = PEISs[0]
' _____________ Step 3: Import GC summary ___________________________________ '
''' Bodeplot '''
# Bode plot: |Z| (left axis) and phase (twin right axis) vs frequency, one
# subplot per PEIS data file.
if plot_Bodeplot:
    N = len(PEIS1.data)
    if N == 1:
        # Single dataset: allocate a spare axis (hidden below) so axs is
        # still indexable as an array.
        fig, axs = plt.subplots(N+1, 1, sharex = False, gridspec_kw={'hspace': 0.1})
        fig.set_size_inches(w=6,h=N+1*6)
        single_ax = True
    else:
        fig, axs = plt.subplots(N, 1, sharex = True, gridspec_kw={'hspace': 0.1})
        fig.set_size_inches(w=3,h=N*3)
        single_ax = False
    color = pltF.color_maps('jkib')
    title = PEIS1.exp_name
    comment = 'Bode plot, R, phi vs f'
    idx = 0
    for filename, PEIS_data in PEIS1.data.items():
        # Right-hand axis for the phase trace.
        phase_ax = axs[idx].twinx()
        label = 'Change'
        # col = color(idx/(N))#*1.2))
        Z = PEIS_data['|Z|/Ohm']
        f = PEIS_data['freq/Hz']#.divide(1000)
        phi = PEIS_data['Phase(Z)/deg']
        p1, = axs[idx].plot(f, Z, '-o', color = color(1/3), label = label,
                            alpha = 1,
                            linewidth = 2,
                            markerfacecolor='white',
                            markeredgewidth = 2)
        p2, = phase_ax.plot(f, phi, '-o', color = color(2/3), label = label,
                            alpha = 1,
                            linewidth = 2,
                            markerfacecolor='white',
                            markeredgewidth = 2)
        phi_Ru = 0
        pltF.global_settings(axs[idx])
        pltF.global_settings(phase_ax)
        # Markers: the characteristic frequency and the zero-phase line.
        axs[idx].axvline(x = characteristic_frequency, linewidth = 2, color='k', alpha = 0.6, linestyle = '--', zorder = 0)
        phase_ax.axhline(y = phi_Ru, linewidth=2, color=color(2/3), alpha = 1, linestyle = '--', zorder = 0)
        # Color-match each axis' labels/ticks to its trace.
        axs[idx].yaxis.label.set_color(color(1/3))
        phase_ax.yaxis.label.set_color(color(2/3))
        axs[idx].tick_params(axis='y', colors=color(1/3))
        phase_ax.tick_params(axis='y', colors = color(2/3))
        " possibly change "
        axs[idx].set_ylim(bottom = y1_min,
                          top = y1_max)
        phase_ax.set_ylim(bottom = -10,
                          top = 2.5)
        phase_ax.set_xlim(left = x_min,#-1e3,
                          right = x_max)
        pltF.global_minor_locator(axs[idx],
                                  x_locator = x_minor_locator,
                                  y_locator = y1_minor_locator)
        pltF.global_minor_locator(phase_ax,
                                  x_locator = x_minor_locator,
                                  y_locator = 5)
        pltF.global_mayor_ylocator(axs[idx],
                                   y_locator = y1_major_locator)
        pltF.global_mayor_ylocator(phase_ax,
                                   y_locator = 5)
        pltF.global_mayor_xlocator(axs[idx], x_locator = x_major_locator)
        pltF.PEIS_global(axs[idx], phase_ax, idx, N, grid = True, legend = False)
        idx += 1
    if single_ax:
        # Hide the spare subplot allocated for the N == 1 case.
        axs[-1].set_axis_off()
        axs.flat[-1].set_visible(False)
    if save_plot:
        pltF.global_savefig(fig, plt_title = title, addcomment = comment)
        plt.close()
    else:
        plt.show()
''' Nyquist '''
# Nyquist plot: -Im(Z) vs Re(Z) for every PEIS data file on one axis.
if plot_Nyquist:
    print('Plotting Nyquist Plot ...')
    fig, ax = plt.subplots(1)
    fig.set_size_inches(w=4,h=4)
    color = pltF.color_maps('jkib')
    title = PEIS1.exp_name
    comment = 'Nyquist, Im(Z) vs Re(Z)'
    N = len(PEIS1.data)
    idx = 0
    for filename, PEIS_data in PEIS1.data.items():
        label = 'Change'
        col = color(idx/(N))
        Re = PEIS_data['Re(Z)/Ohm']
        Im = PEIS_data['-Im(Z)/Ohm']
        ax.plot(Re, Im, '-o', color = col, label = label,
                alpha = 1,
                linewidth = 2,
                markerfacecolor='white',
                markeredgewidth = 2)
        # Vertical marker at the uncompensated-resistance value for the
        # chosen reference-electrode position (24.8 Ω top, 56.8 Ω center).
        if RE_position == 'T':
            ax.axvline(x = 24.8, linewidth=2, color = col, alpha = 1, linestyle = '--', zorder = 0)#, dashes=[3, 1])
        elif RE_position == 'C':
            ax.axvline(x = 56.8, linewidth=2, color = col, alpha = 1, linestyle = '--', zorder = 0)#, dashes=[3, 1])
        else:
            print('No RE position given')
        idx += 1
    # Real-axis reference line at -Im(Z) = 0.
    ax.axhline(y = 0, linewidth=2, color='k', alpha = 0.5, linestyle = '--', zorder = 0)#, dashes=[3, 1])
    pltF.Nyquist_global(ax, grid = True, legend = False)
    " possibly change "
    ax.set_xlim(left = ReZ_min,
                right = ReZ_max)
    # ax.set_ylim(bottom = 40, top = 80)
    pltF.global_mayor_xlocator(ax,
                               x_locator = ReZ_major_loc)
    pltF.global_mayor_ylocator(ax,
                               y_locator = ImZ_major_loc)
    pltF.global_minor_locator(ax,
                              x_locator = ReZ_minor_loc,
                              y_locator = ImZ_minor_loc)
    pltF.global_settings(ax)
    if save_plot:
        pltF.global_savefig(fig, plt_title = title, addcomment = comment)
        plt.close()
    else:
        plt.show()
|
989,131 | 3f00410344605d4292ce11524aca4b9f58b3498c | famous_person="Kyrie Irving"
message="once said: 'No alarm clock needed. My passion wakes me up'"
print(famous_person,message)
|
989,132 | 1ea1e6e5a6ca308dd358f2ec524f0d960df58944 | # encoding: utf-8
from PIL import ImageGrab
import os
import time
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email import Encoders
def screenGrab():
    """Capture the full screen and save it as a timestamped JPEG.

    Returns the generated file name, e.g. 'Screenshot_202401011200.jpg'.
    """
    shot = ImageGrab.grab()
    filename = 'Screenshot_{}.jpg'.format(time.strftime('%Y%m%d%H%M'))
    shot.save(filename, 'JPEG')
    return filename
def sendMail(filename):
    """Send *filename* as a base64 e-mail attachment via smtp.163.com.

    Uses the Python 2 email API already imported by this module; the
    MAILADDRESS/PASSWORD placeholders must be filled in before use.
    """
    msg = MIMEMultipart()
    msg['Subject'] = filename
    msg['From'] = 'MAILADDRESS'
    msg['To'] = 'MAILADDRESS'
    part = MIMEBase('application', 'octet-stream')
    # Context manager guarantees the handle is closed — the original leaked
    # the file object returned by the bare open(...).read() call.
    with open(filename, 'rb') as attachment:
        part.set_payload(attachment.read())
    Encoders.encode_base64(part)
    part.add_header('Content-Disposition',
                    'attachment; filename="%s"' % os.path.basename(filename))
    msg.attach(part)
    smail = smtplib.SMTP('smtp.163.com')
    smail.login('MAILADDRESS', 'PASSWORD')
    smail.sendmail('MAILADDRESS', ['MAILADDRESS'], msg.as_string())
    smail.quit()
def main():
    """Grab a screenshot and mail it out in one step."""
    sendMail(screenGrab())


if __name__ == '__main__':
    main()
989,133 | 48cc67ee38d3ab9e080573ff36b832137f47bac6 | from sys import argv
from scanner import scanner
from parser import parser
import imageio
from PIL import Image
import numpy as np
class FlipBook:
    """Builds an animated GIF ("flipbook") from a parsed .flip specification.

    Attributes:
        frame_list: accumulated PIL frames, in order, written out as the GIF.
        dim: square frame edge length in pixels.
        blank: all-black fallback frame used when an image cannot be loaded.
    """

    def __init__(self):
        self.frame_list = []
        self.dim = 720
        self.blank = np.zeros([self.dim, self.dim, 3], dtype=np.uint8)
        self.blank.fill(0)

    def generate_gif(self, frames):
        """Render every (startFrame, endFrame, image, ...) entry, then save flipbook.gif."""
        for frame in frames:
            startFrame = frame[0]
            endFrame = frame[1]
            if len(frame) == 3:
                # Single image: pass its path straight through.
                image = frame[2]
            else:
                # Multiple images: combine_images returns a PIL Image, not a path.
                image = self.combine_images(frame[2:])
            self.add_image_to_frame_list(startFrame, endFrame, image)
        imageio.mimsave('flipbook.gif', self.frame_list)
        print("GIF named flipbook.gif has been generated")

    def combine_images(self, imageList):
        """Concatenate the first two images in *imageList* side by side.

        Returns a new dim x dim PIL Image (assumes exactly two inputs).
        """
        im1 = Image.open(imageList[0])
        im2 = Image.open(imageList[1])
        dst = Image.new('RGB', (self.dim, self.dim))
        dst.paste(im1, (0, 0))
        dst.paste(im2, (im1.width, 0))
        return dst

    def add_image_to_frame_list(self, startFrame, endFrame, imageName):
        """Append *imageName* (a path OR an already-open PIL Image), resized to
        720x720, once per frame in [startFrame, endFrame).

        Falls back to a blank frame when the image cannot be read.
        """
        for _ in range(startFrame - 1, endFrame - 1):
            try:
                # BUG FIX: combine_images() hands back a PIL Image, which the
                # original passed to Image.open() — that always raised, so
                # combined images silently became blank frames.
                if isinstance(imageName, Image.Image):
                    im = imageName
                else:
                    im = Image.open(imageName)
                im = im.resize((720, 720))
                self.frame_list.append(im)
            except Exception:
                # Best-effort: keep the animation length by inserting a blank
                # frame (was a bare except; narrowed so Ctrl-C still works).
                print(imageName, " not found.")
                im = self.blank
                self.frame_list.append(im)

    def parse_input(self, text, markers):
        """Parse the scanned text; on success build the GIF, otherwise exit."""
        code, frames = parser(text, markers)
        if code:
            self.generate_gif(frames)
        else:
            exit()

    def scan_input(self, text):
        """Tokenise the raw .flip text, then hand off to the parser."""
        code, markers = scanner(text)
        if code:
            self.parse_input(text, markers)
        else:
            exit()
def main(argv):
    """Entry point: validate CLI arguments, read the .flip file, run FlipBook.

    argv: sys.argv-style list; argv[1] must be a path ending in '.flip'.
    """
    if len(argv) != 2:
        print("Usage: python3 main.py <inputfile>")
        return
    ipfile = argv[1]
    if ipfile.endswith(".flip") is False:
        print("Input a .flip file")
        exit()
    # Context manager guarantees the file is closed (the original leaked the
    # handle; its `mode == "r"` check was always true and has been dropped).
    with open(ipfile, "r") as file_obj:
        text = file_obj.read()
    FB = FlipBook()
    FB.scan_input(text)


if __name__ == '__main__':
    main(argv)
# Print a "new string" made of the first two and last two characters of the
# input; inputs shorter than two characters report "empty string".
# FIX: renamed the variable — the original shadowed the builtin `str`,
# breaking any later use of str() in the module.
text = str(input("Enter a string: "))
if len(text) < 2:
    print("empty string")
else:
    new_text = text[0:2] + text[-2:]
    print("new string: ", new_text)
|
989,135 | 76114de1520d633ebd311e1ad3e51777dbf887bd | from django.db import models
from django.conf import settings
from django.utils import timezone
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.db.models.signals import post_delete
from datetime import datetime
class Card(models.Model):
    """A single card on a board list.

    Fields:
        board_list: the List this card belongs to (cascade-deleted with it).
        author: the User who created the card.
        card_title: short title shown on the card.
        card_description: optional longer body text.
        image: optional illustration, stored under MEDIA_ROOT/images/.
        created_date: set automatically when the card is first saved.
        updated_date: refreshed automatically on every save.
        archived: True once a user archives the card.
    """
    board_list = models.ForeignKey('List', on_delete=models.CASCADE)
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    card_title = models.CharField(max_length=200)
    card_description = models.TextField(null=True)
    image = models.ImageField(upload_to='images/', null=True)
    created_date = models.DateTimeField(auto_now_add=True)
    updated_date = models.DateTimeField(auto_now=True)
    archived = models.BooleanField(default=False)

    def __str__(self):
        return self.card_title
class List(models.Model):
    """A named column of cards on a board.

    Fields:
        board: the Board this list belongs to (cascade-deleted with it).
        author: the User who created the list.
        list_title: display title of the list.
        created_date: set automatically on first save.
        updated_date: refreshed automatically on every save.
        archived: True once a user archives the list.
    """
    board = models.ForeignKey('Board', on_delete=models.CASCADE)
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    list_title = models.CharField(max_length=200)
    created_date = models.DateTimeField(auto_now_add=True)
    updated_date = models.DateTimeField(auto_now=True)
    archived = models.BooleanField(default=False)

    def __str__(self):
        return self.list_title
class Board(models.Model):
    """Top-level board owned by a user.

    created_date is set automatically when the instance is created;
    updated_date is refreshed automatically each time save() is called.
    """
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    created_date = models.DateTimeField(auto_now_add=True)
    updated_date = models.DateTimeField(auto_now=True, editable=True)
    archived = models.BooleanField(default=False)

    def __str__(self):
        return self.title
class BoardMembers(models.Model):
    """Membership link between a Board and a User.

    NOTE(review): the original docstring said 'deactivate' is False when a
    member is added, but the field default is True — confirm which is intended.

    Fields:
        board: the board the membership refers to.
        members: the member user (singular despite the plural name).
        deactivate: membership-disabled flag (defaults to True, see note).
        owner: True for the user who created the board.
    """
    board = models.ForeignKey('Board', on_delete=models.CASCADE)
    members = models.ForeignKey(User, on_delete=models.CASCADE)
    deactivate = models.BooleanField(default=True)
    owner = models.BooleanField(default=False)
class BoardInvite(models.Model):
    """Pending board invitation for an e-mail address with no existing account."""
    board = models.ForeignKey('Board', on_delete=models.CASCADE)
    email_member = models.CharField(max_length=200)
class UserProfile(models.Model):
    """One-to-one profile extension of the built-in User (free-text bio)."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    bio = models.TextField(default='')
def create_user_profile(sender, instance, created, **kwargs):
    """post_save handler: create a UserProfile for every newly created User.

    Arguments follow Django's post_save signal signature; 'created' is True
    only on the first save of the instance, so existing users are untouched.
    """
    if created:
        # FIX: the created object was bound to an unused local variable.
        UserProfile.objects.create(user=instance)


# Register the handler so profiles are created automatically after User saves.
post_save.connect(create_user_profile, sender=User)
989,136 | 652bfc953282d81b9229ce2c5807cfdc88360a8d | from overtime.tests.algorithms.centrality.test_closeness import *
from overtime.tests.algorithms.centrality.test_betweenness import *
from overtime.tests.algorithms.centrality.test_pagerank import *
from overtime.tests.algorithms.centrality.test_degree import * |
989,137 | 2f0d235d217faf3bd5edb04ae40b0e8ed711cdc1 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
def RepresentsInt(s):
    """Return True when *s* can be parsed as a base-10 integer."""
    try:
        int(s)
    except ValueError:
        return False
    return True
# For every integer-named temperature directory, fit the mean-squared
# displacement vs time and write one row of diffusion constants per
# temperature to diffconsts.dat.
outfile=open("diffconsts.dat","w")
tempdir=os.listdir(os.getcwd())
for temp in tempdir:
    if os.path.isdir(temp) == True and RepresentsInt(temp)==True:
        os.chdir(temp)
        data=pd.read_table('statis_sum.dat', sep='\s+', header=None)
        # Rename column 0 to 'Time'; the remaining columns keep integer labels.
        data.columns=['Time' if x==0 else x for x in data.columns]
        x=data['Time']
        diffs=[]
        for j in range(1,len(data.columns)):
            y=data[j]
            x1=[]
            y1=[]
            d1=[]
            for i in range(len(x)):
                # Restrict the fit to the diffusive regime (MSD > 4, t > 50).
                if y[i]>4.0 and x[i]>50.0:
                    x1.append(x[i])
                    y1.append(y[i])
                    #Purely for visual inspection, not used for Dfinal calculation
                    Dapprox=(y[i]/x[i])/6 # 10^-8 m^2/s
                    d1.append(Dapprox)
            plt.plot(x1,y1)
            if len(x1) < 50:
                # Too few points for a trustworthy fit: report zero.
                D = 0.0
            else:
                # Linear fit of MSD(t); D = slope / 6 (3-D Einstein relation).
                z1=np.polyfit(x1,y1,1)
                p1=np.poly1d(z1)
                plt.plot(x1,p1(x1),c="r",linewidth=1.0)
                y1hat=p1(x1)
                y1bar=np.sum(y1)/len(y1)
                ssreg1=np.sum((y1hat-y1bar)**2)
                sstot1=np.sum((y1-y1bar)**2)
                # NOTE(review): R^2 is computed but never reported or used.
                rsq1=ssreg1/sstot1
                D=z1[0]/6 # 10^-8 m^2/s
            print(j, D)
            diffs.append(D)
        plt.title(int(temp))
        plt.show()
        os.chdir("../")
        outfile.write('%10s ' % temp)
        outfile.write('%10f'*len(diffs) % tuple(diffs))
        outfile.write("\n")
outfile.close()
|
989,138 | b2e6331365cc5e44b60856811b816c7bbec0fa6b | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Steps(CMakePackage):
    """STochastic Engine for Pathway Simulation"""

    homepage = "https://groups.oist.jp/cnu/software"
    git = "https://github.com/CNS-OIST/STEPS.git"

    version("3.3.0", submodules=True)
    version("3.2.0", submodules=True)
    version("develop", branch="master", submodules=True)

    variant("native", default=True, description="Generate non-portable arch-specific code")
    variant("lapack", default=False, description="Use new BDSystem/Lapack code for E-Field solver")
    variant("petsc", default=False, description="Use PETSc library for parallel E-Field solver")
    variant("mpi", default=True, description="Use MPI for parallel solvers")

    depends_on("blas")
    depends_on("lapack", when="+lapack")
    depends_on("mpi", when="+mpi")
    depends_on("petsc~debug+int64", when="+petsc")
    depends_on("python")
    depends_on("py-cython")

    patch("for_aarch64.patch", when="target=aarch64:")

    # Variant -> CMake switch it drives (order matters: matches historic output).
    _variant_to_flag = (
        ("+native", "TARGET_NATIVE_ARCH"),
        ("+lapack", "USE_BDSYSTEM_LAPACK"),
        ("+petsc", "USE_PETSC"),
        ("+mpi", "USE_MPI"),
    )

    def cmake_args(self):
        """Translate each variant into its -D<FLAG>:BOOL=True/False switch."""
        spec = self.spec
        args = [
            "-D{0}:BOOL={1}".format(flag, "True" if variant in spec else "False")
            for variant, flag in self._variant_to_flag
        ]
        args.append("-DBLAS_LIBRARIES=" + spec["blas"].libs.joined(";"))
        return args

    def setup_run_environment(self, env):
        # This recipe exposes a Python package from a C++ CMake project.
        # This hook is required to reproduce what Spack PythonPackage does.
        env.prepend_path("PYTHONPATH", self.prefix)
|
989,139 | 8732c104706833ea5177bc6af45c5e1ed4436aa4 | # Coverage test of __main__.py, no tight checks on output messages
from test_redisrwlock_connection import runRedisServer, terminateRedisServer
import unittest
import os
import signal
import subprocess
# Run command, return exit status and output messages.
# When wait is False, read at most `limit` lines and return without waiting.
def runCmdOutput(args, wait=True, limit=0):
    """Run `python3 -m redisrwlock <args>` capturing its stdout.

    Returns (Popen, list_of_lines).  With wait=False the process is left
    running and its stdout stays open — the caller must signal and close it.
    """
    cmd_args = ['python3', '-m', 'redisrwlock'] + args
    cmd = subprocess.Popen(cmd_args,
                           stdout=subprocess.PIPE,
                           universal_newlines=True)
    output = list()
    count = 0
    for line in cmd.stdout:
        output.append(line)
        if not wait:
            count += 1
            if count >= limit:
                break
    if wait:
        cmd.wait()
        cmd.stdout.close()
    return cmd, output
def setUpModule():
    """Start one redis-server (port 7788) shared by every test in this module."""
    global _server, _dumper
    _server, _dumper = runRedisServer(port=7788)
def tearDownModule():
    """Stop the module-level redis-server started in setUpModule."""
    global _server, _dumper
    terminateRedisServer(_server, _dumper)
class TestRedisRwlock_cmdline(unittest.TestCase):
    """Coverage-style tests of the `python3 -m redisrwlock` command line.

    Assertions check exit statuses only, not output text.
    """

    def test_option_unrecognized(self):
        """test usage error for unrecognized option"""
        cmd, output = runCmdOutput(['--unrecognized'])
        self.assertEqual(cmd.returncode, os.EX_USAGE)

    def test_option_unhandled(self):
        """test not handled option with hidden test option"""
        cmd, output = runCmdOutput(['--__unhandled__'])
        self.assertEqual(cmd.returncode, os.EX_USAGE)

    def test_option_help(self):
        """test --help option"""
        cmd, output = runCmdOutput(['--help'])
        self.assertEqual(cmd.returncode, os.EX_OK)

    def test_option_version(self):
        """test --version option"""
        cmd, output = runCmdOutput(['--version'])
        self.assertEqual(cmd.returncode, os.EX_OK)

    def test_option_repeat_interval(self):
        """test --retry and --interval options"""
        # run with --retry, see 2 lines, then kill -INT
        cmd, output = runCmdOutput(['-p', '7788', '-r'],
                                   wait=False, limit=2)
        cmd.send_signal(signal.SIGINT)
        self.assertEqual(cmd.wait(), 1)
        cmd.stdout.close()
        # run with --retry, see 4 lines, then kill -INT
        cmd, output = runCmdOutput(['-p', '7788', '-r', '-i', '1'],
                                   wait=False, limit=4)
        cmd.send_signal(signal.SIGINT)
        self.assertEqual(cmd.wait(), 1)
        cmd.stdout.close()
        # invalid --interval option argument (int > 0)
        cmd, output = runCmdOutput(['-p', '7788', '-i', '0'])
        self.assertEqual(cmd.returncode, os.EX_USAGE)
        # --interval option argument ignored if no --retry
        cmd, output = runCmdOutput(['-p', '7788', '-i', '1000'])
        self.assertEqual(cmd.returncode, os.EX_OK)

    def test_option_server_port(self):
        """test --server and --port options"""
        # empty redis-server host name
        cmd, output = runCmdOutput(['-s', '', '-p', '7788'])
        self.assertEqual(cmd.returncode, os.EX_USAGE)
        # port number out of range
        cmd, output = runCmdOutput(['-s', 'localhost', '-p', '99999'])
        self.assertEqual(cmd.returncode, os.EX_USAGE)

    def test_logging_config(self):
        """test logging config from file or default"""
        topdir = os.path.dirname(os.path.dirname(__file__))
        # logging config from default
        # NOTE(review): os.system('rm/cp ...') is unix-only and ignores failures.
        os.system('rm %s/logging.conf' % topdir)
        cmd, output = runCmdOutput(['-p', '7788'])
        self.assertEqual(cmd.returncode, os.EX_OK)
        # logging config from file
        os.system('cp %s/logging.conf.sample %s/logging.conf' %
                  (topdir, topdir))
        cmd, output = runCmdOutput(['-p', '7788'])
        self.assertEqual(cmd.returncode, os.EX_OK)
|
989,140 | 9fd768d42041421e2701f736873d5537af9d425c | import os
import pandas as pd
import getSimilar
from getSimilar import get_similar_Companies
# Dataframes pre-loaded by the getSimilar module.
compDF = getSimilar.compDF  # company master data
contactDF = getSimilar.contactDF
personDF = getSimilar.personDF

# All companies with their indexes, serialised to JSON records
# ({"data": ..., "value": ...}) for the homepage typeahead widget.
allCompanies = compDF[['comp_op_name']].reset_index()
allCompanies.columns = ['data', 'value']
allCompanies = allCompanies.to_json(orient='records')

# NOTE(review): flask imported mid-file; harmless but conventionally top-of-file.
from flask import Flask, request, render_template

app = Flask(__name__)
@app.route("/", methods=['GET','POST'])
def displayCompany():
    """Render the homepage with the company typeahead JSON."""
    return render_template('index.html', allCompanies = allCompanies)
@app.route("/findsimilar/<int:companyID>", methods=['GET'])
def findsimilar(companyID):
    """Render the 10 companies most similar to the company at row *companyID*.

    companyID is a positional (iloc) index into compDF, not comp_ID.
    """
    similarDF = get_similar_Companies(companyID, 10)
    return render_template('similar.html',
                           similarDF=similarDF,
                           selectcompanyDF=compDF.iloc[companyID],
                           selectID=companyID)
@app.route("/profile/<int:companyID>", methods=['GET'])
def displayProfile(companyID):
    """Render the profile page for the company at row *companyID*."""
    a = compDF.iloc[companyID].comp_ID
    selectPersonDF = personDF[personDF.comp_ID == a]
    # NOTE(review): contacts are looked up by positional index, assuming
    # contactDF rows align 1:1 with compDF rows — confirm that invariant.
    return render_template('profile.html',
                           selectcompanyDF=compDF.iloc[companyID],
                           selectContactDF=contactDF.iloc[companyID],
                           selectPersonDF=selectPersonDF,
                           selectID=companyID)
if __name__ == '__main__':
    app.debug = True
    # Honour the PORT env var (e.g. on a PaaS); default to 5000 locally.
    port = int(os.environ.get("PORT", 5000))
    app.run(host='0.0.0.0', port=port)
|
989,141 | 6f14883d0670911a434893e770648934cf999b09 | import matplotlib.pyplot as plt
# Plotting #########################
# Experiment 2: percentage of alive nodes vs percentage of destroyed nodes.
x = [10, 15, 20, 25, 30, 35, 40]
'''
# block15, 18
y1 = [13.5, 12, 10, 6, 5, 1.2, 1]
y2 = [20, 15.3, 9.4, 7, 5.5, 4.67, 3]
y3 = [54.5, 41.7, 34.9, 29.3, 25.8, 20.7, 18.4]
# block15,28
y1 = [22.4, 14, 10.1, 7.62, 5.5, 3.7, 2]
y2 = [20.04, 15.69, 9.9, 7.5, 6, 5, 4]
y3 = [58.3, 44.7, 38.4, 31, 25.5, 20.8, 15.2]
'''
'''
# block20,18
y1 = [22, 16, 8, 6, 7.13 ,6.2, 4]
y2 = [21, 19, 12, 7.4, 5.5, 4.4, 1.4]
y3 = [59.8, 42.2, 36.7, 29.8, 26.8, 20.4, 18.5]
'''
# block20,28
y1 = [22, 20, 18, 13, 8, 7.5, 3.5]
y2 = [23, 20, 13.3, 8, 7, 6.7, 4.1]
y3 = [60.6, 54.6, 45, 34, 26.4, 22 ,19.5]

figsize = 4.5, 4.2
figure, ax = plt.subplots(figsize=figsize)
# BUG FIX: the Line2D keyword is lowercase 'marker'; 'Marker' is rejected
# by matplotlib's property setter.
plt.plot(x, y1, color='mediumseagreen', label='MRSC', marker='^')
plt.plot(x, y2, color='cornflowerblue', label='RTN', linestyle='-', marker='o')
plt.plot(x, y3, color='lightsalmon', label='RDNR', linestyle='-', marker='s')

# Experiment 2
# plt.title("Comparison of Alive Nodes After Second Destroy")
plt.xlabel("Percentage of destroyed nodes")
plt.ylabel("Percentage of alive nodes")
# plt.grid(b=True, axis='y')  # show only y-axis grid lines
ax = plt.gca()  # handle to the current axes
ax.spines['bottom'].set_linewidth(1)  # bottom spine width
ax.spines['left'].set_linewidth(1)  # left spine width
ax.spines['right'].set_linewidth(1)  # right spine width
ax.spines['top'].set_linewidth(1)  # top spine width
plt.axis([9, 41, -5, 65])
plt.legend(bbox_to_anchor=(0.03, 0.47), loc=3, borderaxespad=0,
           edgecolor='k', fancybox=False)
plt.show()
|
989,142 | 064b5d2c8641675bb2d2d53f252297f5221795d4 |
import os, time
import pathlib
from multiprocessing import Process
import papermill as pm
def build_command(executable, dir, file, args=None):
    """Return the shell command `<executable> <dir>/<file> [args...]`.

    *args* may be a list (each element str()-ed and space-joined) or a
    single pre-formatted string; None appends nothing.
    """
    cmd = executable + " " + dir + '/' + file
    if type(args) is list:
        for arg in args:
            cmd += ' ' + str(arg)
    elif type(args) is str:
        cmd += ' ' + args
    return cmd


def run(dir, file, args=None):
    """Run a Python script via os.system, echoing the command first."""
    a = build_command("python", dir, file, args)
    print(a)
    os.system(a)


def run_nb(dir, file, args=None):
    """Execute a notebook via the papermill CLI, echoing the command first.

    (Shares build_command with run(); the two originals were copy-pasted
    duplicates differing only in the executable name.)
    """
    a = build_command("papermill", dir, file, args)
    print(a)
    os.system(a)
def runMulti(dir, file, args=None):
    """Run *file* once per (startDate, endDate) pair in parallel processes.

    args = [startDates, endDates, extra...]; scalar dates are promoted to
    one-element lists, and pairs beyond the shorter list are ignored.
    """
    starts, ends = args[0], args[1]
    if type(starts) is not list:
        starts = [starts]
    if type(ends) is not list:
        ends = [ends]
    workers = [
        Process(target=run, args=(dir, file, [start, end] + args[2:]))
        for start, end in zip(starts, ends)
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def timer_start():
    """Return the current wall-clock time in seconds (a fresh start mark)."""
    return time.time()


def timer_elapsed(t0):
    """Return the seconds elapsed since start mark *t0*."""
    return time.time() - t0


def timer_restart(t0, msg):
    """Print the time elapsed since *t0* alongside *msg*; return a new start mark."""
    print(timer_elapsed(t0), msg)
    return timer_start()
def main():
    """Run the scorecard training script from this file's directory, timing it."""
    dir = str(pathlib.Path(__file__).parent.absolute())
    # cmap options: https://matplotlib.org/stable/tutorials/colors/colormaps.html
    # startYear startMonth endYear endMonth group dataset cmap
    t0 = timer_start()
    t1 = t0
    # run(dir, 'read_acag_pm2-5.py', ['200001', '200012', 'YlOrRd', os.path.join('USA_states_counties', 'us_states'), '01-AL-Alabama.geojson', USAcounties, True])
    run(dir, 'TrainingaScorecardmodelusingAutoLoansDataset.py', [])
    t1 = timer_restart(t1, 'main total time')


if __name__ == "__main__":
    main()
|
989,143 | 3aefaedcd2292dc8aac88f06a1142fc785eb6031 | # Importing library random
import random
import csv
# for x in range(10):
#     print(random.randint(1,101))


def generate_numbers(count):
    """Return *count* random Irish mobile numbers formatted '08d ddd dddd'."""
    numbers = []
    for _ in range(count):
        digits = [random.randint(0, 9) for _ in range(7)]
        # BUG FIX: the original emitted digits[5] twice and never used
        # digits[6], so the last two digits were always identical.
        numbers.append("08 {0}{1}{2} {3}{4}{5}{6}".format(*digits))
    return numbers


def main():
    """Prompt for a count, then print each number and append it to phones.csv."""
    count = int(input("How many Irish phone number do you want to generate?"))
    # Open the CSV once (append mode preserves prior runs) instead of
    # reopening it for every single number as the original did.
    with open('phones.csv', mode='a') as csv_file:
        writer = csv.writer(csv_file, delimiter=',')
        for number in generate_numbers(count):
            print(number)
            writer.writerow([number])


if __name__ == "__main__":
    main()
|
989,144 | a8397fdd08278065e2aa955f70b90c407d041422 |
# AND Gate
def AND(x1, x2):
    """Perceptron AND gate using an explicit firing threshold (theta)."""
    w1, w2, theta = 0.5, 0.5, 0.7
    weighted_sum = x1 * w1 + x2 * w2
    return 1 if weighted_sum > theta else 0
# Truth-table check of the threshold AND gate: expect 0, 0, 0, 1.
print(AND(0, 0))
print(AND(1, 0))
print(AND(0, 1))
print(AND(1, 1))

# Add weight and bias
# Demonstrates rewriting the threshold as a bias term: fire when w.x + b > 0.
import numpy as np
x = np.array([0,1])
w = np.array([0.5,0.5])
b = -0.7
print(w*x)
print(np.sum(w*x))
print(np.sum(w*x)+b)
# Re-create AND Gate with weight and bias
def AND(x1, x2):
    """AND gate as a perceptron: fires (returns 1) when w.x + b > 0."""
    inputs = np.array([x1, x2])
    weights = np.array([0.5, 0.5])
    bias = -0.7
    activation = np.sum(weights * inputs) + bias
    return 0 if activation <= 0 else 1
# Re-create NAND Gate with weight and bias
def NAND(x1, x2):
    """NAND gate: the AND perceptron with negated weights and bias."""
    inputs = np.array([x1, x2])
    weights = np.array([-0.5, -0.5])
    bias = 0.7
    activation = np.sum(weights * inputs) + bias
    return 0 if activation <= 0 else 1
# Re-create OR Gate with weight and bias
def OR(x1, x2):
    """OR gate: same weights as AND but a smaller bias, so one input suffices."""
    inputs = np.array([x1, x2])
    weights = np.array([0.5, 0.5])
    bias = -0.2
    activation = np.sum(weights * inputs) + bias
    return 0 if activation <= 0 else 1
# XOR Gate by using AND, NAND, OR Gate
def XOR(x1, x2):
    """XOR as the classic two-layer network: AND(NAND(x), OR(x))."""
    return AND(NAND(x1, x2), OR(x1, x2))
# Truth-table check of XOR: expect 0, 1, 1, 0.
print(XOR(0,0))
print(XOR(1,0))
print(XOR(0,1))
print(XOR(1,1))
989,145 | 8ec9cd951913f2eb249a6e324c0ff0755ca7ea10 | from flask import Flask
from flask_restful import Api, Resource, reqparse, abort, fields, marshal_with
from flask_sqlalchemy import SQLAlchemy, Model
import json
app = Flask(__name__)
api = Api(app)
# SQLite database file stored alongside the application.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db'
db = SQLAlchemy(app)
class VideoModel(db.Model):
    """SQLAlchemy row for a video: integer id plus name/views/likes."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), nullable=False)
    views = db.Column(db.Integer, nullable=False)
    likes = db.Column(db.Integer, nullable=False)

    def __repr__(self):
        return f"Video(name = {self.name}, views = {self.views}, likes = {self.likes})"

    def as_dict(self):
        # Generic column -> value mapping; used by the /videos-all endpoint.
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
# db.create_all()

# Request parsers: validate incoming bodies for PUT (all fields required)
# and PATCH (all fields optional).
video_put_args = reqparse.RequestParser()
video_put_args.add_argument("name", type=str, help='Please specify name of the video', required=True)
video_put_args.add_argument("views", type=int, help='Please specify views of the video', required=True)
video_put_args.add_argument("likes", type=int, help='Please specify likes of the video', required=True)

video_update_args = reqparse.RequestParser()
video_update_args.add_argument("name", type=str, help="Name of the video is required")
video_update_args.add_argument("views", type=int, help="Views of the video")
video_update_args.add_argument("likes", type=int, help="Likes on the video")

# Field map used by marshal_with to serialise VideoModel instances.
resource_fields = {
    'id': fields.Integer,
    "name": fields.String,
    "views": fields.Integer,
    "likes": fields.Integer
}

# NOTE(review): this list-wrapped variant appears unused — confirm before removing.
resource_fields_all = [{
    'id': fields.Integer,
    "name": fields.String,
    "views": fields.Integer,
    "likes": fields.Integer
}]
class GetAllVideos(Resource):
    """Read-only endpoint returning every video as a list of plain dicts."""

    def get(self):
        """GET /videos-all -> JSON list of all VideoModel rows.

        Uses as_dict() instead of marshal_with so new columns appear
        automatically in the response.
        """
        # FIX: replaced the manual enumerate/append loop (unused index,
        # leftover debug prints) with a comprehension.
        return [video.as_dict() for video in VideoModel.query.all()]
class Video(Resource):
    """CRUD endpoint for a single video at /videos/<int:video_id>."""

    @marshal_with(resource_fields)
    def get(self, video_id):
        """Return the video, or 404 when the id is unknown."""
        result = VideoModel.query.filter_by(id=video_id).first()
        if not result:
            abort(404, message='Not Found')
        return result

    @marshal_with(resource_fields)
    def put(self, video_id):
        """Create a video with the given id; 409 when it already exists."""
        if VideoModel.query.filter_by(id=video_id).first():
            abort(409, message='Such Id Already exists')
        args = video_put_args.parse_args()
        video = VideoModel(id=video_id, name=args['name'], views=args['views'], likes=args['likes'])
        db.session.add(video)
        db.session.commit()
        return video, 201

    @marshal_with(resource_fields)
    def patch(self, video_id):
        """Partially update an existing video; 404 when missing.

        NOTE(review): the truthiness checks below mean a field can never be
        updated to 0 or '' — confirm whether that is intended.
        """
        args = video_update_args.parse_args()
        result = VideoModel.query.filter_by(id=video_id).first()
        if not result:
            abort(404, message="Video doesn't exist, cannot update")
        if args['name']:
            result.name = args['name']
        if args['views']:
            result.views = args['views']
        if args['likes']:
            result.likes = args['likes']
        db.session.commit()
        return result

    # def delete(self, video_id):
    #     # abort_if_video_doesnt_exist(videoId)
    #     del videos[video_id]
    #     return 'successfully deleted', 204
class HelloWorld(Resource):
    """Smoke-test endpoint used to verify the API is up."""

    def get(self):
        return {"message": "Malkoto maimunche Lora burka v taratora"}

    def post(self):
        return {"message": "post received"}


# Route registrations for every resource in this module.
api.add_resource(HelloWorld, '/helloworld')
api.add_resource(Video, '/videos/<int:video_id>')
api.add_resource(GetAllVideos, '/videos-all')

if __name__ == '__main__':
    app.run(debug=True)
989,146 | 06a1f191d5ed113bb104bc17133d8c375421bfb9 | from selenium import webdriver
from time import sleep
from selenium.webdriver import ActionChains
# Automates the password-login flow on qzone.qq.com with Selenium.
bro = webdriver.Chrome('./chromedriver.exe')
bro.get('https://qzone.qq.com')
# The login form lives inside an iframe; switch into it first.
bro.switch_to.frame('login_frame')
# Switch from QR-code login to the username/password form.
user_passwd_enter = bro.find_element_by_id('switcher_plogin')
user_passwd_enter.click()
userName_tag = bro.find_element_by_id('u')
userName_tag.send_keys('1234568')  # username (placeholder)
sleep(1)
password_tag = bro.find_element_by_id('p')
password_tag.send_keys('1234568')  # password (placeholder)
sleep(1)
login_button = bro.find_element_by_id('login_button')
login_button.click()
# A slider CAPTCHA appears at this point and must be solved manually.
sleep(3)
bro.quit()
|
989,147 | 713343f4e7845b56c56520916fcead64ec8272ac |
import argparse
from SDCServer.bacnet.readServer import ReadServer;
import threading
def main():
    """Parse CLI options and launch the BACnet read server on its own thread."""
    parser = argparse.ArgumentParser(description='Run SDC Server')
    parser.add_argument('--read_port', default=61221, type=int,
                        help='Read server listening port')
    parser.add_argument('--write_port', default=61222, type=int,
                        help='Write server listening port')
    parser.add_argument('--read_config',
                        default='SDCServer/bacnet/IW_lighting_ReadConfig.cfg',
                        type=str, help='Read server configuration file')
    parser.add_argument('--write_config',
                        default='SDCServer/bacnet/IW9701_WriteConfig.cfg',
                        type=str, help='Write server configuration file')
    args = parser.parse_args()
    # NOTE(review): write_port/write_config are parsed but never used here;
    # presumably a write server is started elsewhere — confirm.
    read_server = ReadServer()
    read_thread = threading.Thread(
        target=lambda: read_server.runReadServer(args.read_port, args.read_config))
    read_thread.start()


if __name__ == '__main__':
    main()
|
989,148 | 430e57e6584674ea9827377c54e9ee9652adc503 | from PttParser import *
import pymongo
from pymongo import MongoClient
# setup mongodb client
dbClient = MongoClient()
db = dbClient['FuMou']
articleList = db['Article-List']

# Re-parse every stored article URL and merge the parsed metadata.
parser = PttArticleParser()
for art in articleList.find():
    parser.reset()
    url = "http://www.ptt.cc%s" % art['url']
    parser.parse_article(url)
    # NOTE(review): this mutates the local dict only; the change is never
    # persisted back to MongoDB (no update_one/replace_one) — confirm intent.
    art.update( parser.meta )
|
989,149 | 9959ca504a73da242c199ed6d27eb59370c052ba | import pyopenssl
f = open('original')  # NOTE(review): handle is never read or closed here — confirm this stub is intentional
|
989,150 | 9284f8d41000c1a23c09b0ccfee894949b91ac32 | #!/usr/bin/python
import mpd
import select
import xmpp
import time
import configuration
# MPD tag name -> XEP-0118 <tune/> child element name.
TAGS = {'artist': 'artist', 'title': 'title', 'album': 'source'}
# Sentinel "song" published when the player is stopped or paused.
NOTPLAYING = {}
class MpdConnection:
    """Context-managed connection to an MPD server."""

    def __init__(self, host, port, password):
        # password may be None, in which case no authentication is sent.
        self._host = host
        self._port = port
        self._password = password

    def __enter__(self):
        print("Opening mpd connection")
        self._conn = mpd.MPDClient()
        self._conn.connect(host = self._host, port = self._port)
        if self._password is not None:
            self._conn.password(self._password)
        return self

    def __exit__(self, *args):
        print("Closing mpd connection")
        self._conn.disconnect()

    def state(self):
        """Return MPD's player state string (e.g. 'play', 'stop', 'pause')."""
        return self._conn.status()['state']

    def currentsong(self):
        """Return the current song's tag dictionary."""
        return self._conn.currentsong()

    def idle(self):
        """Block until MPD reports any change, via the IDLE protocol command."""
        self._conn.send_idle()
        select.select([self._conn], [], [])
        self._conn.fetch_idle()
class XmppTune:
    """Publishes the current tune (XEP-0118) over an invisible XMPP session."""

    NS_TUNE = 'http://jabber.org/protocol/tune'

    def __init__(self, jid, password):
        self._jid = xmpp.protocol.JID(jid)
        self._password = password

    def __enter__(self):
        print("Opening xmpp connection")
        self._conn = xmpp.client.Client(self._jid.getDomain(), debug=[])
        self._conn.connect()
        self._conn.auth(self._jid.getNode(), self._password, self._jid.getResource())
        self._invisibility()
        # Negative priority keeps this session from receiving chat messages.
        self._conn.send(xmpp.protocol.Presence(priority = -128))
        return self

    def __exit__(self, *args):
        print("Closing xmpp connection")
        # Publish an empty tune so contacts stop showing stale song info.
        self._publish({})
        self._conn.disconnect()

    def _invisibility(self):
        """Install and activate a privacy list that blocks outgoing presence."""
        iq = xmpp.protocol.Iq(frm = self._jid, typ = 'set')
        query = iq.addChild('query', namespace = xmpp.protocol.NS_PRIVACY)
        list_ = query.addChild('list', {'name': 'invisible'})
        item = list_.addChild('item', {'action': 'deny', 'order': 1})
        presence_out = item.addChild('presence-out')
        self._conn.send(iq)
        iq = xmpp.protocol.Iq(frm = self._jid, typ = 'set')
        query = iq.addChild('query', namespace = xmpp.protocol.NS_PRIVACY)
        active = query.addChild('active', {'name': 'invisible'})
        self._conn.send(iq)

    def _publish(self, song):
        """
        Build the xml element and send it.
        http://xmpp.org/extensions/xep-0118.html
        """
        iq = xmpp.protocol.Iq(frm = self._jid, typ = 'set')
        pubsub = iq.addChild('pubsub', namespace = xmpp.protocol.NS_PUBSUB)
        publish = pubsub.addChild('publish', {'node': self.NS_TUNE})
        item = publish.addChild('item')
        tune = item.addChild('tune', namespace = self.NS_TUNE)
        for tag, value in song.items():
            tune.addChild(tag).setData(value)
        self._conn.send(iq)
        #print(str(iq))

    def song_changed(self, song):
        """
        Handle change of active song: log it and publish the mapped tags.
        """
        if song == NOTPLAYING:
            print("Not playing")
        else:
            print("Changed to: {} - {}". format(song.get('artist', 'Unknown artist'), song.get('title', 'Unknown title')))
        # Map MPD tag names to XEP-0118 element names before publishing.
        self._publish({TAGS[tag]: value for (tag, value) in song.items() if tag in TAGS})
def work():
    """Main loop: watch MPD and republish the tune whenever the song changes."""
    lastsong = None
    with MpdConnection(configuration.MPD_HOST, configuration.MPD_PORT,
                       configuration.MPD_PASSWORD) as mpd_conn:
        with XmppTune(configuration.XMPP_JID,
                      configuration.XMPP_PASSWORD) as xmpp_conn:
            while True:
                if mpd_conn.state() != 'play':
                    currentsong = NOTPLAYING
                else:
                    currentsong = mpd_conn.currentsong()
                if currentsong != lastsong:
                    lastsong = currentsong
                    xmpp_conn.song_changed(currentsong)
                # Block here until MPD signals any change.
                mpd_conn.idle()
# Reconnect forever on connection loss; Ctrl-C exits cleanly.
try:
    while True:
        try:
            work()
        except (IOError, mpd.ConnectionError):
            print("Waiting {} seconds for retry".format(configuration.RETRY_TIME))
            time.sleep(configuration.RETRY_TIME)
except KeyboardInterrupt:
    print("Interrupted.")
|
989,151 | a9503eccb4efb2ba54d92624286ad0aa584063f8 | __all__ = [
"CommsDaemon",
]
import signal
import time
import datetime
import ConfigParser
import top
from top.utils.log import log
from top.utils.files import get_directory_files_list
from top.utils.setter import (set_scalar,
set_list)
class CommsDaemon(top.DaemonService):
"""Daemoniser facility for the :class:`top.Comms` class.
.. attribute:: *comms_dir*
directory where comms files are read from for further processing
.. attribute:: *q_warning*
comms queue warning threshold. If number of messages exceeds this
threshold (and is under the :attr:`q_error` threshold then a
warning email notification is triggered
.. attribute:: *q_error*
comms queue error threshold. If number of messages exceeds this
threshold then an error email notification is triggered and
the comms daemon is terminated
.. attribute:: *controlled_templates*
list of comms templates that are controlled by the delivery
period thresholds
.. attribute:: *uncontrolled_templates*
list of comms templates that are *NOT* controlled by the delivery
period thresholds. In other words, comms can be sent 24 x 7
.. attribute:: *skip_days*
list of days ['Saturday', 'Sunday'] to not send messages. An empty
list (or no skip days) suggests that comms can be sent on any day
.. attribute:: *send_time_ranges*
time ranges when comms can be sent. An empty list (or no
time ranges) suggests that comms can be sent at any time
"""
    # Class-level defaults; instances override them through the set_* methods,
    # whose bodies are generated by the set_scalar/set_list decorators.
    _comms = None
    _comms_dir = None
    _q_warning = 100
    _q_error = 1000
    _controlled_templates = ['body']
    _uncontrolled_templates = ['ret']
    _skip_days = ['Sunday']
    _send_time_ranges = ['08:00-19:00']

    @property
    def comms(self):
        # Underlying top.Comms worker instance (read-only).
        return self._comms

    @property
    def comms_dir(self):
        return self._comms_dir

    @set_scalar
    def set_comms_dir(self, value):
        pass

    @property
    def q_warning(self):
        return self._q_warning

    @set_scalar
    def set_q_warning(self, value):
        pass

    @property
    def q_error(self):
        return self._q_error

    @set_scalar
    def set_q_error(self, value):
        pass

    @property
    def controlled_templates(self):
        return self._controlled_templates

    @set_list
    def set_controlled_templates(self, values=None):
        pass

    @property
    def uncontrolled_templates(self):
        return self._uncontrolled_templates

    @set_list
    def set_uncontrolled_templates(self, values=None):
        pass

    @property
    def skip_days(self):
        return self._skip_days

    @set_list
    def set_skip_days(self, values=None):
        pass

    @property
    def send_time_ranges(self):
        return self._send_time_ranges

    @set_list
    def set_send_time_ranges(self, values=None):
        pass
    @property
    def sms_api(self):
        # SMS REST credentials read from the [rest] config section.
        # Returns an empty dict when the section/options are missing; note a
        # partially filled dict is possible if only some options exist.
        sms_api = {}
        try:
            sms_api['api'] = self.config.get('rest', 'sms_api')
            sms_api['api_username'] = self.config.get('rest', 'sms_user')
            sms_api['api_password'] = self.config.get('rest', 'sms_pw')
            log.debug('%s SMS REST credentials: %s' %
                      (self._facility, str(sms_api)))
        except (ConfigParser.NoOptionError,
                ConfigParser.NoSectionError), err:
            log.debug('%s SMS REST credentials not in config: %s' %
                      (self._facility, err))
        return sms_api
    @property
    def email_api(self):
        # Email REST credentials read from the [rest] config section
        # (mirrors sms_api, plus the failure-notification address).
        email_api = {}
        try:
            email_api['api'] = self.config.get('rest', 'email_api')
            email_api['api_username'] = self.config.get('rest', 'email_user')
            email_api['api_password'] = self.config.get('rest', 'email_pw')
            email_api['support'] = self.config.get('rest', 'failed_email')
            log.debug('%s Email REST credentials: %s' %
                      (self._facility, str(email_api)))
        except (ConfigParser.NoOptionError,
                ConfigParser.NoSectionError), err:
            log.debug('%s Email REST credentials not in config: %s' %
                      (self._facility, err))
        return email_api
@property
def comms_kwargs(self):
    """Assemble the keyword arguments used to construct :class:`top.Comms`.

    Every value is sourced on a best-effort basis: a missing setting is
    logged at debug level and simply omitted from the returned dict.
    """
    kwargs = {}

    try:
        kwargs['prod'] = self.prod
    except AttributeError, err:
        log.debug('%s prod instance name not in config: %s ' %
                  (self._facility, err))

    try:
        kwargs['db'] = self.config.db_kwargs()
    except AttributeError, err:
        log.debug('%s DB kwargs not in config: %s ' %
                  (self._facility, err))

    try:
        kwargs['proxy'] = self.config.proxy_string()
    except AttributeError, err:
        log.debug('%s proxy kwargs not in config: %s ' %
                  (self._facility, err))

    try:
        kwargs['scheme'] = self.config.proxy_scheme
    except AttributeError, err:
        log.debug('%s proxy scheme not in config: %s ' %
                  (self._facility, err))

    try:
        kwargs['sms_api'] = self.sms_api
    except AttributeError, err:
        log.debug('%s SMS REST credentials not in config: %s ' %
                  (self._facility, err))

    try:
        kwargs['email_api'] = self.email_api
    except AttributeError, err:
        log.debug('%s Email REST credentials not in config: %s ' %
                  (self._facility, err))

    try:
        # Full template set is the union of the controlled and
        # uncontrolled template lists.
        kwargs['templates'] = (self.config.controlled_templates +
                               self.config.uncontrolled_templates)
    except (ConfigParser.NoOptionError,
            ConfigParser.NoSectionError), err:
        log.debug('%s templates cannot be built from %s: %s ' %
                  (self._facility, 'comms.*controlled_templates', err))

    try:
        kwargs['returns_templates'] = self.config.uncontrolled_templates
    except (ConfigParser.NoOptionError,
            ConfigParser.NoSectionError), err:
        log.debug('%s returns_templates cannot be built from %s: %s ' %
                  (self._facility, 'comms.uncontrolled_templates', err))

    # NOTE(review): every other log call in this class uses
    # self._facility -- confirm a public ``facility`` attribute/property
    # exists, otherwise this line raises AttributeError.
    log.debug('%s comms_kwargs: "%s"' % (self.facility, kwargs))

    return kwargs
def __init__(self,
             pidfile,
             file=None,
             dry=False,
             batch=False,
             config=None):
    """Comms daemon initialiser.

    **Args:**
        *pidfile*: path to the daemon PID file

    **Kwargs:**
        *file*: a single comms event file to process (one-shot mode)

        *dry*: only report, do not execute

        *batch*: run a single iteration and exit

        *config*: configuration object that provides the comms settings
    """
    top.DaemonService.__init__(self,
                               pidfile=pidfile,
                               file=file,
                               dry=dry,
                               batch=batch,
                               config=config)

    # Seed instance state from configuration when one is supplied;
    # otherwise the class-level defaults remain in effect.
    if self.config is not None:
        self.set_comms_dir(self.config.comms_dir)
        self.set_loop(self.config.comms_loop)
        self.set_q_warning(self.config.comms_q_warning)
        self.set_q_error(self.config.comms_q_error)
        self.set_controlled_templates(self.config.controlled_templates)
        tmp = self.config.uncontrolled_templates
        self.set_uncontrolled_templates(tmp)
        self.set_skip_days(self.config.skip_days)
        self.set_send_time_ranges(self.config.send_time_ranges)
def _start(self, event):
    """Override the :method:`top.utils.Daemon._start` method.

    Will perform a single iteration if the :attr:`file` attribute has
    a list of filenames to process.  Similarly, dry and batch modes
    only cycle through a single iteration.

    **Args:**
        *event* (:mod:`threading.Event`): Internal semaphore that
        can be set via the :mod:`signal.signal.SIGTERM` signal event
        to perform a function within the running process.
    """
    signal.signal(signal.SIGTERM, self._exit_handler)

    # Lazily create the comms worker on first start.
    if self._comms is None:
        self._comms = top.Comms(**(self.comms_kwargs))

    all_templates = (self.controlled_templates +
                     self.uncontrolled_templates)
    log.info('Enabled templates: %s' % all_templates)

    while not event.isSet():
        files = []
        # Abort immediately when the DB layer is unreachable.
        if not self._comms.db():
            log.error('ODBC connection failure -- aborting')
            event.set()
            continue

        # Controlled templates honour skip days and send time ranges.
        if not self._skip_day():
            if self._within_time_ranges():
                if self.file is not None:
                    # One-shot mode: process the given file then exit.
                    files.append(self.file)
                    event.set()
                else:
                    for filter in self.controlled_templates:
                        log.debug('controlled template filter: %s' %
                                  filter)
                        files.extend(self.get_comms_files(filter))

        # Uncontrolled templates are gathered on every cycle (24 x 7).
        for filter in self.uncontrolled_templates:
            log.debug('uncontrolled template filter: %s' % filter)
            files.extend(self.get_comms_files(filter))

        if len(files):
            self.reporter.reset('Comms')
            log.info('All files: "%s"' % files)

        # Start processing files.
        if self._message_queue_ok(len(files), dry=self.dry):
            for file in files:
                self.reporter(self._comms.process(file, self.dry))
            if len(files):
                stats = self.reporter.report()
                log.info(stats)
        else:
            # Queue breached the error threshold -- shut the daemon down.
            log.info('Comms queue threshold breached -- aborting')
            event.set()

        if not event.isSet():
            if self.dry:
                log.info('Dry run iteration complete -- aborting')
                event.set()
            elif self.batch:
                log.info('Batch run iteration complete -- aborting')
                event.set()
            else:
                time.sleep(self.loop)
def _skip_day(self):
    """Check whether comms is configured to skip current day of week.

    **Returns**:
        ``boolean``::

            ``True`` if current day is a skip day
            ``False`` if current day is **NOT** a skip day
    """
    today = datetime.datetime.now().strftime('%A').lower()
    log.debug('Current day/skip days: "%s/%s"' %
              (today.title(), str(self.skip_days)))

    # Case-insensitive membership test against the configured skip days.
    lowered_skip_days = [day.lower() for day in self.skip_days]
    skipping = today in lowered_skip_days
    if skipping:
        log.info('%s is a configured comms skip day' %
                 today.title())

    log.debug('Is a comms skip day?: "%s"' % str(skipping))

    return skipping
def _within_time_ranges(self):
    """Check whether comms is configured to send comms at current time.

    Expects ranges to be of the format 'HH:MM-HH:MM' otherwise it will
    return ``False`` as no assumptions are made.

    **Returns**:
        ``boolean``::

            ``True`` if current time is within the ranges
            ``False`` if current day is **NOT** within the ranges
    """
    is_within_time_range = True

    current_time = datetime.datetime.now()
    log.debug('Current time/send_time_ranges: "%s/%s"' %
              (str(current_time).split('.')[0],
               str(self.send_time_ranges)))

    # NOTE(review): ``range`` shadows the builtin.  Multiple ranges are
    # effectively ANDed: the current time must fall inside every
    # configured window, or the loop breaks out with False.
    for range in self.send_time_ranges:
        try:
            (lower_str, upper_str) = range.split('-')
        except ValueError, err:
            # Malformed range: fail closed (do not send).
            log.error('Time range "%s" processing error: %s' %
                      (range, err))
            is_within_time_range = False
            break

        # Anchor both HH:MM bounds to today's date before comparing.
        lower_str = '%s %s' % (current_time.strftime('%Y-%m-%d'),
                               lower_str)
        log.debug('Lower date string: %s' % lower_str)
        upper_str = '%s %s' % (current_time.strftime('%Y-%m-%d'),
                               upper_str)
        log.debug('Upper date string: %s' % upper_str)

        lower_time = time.strptime(lower_str, "%Y-%m-%d %H:%M")
        lower_dt = datetime.datetime.fromtimestamp(time.mktime(lower_time))
        upper_time = time.strptime(upper_str, "%Y-%m-%d %H:%M")
        upper_dt = datetime.datetime.fromtimestamp(time.mktime(upper_time))
        if current_time < lower_dt or current_time > upper_dt:
            is_within_time_range = False
            break

    log.debug('Is current time within range?: %s' %
              str(is_within_time_range))

    return is_within_time_range
def _message_queue_ok(self, message_count, dry=False):
    """Check if the *message_count* breaches the configured thresholds.

    Will send email to support if thresholds are breached.  Furthermore,
    if the higher threshold is breached, the comms facility will be
    terminated pending further investigation.

    **Args:**
        *message_count*: message queue length

    **Kwargs:**
        *dry*: only report, do not execute

    **Returns**:
        ``boolean``::

            ``True`` if queue lengths are within accepted thresholds
            ``False`` if queue lengths are NOT within accepted thresholds
    """
    queue_ok = True

    current_dt_str = datetime.datetime.now().strftime('%c')
    if message_count > self.q_error:
        # Hard limit: alert support and signal the caller to abort.
        log.info('Message queue count %d breaches error threshold %d' %
                 (message_count, self.q_error))
        queue_ok = False
        subject = ('Error - Comms message count was at %d' %
                   message_count)
        d = {'count': message_count,
             'date': current_dt_str,
             'error_threshold': self.q_error}
        mime = self.emailer.create_comms(subject=subject,
                                         data=d,
                                         template='message_q_err',
                                         prod=self.prod)
        # NOTE(review): mixes self.emailer and self._emailer -- confirm
        # both names resolve to the same underlying object.
        self._emailer.set_recipients(self.support_emails)
        self.emailer.send(mime_message=mime, dry=dry)
    elif message_count > self.q_warning:
        # Soft limit: warn support but keep processing.
        log.info('Comms queue count %d breaches warning threshold %d' %
                 (message_count, self.q_warning))
        subject = ('Warning - Comms message count was at %d' %
                   message_count)
        d = {'count': message_count,
             'date': current_dt_str,
             'warning_threshold': self.q_warning}
        mime = self.emailer.create_comms(subject=subject,
                                         data=d,
                                         template='message_q_warn',
                                         prod=self.prod)
        self._emailer.set_recipients(self.support_emails)
        self.emailer.send(mime_message=mime, dry=dry)

    return queue_ok
def get_comms_files(self, template=None):
    """Produce a list of files in the :attr:`comms_dir`.

    Comms files are matched based on the following pattern::

        <action>.<job_item.id>.<template>

    where:

    * ``<action>`` is the communications medium (either SMS or email are
      supported)

    * ``<job_item.id>`` is the integer based primary key from the
      job_item table

    * ``<template>`` is the string template used to build the message
      content

    **Kwargs:**
        *template*: template token to filter comms event files against

    **Returns:**
        list of files to process or empty list if the :attr:`comms_dir`
        is not defined or does not exist
    """
    log.debug('Searching for comms in dir: %s' % self.comms_dir)

    comms_files = []
    # NOTE(review): prefer raw strings for regex patterns
    # (r'^(email|sms)\.(\d+)\.(\w+)$') when this module is next touched.
    filter = '^(email|sms)\.(\d+)\.(\w+)$'
    if template is not None:
        filter = '^(email|sms)\.(\d+)\.(%s)$' % template
    comms_files.extend(get_directory_files_list(self.comms_dir,
                                                filter))
    log.debug('Comms event files found: "%s"' % comms_files)

    return comms_files
|
989,152 | 19a4248f32c5faf34d94bfa721d445fa84324e2b | import os
import time
import shutil
import mplfinance as mpf
import pandas as pd
from tqdm import tqdm
def get_kchart(src):
    """Render the last 60 rows of ``./stock_data/<code>.csv`` as a
    candlestick PNG under ``./static/k_chart/``.

    :param src: source file name, e.g. ``'600000.csv'``; the extension is
        stripped to derive the stock code used for the input/output paths.
    """
    # Strip the extension robustly instead of assuming it is exactly
    # four characters long ('src[:-4]' silently corrupts other names).
    src = os.path.splitext(src)[0]
    df = pd.read_csv(f'./stock_data/{src}.csv')
    # The 'code' column is constant per file and confuses mplfinance.
    data = df.drop(columns='code')
    data.index = pd.DatetimeIndex(data['date'])
    save = dict(fname=f'./static/k_chart/{src}.png', dpi=80, pad_inches=0.25)
    mpf.plot(data.tail(60), type='candle',
             volume=True, savefig=save, style='yahoo')
def clear_kchart_buffers():
    """Ensure ``./static/k_chart`` exists and is empty.

    An existing directory is wiped and recreated; a missing one is
    created (including parents).
    """
    target = './static/k_chart'
    if os.path.exists(target):
        # Drop all previously rendered charts, then recreate the folder.
        shutil.rmtree(target)
        os.mkdir(target)
    else:
        os.makedirs(target)
def print_kchart():
    """Regenerate candlestick PNGs for every CSV file under ./stock_data."""
    # Start from an empty output directory so stale charts never linger.
    clear_kchart_buffers()
    src_path = './stock_data'
    src_list = os.listdir(src_path)
    # tqdm renders a console progress bar over the source files.
    pbar = tqdm(src_list)
    for src in pbar:
        pbar.set_description('Printing ' + src)
        get_kchart(src)
if __name__ == '__main__':
    # Dead commented-out timing/benchmark code removed -- it duplicated
    # print_kchart() with a manual loop and wall-clock reporting.
    print_kchart()
|
989,153 | 8dfa24d920e8667a17e489e27eaf1e43e79ccaa7 | import setuptools
with open("README.md", "r") as fh:
    long_description = fh.read()

# Bug fix: the original listed 're' (a standard-library module, not a PyPI
# package) and never passed the list to setup() at all.  The package has no
# third-party runtime dependencies.
REQUIREMENTS = []

CLASSIFIERS = [
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
]

setuptools.setup(name='metinanaliz',
                 version='1.1.6',
                 description='Türkçe Metin Analizi',
                 long_description=long_description,
                 long_description_content_type="text/markdown",
                 url='https://github.com/karanba/MetinAnaliz',
                 author='Altay Karakuş',
                 author_email='altaykarakus@gmail.com',
                 license='MIT',
                 packages=setuptools.find_packages(),
                 install_requires=REQUIREMENTS,
                 classifiers=CLASSIFIERS,
                 python_requires='>=3.6',
                 keywords='analiz, metin, türkçe'
                 )
989,154 | 735e72b140ba8610a88b7b2dda52d0ec203125b1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from PyQt4.QtGui import QListView, \
QMessageBox, QListWidgetItem, QStandardItemModel, QFont,\
QAbstractItemView, QStandardItem
from PyQt4.QtCore import QString, SIGNAL, QSize, Qt, pyqtSlot
from controller.game_controller import GameController
import logging
class GameListWidget(QListView):
    """List view of available game rooms.

    Rows are appended via the game controller's ``game_list`` signal; a
    click on a row re-emits ``enter_room(QString, QString)`` carrying the
    stored (game id, game title) pair.
    """

    def __init__(self, parent=None):
        QListView.__init__(self, parent)
        # Parallel list of (id, title) tuples backing the model rows.
        self.game_list = []
        self.model = QStandardItemModel()
        self.setModel(self.model)
        self.setWordWrap(True)
        self.setUniformItemSizes(True)
        self.setGridSize(QSize(self.rect().width(), 30))
        self.setFont(QFont("Microsoft YaHei", 10))
        self.setEditTriggers(QAbstractItemView.NoEditTriggers)
        self.setSelectionMode(QAbstractItemView.SingleSelection)
        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.game_controller = GameController()
        self.game_controller.connector.connect(SIGNAL('game_list'), self.add_game_item)
        self.game_controller.connector.connect(SIGNAL('game_list_clear'), self.clear)
        self.clicked.connect(self.double_click_on_item)

    def double_click_on_item(self, idx):
        """Slot for ``clicked``: forward the selected room to listeners.

        Bug fix: ``clicked`` delivers a QModelIndex, not an int.  The
        original code formatted the index object with %d (TypeError) and
        used it directly to subscript ``game_list``; ``idx.row()`` is the
        integer row required for both.
        """
        row = idx.row()
        print('%d was clicked' % row)
        self.emit(SIGNAL("enter_room(QString, QString)"),
                  self.game_list[row][0], self.game_list[row][1])

    def add_game_item(self, txt, id):
        """Append a (id, title) entry and show a centred row for it."""
        self.game_list.append((id, txt))
        item = QStandardItem(txt)
        item.setTextAlignment(Qt.AlignCenter)
        self.model.appendRow(item)

    def clear(self):
        """Drop all rows from the model (``game_list_clear`` signal)."""
        self.model.clear()
989,155 | 3e91e00200ba5d20479e1b6717f8d7dfa4f9355d | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pandas_datareader.data as web
from part1 import apple, start, end
# Fast-minus-slow moving-average spread; its sign defines the regime.
apple['20d-50d'] = apple['20d'] - apple['50d']
# np.where() is a vectorized if-else: 1 marks a bullish regime.
apple["Regime"] = np.where(apple['20d-50d'] > 0, 1, 0)
# We have 1's for bullish regimes and 0's for everything else. Below I replace bearish regimes's values with -1, and to maintain the rest of the vector, the second argument is apple["Regime"]
apple["Regime"] = np.where(apple['20d-50d'] < 0, -1, apple["Regime"])
# apple.loc['2016-01-01':'2016-08-07',"Regime"].plot(ylim = (-2,2)).axhline(y = 0, color = "black", lw = 2)
# apple.to_csv('apple.csv')
apple["Regime"].plot(ylim = (-2,2)).axhline(y = 0, color = "black", lw = 2)
apple["Regime"].value_counts()
# To ensure all trades close out, temporarily zero the final regime so a
# closing signal is generated on the last row.
# NOTE(review): DataFrame.ix was removed in pandas 1.0 -- use .iloc for
# positional access if this script is ever upgraded.
regime_orig = apple.ix[-1, "Regime"]
apple.ix[-1, "Regime"] = 0
# A signal is the day-over-day regime change: +1 = buy, -1 = sell.
apple["Signal"] = np.sign(apple["Regime"] - apple["Regime"].shift(1))
# Restore original regime data
apple.ix[-1, "Regime"] = regime_orig
apple.tail()
#apple["Signal"].plot(ylim = (-2, 2))
apple.loc[apple["Signal"] == 1, "Close"]
# Trade ledger: closing price and regime at each buy/sell signal.
apple_signals = pd.concat([
    pd.DataFrame({"Price": apple.loc[apple["Signal"] == 1, "Close"],
                  "Regime": apple.loc[apple["Signal"] == 1, "Regime"],
                  "Signal": "Buy"}),
    pd.DataFrame({"Price": apple.loc[apple["Signal"] == -1, "Close"],
                  "Regime": apple.loc[apple["Signal"] == -1, "Regime"],
                  "Signal": "Sell"}),
])
apple_signals.sort_index(inplace = True)
apple_signals
# Long-trade profits: entry price at each buy, profit realised at the next
# sell, and the date the trade closes.
# NOTE(review): '&' binds tighter than '==', so the first condition parses
# as ((Signal == "Buy") & Regime) == 1 -- parenthesize when upgrading.
apple_long_profits = pd.DataFrame({
    "Price": apple_signals.loc[(apple_signals["Signal"] == "Buy") & apple_signals["Regime"] == 1, "Price"],
    "Profit": pd.Series(apple_signals["Price"] - apple_signals["Price"].shift(1)).loc[
        apple_signals.loc[(apple_signals["Signal"].shift(1) == "Buy") & (apple_signals["Regime"].shift(1) == 1)].index
    ].tolist(),
    "End Date": apple_signals["Price"].loc[
        apple_signals.loc[(apple_signals["Signal"].shift(1) == "Buy") & (apple_signals["Regime"].shift(1) == 1)].index
    ].index
})
apple_long_profits
def ohlc_adj(dat):
    """
    :param dat: pandas DataFrame with stock data, including "Open", "High",
        "Low", "Close", and "Adj Close" columns, with "Adj Close" containing
        adjusted closing prices
    :return: pandas DataFrame with "Open", "High", "Low" and "Close" columns

    Adjusts raw OHLC prices for splits, dividends, etc. by rescaling each
    series with the ratio of the adjusted to the raw closing price.  The
    input DataFrame is similar to that returned by the pandas Yahoo!
    Finance API.
    """
    adjusted = {"Open": dat["Open"] * dat["Adj Close"] / dat["Close"],
                "High": dat["High"] * dat["Adj Close"] / dat["Close"],
                "Low": dat["Low"] * dat["Adj Close"] / dat["Close"],
                "Close": dat["Adj Close"]}
    return pd.DataFrame(adjusted)
apple_adj = ohlc_adj(apple)

# This next code repeats all the earlier analysis we did on the adjusted data
apple_adj["20d"] = np.round(apple_adj["Close"].rolling(window = 20, center = False).mean(), 2)
apple_adj["50d"] = np.round(apple_adj["Close"].rolling(window = 50, center = False).mean(), 2)
apple_adj["200d"] = np.round(apple_adj["Close"].rolling(window = 200, center = False).mean(), 2)

apple_adj['20d-50d'] = apple_adj['20d'] - apple_adj['50d']
# np.where() is a vectorized if-else function, where a condition is checked for each component of a vector, and the first argument passed is used when the condition holds, and the other passed if it does not
apple_adj["Regime"] = np.where(apple_adj['20d-50d'] > 0, 1, 0)
# We have 1's for bullish regimes and 0's for everything else. Below I replace bearish regimes's values with -1, and to maintain the rest of the vector, the second argument is apple["Regime"]
apple_adj["Regime"] = np.where(apple_adj['20d-50d'] < 0, -1, apple_adj["Regime"])
# To ensure that all trades close out, I temporarily change the regime of the last row to 0
# NOTE(review): DataFrame.ix was removed in pandas 1.0 -- use .iloc here.
regime_orig = apple_adj.ix[-1, "Regime"]
apple_adj.ix[-1, "Regime"] = 0
# Buy/sell signals are day-over-day regime changes: +1 = buy, -1 = sell.
apple_adj["Signal"] = np.sign(apple_adj["Regime"] - apple_adj["Regime"].shift(1))
# Restore original regime data
apple_adj.ix[-1, "Regime"] = regime_orig

# Create a DataFrame with trades, including the price at the trade and the regime under which the trade is made.
apple_adj_signals = pd.concat([
    pd.DataFrame({"Price": apple_adj.loc[apple_adj["Signal"] == 1, "Close"],
                  "Regime": apple_adj.loc[apple_adj["Signal"] == 1, "Regime"],
                  "Signal": "Buy"}),
    pd.DataFrame({"Price": apple_adj.loc[apple_adj["Signal"] == -1, "Close"],
                  "Regime": apple_adj.loc[apple_adj["Signal"] == -1, "Regime"],
                  "Signal": "Sell"}),
])
apple_adj_signals.sort_index(inplace = True)

# Long-trade ledger on adjusted prices (see NOTE above about '&'/'==').
apple_adj_long_profits = pd.DataFrame({
    "Price": apple_adj_signals.loc[(apple_adj_signals["Signal"] == "Buy") &
                                   apple_adj_signals["Regime"] == 1, "Price"],
    "Profit": pd.Series(apple_adj_signals["Price"] - apple_adj_signals["Price"].shift(1)).loc[
        apple_adj_signals.loc[(apple_adj_signals["Signal"].shift(1) == "Buy") & (apple_adj_signals["Regime"].shift(1) == 1)].index
    ].tolist(),
    "End Date": apple_adj_signals["Price"].loc[
        apple_adj_signals.loc[(apple_adj_signals["Signal"].shift(1) == "Buy") & (apple_adj_signals["Regime"].shift(1) == 1)].index
    ].index
})

tradeperiods = pd.DataFrame({"Start": apple_adj_long_profits.index,
                             "End": apple_adj_long_profits["End Date"]})
# Lowest traded price during each holding period (for the stop-loss check).
apple_adj_long_profits["Low"] = tradeperiods.apply(lambda x: min(apple_adj.loc[x["Start"]:x["End"], "Low"]), axis = 1)
# Simulated single-stock portfolio: $1M starting cash, at most 10% of the
# portfolio risked per trade, shares bought in batches of 100, 20% stop-loss.
cash = 1000000
apple_backtest = pd.DataFrame({"Start Port. Value": [],
                               "End Port. Value": [],
                               "End Date": [],
                               "Shares": [],
                               "Share Price": [],
                               "Trade Value": [],
                               "Profit per Share": [],
                               "Total Profit": [],
                               "Stop-Loss Triggered": []})
port_value = .1 # Max proportion of portfolio bet on any trade
batch = 100 # Number of shares bought per batch
stoploss = .2 # % of trade loss that would trigger a stoploss
for index, row in apple_adj_long_profits.iterrows():
    batches = np.floor(cash * port_value) // np.ceil(batch * row["Price"]) # Maximum number of batches of stocks invested in
    trade_val = batches * batch * row["Price"] # How much money is put on the line with each trade
    if row["Low"] < (1 - stoploss) * row["Price"]: # Account for the stop-loss
        # NOTE(review): this records the stop-loss *exit price*, not a
        # per-share profit (the non-stop branch records a profit) --
        # confirm the intended semantics before reusing these numbers.
        share_profit = np.round((1 - stoploss) * row["Price"], 2)
        stop_trig = True
    else:
        share_profit = row["Profit"]
        stop_trig = False
    profit = share_profit * batches * batch # Compute profits
    # Add a row to the backtest data frame containing the results of the trade
    # NOTE(review): DataFrame.append was removed in pandas 2.0 -- collect
    # rows in a list and pd.concat once when upgrading.
    apple_backtest = apple_backtest.append(pd.DataFrame({
        "Start Port. Value": cash,
        "End Port. Value": cash + profit,
        "End Date": row["End Date"],
        "Shares": batch * batches,
        "Share Price": row["Price"],
        "Trade Value": trade_val,
        "Profit per Share": share_profit,
        "Total Profit": profit,
        "Stop-Loss Triggered": stop_trig
    }, index = [index]))
    cash = max(0, cash + profit)
apple_backtest
def ma_crossover_orders(stocks, fast, slow):
    """
    :param stocks: A list of tuples, the first argument in each tuple being a string containing the ticker symbol of each stock (or however you want the stock represented, so long as it's unique), and the second being a pandas DataFrame containing the stocks, with a "Close" column and indexing by date (like the data frames returned by the Yahoo! Finance API)
    :param fast: Integer for the number of days used in the fast moving average
    :param slow: Integer for the number of days used in the slow moving average
    :return: pandas DataFrame containing stock orders, indexed by (Date, Symbol)

    This function takes a list of stocks and determines when each stock would be bought or sold depending on a moving average crossover strategy, returning a data frame with information about when the stocks in the portfolio are bought or sold according to the strategy.

    NOTE(review): mutates the DataFrames in *stocks* in place (adds the MA,
    Regime and Signal columns), and relies on .ix and DataFrame.append,
    both removed in modern pandas.
    """
    fast_str = str(fast) + 'd'
    slow_str = str(slow) + 'd'
    ma_diff_str = fast_str + '-' + slow_str

    trades = pd.DataFrame({"Price": [], "Regime": [], "Signal": []})
    for s in stocks:
        # Get the moving averages, both fast and slow, along with the difference in the moving averages
        s[1][fast_str] = np.round(s[1]["Close"].rolling(window = fast, center = False).mean(), 2)
        s[1][slow_str] = np.round(s[1]["Close"].rolling(window = slow, center = False).mean(), 2)
        s[1][ma_diff_str] = s[1][fast_str] - s[1][slow_str]

        # np.where() is a vectorized if-else function, where a condition is checked for each component of a vector, and the first argument passed is used when the condition holds, and the other passed if it does not
        s[1]["Regime"] = np.where(s[1][ma_diff_str] > 0, 1, 0)
        # We have 1's for bullish regimes and 0's for everything else. Below I replace bearish regimes's values with -1, and to maintain the rest of the vector, the second argument is apple["Regime"]
        s[1]["Regime"] = np.where(s[1][ma_diff_str] < 0, -1, s[1]["Regime"])
        # To ensure that all trades close out, I temporarily change the regime of the last row to 0
        regime_orig = s[1].ix[-1, "Regime"]
        s[1].ix[-1, "Regime"] = 0
        s[1]["Signal"] = np.sign(s[1]["Regime"] - s[1]["Regime"].shift(1))
        # Restore original regime data
        s[1].ix[-1, "Regime"] = regime_orig

        # Get signals
        signals = pd.concat([
            pd.DataFrame({"Price": s[1].loc[s[1]["Signal"] == 1, "Close"],
                          "Regime": s[1].loc[s[1]["Signal"] == 1, "Regime"],
                          "Signal": "Buy"}),
            pd.DataFrame({"Price": s[1].loc[s[1]["Signal"] == -1, "Close"],
                          "Regime": s[1].loc[s[1]["Signal"] == -1, "Regime"],
                          "Signal": "Sell"}),
        ])
        # Tag every signal row with its ticker symbol.
        signals.index = pd.MultiIndex.from_product([signals.index, [s[0]]], names = ["Date", "Symbol"])
        trades = trades.append(signals)

    trades.sort_index(inplace = True)
    trades.index = pd.MultiIndex.from_tuples(trades.index, names = ["Date", "Symbol"])
    return trades
def backtest(signals, cash, port_value = .1, batch = 100, flat_commision = 35):
    """
    :param signals: pandas DataFrame containing buy and sell signals with stock prices and symbols, like that returned by ma_crossover_orders
    :param cash: integer for starting cash value
    :param port_value: maximum proportion of portfolio to risk on any single trade
    :param batch: Trading batch sizes
    :param flat_commision: flat fee charged on each executed trade
    :return: pandas DataFrame with backtesting results

    This function backtests strategies, with the signals generated by the strategies being passed in the signals DataFrame. A fictitious portfolio is simulated and the returns generated by this portfolio are reported.
    """
    SYMBOL = 1 # Constant for which element in index represents symbol
    portfolio = dict() # Will contain how many stocks are in the portfolio for a given symbol
    port_prices = dict() # Tracks old trade prices for determining profits
    # Dataframe that will contain backtesting report
    results = pd.DataFrame({"Start Cash": [],
                            "End Cash": [],
                            "Portfolio Value": [],
                            "Type": [],
                            "Shares": [],
                            "Share Price": [],
                            "Trade Value": [],
                            "Profit per Share": [],
                            "Total Profit": []})
    print results
    for index, row in signals.iterrows():
        # These first few lines are done for any trade
        shares = portfolio.setdefault(index[SYMBOL], 0)
        trade_val = 0
        batches = 0
        cash_change = row["Price"] * shares # Shares could potentially be a positive or negative number (cash_change will be added in the end; negative shares indicate a short)
        portfolio[index[SYMBOL]] = 0 # For a given symbol, a position is effectively cleared
        old_price = port_prices.setdefault(index[SYMBOL], row["Price"])
        portfolio_val = 0
        if shares != 0:
            # Closing an existing position costs the flat commission.
            cash_change = cash_change - flat_commision
            print "Selling" + " " +index[SYMBOL]
        # Mark the remaining book to the last recorded trade prices.
        for key, val in portfolio.items():
            portfolio_val += val * port_prices[key]
        if row["Signal"] == "Buy" and row["Regime"] == 1: # Entering a long position
            batches = np.floor((portfolio_val + cash) * port_value) // np.ceil(batch * row["Price"]) # Maximum number of batches of stocks invested in
            trade_val = batches * batch * row["Price"] + flat_commision # How much money is put on the line with each trade
            if trade_val > flat_commision:
                print "Long Position" + " " +index[SYMBOL]
                cash_change -= trade_val # We are buying shares so cash will go down
                portfolio[index[SYMBOL]] = batches * batch # Recording how many shares are currently invested in the stock
                port_prices[index[SYMBOL]] = row["Price"] # Record price
                old_price = row["Price"]
        elif row["Signal"] == "Sell" and row["Regime"] == -1: # Entering a short
            pass
            # Do nothing; can we provide a method for shorting the market?
        #else:
            #raise ValueError("I don't know what to do with signal " + row["Signal"])
        pprofit = row["Price"] - old_price # Compute profit per share; old_price is set in such a way that entering a position results in a profit of zero
        # Update report
        # NOTE(review): DataFrame.append was removed in pandas 2.0.
        results = results.append(pd.DataFrame({
            "Start Cash": cash,
            "End Cash": cash + cash_change,
            "Portfolio Value": cash + cash_change + portfolio_val + trade_val,
            "Type": row["Signal"],
            "Shares": batch * batches,
            "Share Price": row["Price"],
            "Trade Value": abs(cash_change),
            "Profit per Share": pprofit,
            "Total Profit": batches * batch * pprofit
        }, index = [index]))
        cash += cash_change # Final change to cash balance
    results.sort_index(inplace = True)
    results.index = pd.MultiIndex.from_tuples(results.index, names = ["Date", "Symbol"])
    print portfolio, portfolio_val, cash
    return results
# Fetch raw price history for the rest of the portfolio from Yahoo! Finance.
microsoft = web.DataReader("MSFT", "yahoo", start, end)
google = web.DataReader("GOOG", "yahoo", start, end)
facebook = web.DataReader("FB", "yahoo", start, end)
twitter = web.DataReader("TWTR", "yahoo", start, end)
netflix = web.DataReader("NFLX", "yahoo", start, end)
amazon = web.DataReader("AMZN", "yahoo", start, end)
yahoo = web.DataReader("YHOO", "yahoo", start, end)
sony = web.DataReader("SNY", "yahoo", start, end)
nintendo = web.DataReader("NTDOY", "yahoo", start, end)
ibm = web.DataReader("IBM", "yahoo", start, end)
hp = web.DataReader("HPQ", "yahoo", start, end)

# 20/50-day crossover signals on the split/dividend-adjusted series.
signals = ma_crossover_orders([("AAPL", ohlc_adj(apple)),
                               ("MSFT", ohlc_adj(microsoft)),
                               ("GOOG", ohlc_adj(google)),
                               ("FB", ohlc_adj(facebook)),
                               ("TWTR", ohlc_adj(twitter)),
                               ("NFLX", ohlc_adj(netflix)),
                               ("AMZN", ohlc_adj(amazon)),
                               ("YHOO", ohlc_adj(yahoo)),
                               # Bug fix: this entry previously re-used the
                               # ``yahoo`` DataFrame; ``sony`` was fetched
                               # above but never used.
                               ("SNY", ohlc_adj(sony)),
                               ("NTDOY", ohlc_adj(nintendo)),
                               ("IBM", ohlc_adj(ibm)),
                               ("HPQ", ohlc_adj(hp))],
                              fast = 20, slow = 50)
bk = backtest(signals, 10000, port_value = .1, batch = 100, flat_commision = 35)
bk

#bk["Portfolio Value"].groupby(level = 0).apply(lambda x: x[-1]).plot()
spyder = web.DataReader("^DJA", "yahoo", start, end)
spyder.iloc[[0,-1],:]
#ax_bench = (spyder["Adj Close"] / spyder.ix[0, "Adj Close"]).plot(label = "SPY")
#ax_bench = (.groupby(level = 0).apply(lambda x: x[-1]) / 1000000).plot(ax = ax_bench, label = "Portfolio")
#ax_bench.legend(ax_bench.get_lines(), [l.get_label() for l in ax_bench.get_lines()], loc = 'best')
#ax_bench
|
# Micro-benchmark: interactive-redraw frames per second, matplotlib (pylab)
# vs. the GR framework, each drawing 200 frames of a shifting sine wave.
from pylab import plot, draw, pause
from numpy import arange, sin, pi
from time import time, sleep

x = arange(0, 2 * pi, 0.01)

tstart = time()
line, = plot(x, sin(x))
for i in arange(1, 200):
    # Update the existing line's data in place instead of replotting.
    line.set_ydata(sin(x + i / 10.0))
    draw()
    pause(0.0001)
fps_mpl = int(200 / (time() - tstart))
print('fps (mpl): %4d' % fps_mpl)

# NOTE: this import deliberately rebinds ``plot`` from pylab to GR's pygr
# implementation for the second half of the benchmark.
from gr.pygr import plot

tstart = time()
for i in arange(1, 200):
    plot(x, sin(x + i / 10.0))
    sleep(0.0001)
fps_gr = int(200 / (time() - tstart))
print('fps (GR): %4d' % fps_gr)

print(' speedup: %6.1f' % (float(fps_gr) / fps_mpl))
989,157 | 431626006001e4e55a4a0d8f587819ae643be3db | # Generated by Django 3.0.4 on 2020-03-22 23:12
import django.contrib.gis.db.models.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax PatientHistory fields to allow NULLs and constrain ``type``
    to the two known history-entry kinds (placeVisit / travel)."""

    dependencies = [
        ('patients', '0009_auto_20200323_0429'),
    ]

    operations = [
        migrations.AlterField(
            model_name='patienthistory',
            name='place_name',
            field=models.TextField(null=True),
        ),
        migrations.AlterField(
            model_name='patienthistory',
            name='travel_mode',
            field=models.TextField(null=True),
        ),
        migrations.AlterField(
            model_name='patienthistory',
            name='type',
            field=models.CharField(choices=[('placeVisit', 'placeVisit'), ('travel', 'travel')], max_length=15, null=True),
        ),
    ]
|
989,158 | 07eb191a07b1b58e2df7e3a6fd33190c07a76ce7 | from __future__ import absolute_import, division, print_function
import datashape
from datashape import (DataShape, Record, Mono, dshape, to_numpy,
to_numpy_dtype, discover)
from datashape.predicates import isrecord, iscollection
from datashape.dispatch import dispatch
import h5py
import numpy as np
from toolz import assoc, keyfilter
from ..append import append
from ..convert import convert, ooc_types
from ..create import create
from ..resource import resource
from ..chunks import chunks, Chunks
from ..compatibility import unicode
# Keyword arguments understood by h5py's dataset constructor; used in
# dataset_from_dshape() to filter out unrelated kwargs.
h5py_attributes = ['chunks', 'compression', 'compression_opts', 'dtype',
                   'fillvalue', 'fletcher32', 'maxshape', 'shape']
@discover.register((h5py.Group, h5py.File))
def discover_h5py_group_file(g):
    """Datashape of a group/file: a Record of its children's datashapes."""
    return DataShape(Record([[k, discover(v)] for k, v in g.items()]))
@discover.register(h5py.Dataset)
def discover_h5py_dataset(d):
    """Datashape of a dataset; numpy object dtype is reported as string."""
    s = str(datashape.from_numpy(d.shape, d.dtype))
    return dshape(s.replace('object', 'string'))
def varlen_dtype(dt):
    """ Inject variable length string element for 'O' """
    if "'O'" not in str(dt):
        return dt
    varlen = h5py.special_dtype(vlen=unicode)
    # HACK: rebuilds the dtype by eval'ing its repr with 'O' swapped for
    # the h5py varlen string type.  Acceptable only because ``dt`` comes
    # from numpy's own repr, never from untrusted input.
    return np.dtype(eval(str(dt).replace("'O'", 'varlen')))
def dataset_from_dshape(file, datapath, ds, **kwargs):
    """Create (or reuse) an HDF5 dataset at *datapath* matching datashape *ds*.

    A variable-length (``var``) leading dimension becomes a resizable
    first axis (initial length 0, unlimited maxshape); ``var`` anywhere
    else is unsupported and raises ValueError.
    """
    dtype = varlen_dtype(to_numpy_dtype(ds))
    if datashape.var not in list(ds):
        shape = to_numpy(ds)[0]
    elif datashape.var not in list(ds)[1:]:
        shape = (0,) + to_numpy(ds.subshape[0])[0]
    else:
        raise ValueError("Don't know how to handle varlen nd shapes")

    if shape:
        # Chunked storage is required for resizable datasets.
        kwargs['chunks'] = kwargs.get('chunks', True)
        kwargs['maxshape'] = kwargs.get('maxshape', (None,) + shape[1:])

    # Only forward kwargs that h5py's dataset constructor understands.
    kwargs2 = keyfilter(h5py_attributes.__contains__, kwargs)
    return file.require_dataset(datapath, shape=shape, dtype=dtype, **kwargs2)
def create_from_datashape(group, ds, name=None, **kwargs):
    """Recursively materialise groups/datasets under *group* to match *ds*.

    Record fields that are themselves records become sub-groups; leaf
    fields become datasets.

    NOTE(review): the ``name`` parameter is never read -- the loop
    variable below shadows it.  Confirm whether it can be dropped.
    """
    if not isrecord(ds):
        raise ValueError(
            "Trying to create an HDF5 file with non-record datashape failed\n"
            "Perhaps you forgot to specify a datapath?\n"
            "\tdshape: %s\n"
            "If you're using into consider the following change\n"
            "\tBefore: into('myfile.hdf5', data)\n"
            "\tAfter: into('myfile.hdf5::/datapath', data)" % ds)
    if isinstance(ds, DataShape) and len(ds) == 1:
        # Unwrap a one-measure DataShape down to the bare Record.
        ds = ds[0]
    for name, sub_ds in ds.dict.items():
        if isrecord(sub_ds):
            g = group.require_group(name)
            create_from_datashape(g, sub_ds, **kwargs)
        else:
            dataset_from_dshape(file=group.file,
                                datapath='/'.join([group.name, name]),
                                ds=sub_ds, **kwargs)
@create.register(h5py.File)
def create_h5py_file(cls, path=None, dshape=None, **kwargs):
    """Create an HDF5 file at *path* pre-populated to match *dshape*."""
    # NOTE(review): relies on the legacy default open mode ('a'); h5py >= 3
    # warns/raises without an explicit mode argument.
    f = h5py.File(path)
    create_from_datashape(f, dshape, **kwargs)
    return f
@append.register(h5py.Dataset, np.ndarray)
def append_h5py(dset, x, **kwargs):
    """Append array *x* along the first axis of the resizable dataset *dset*."""
    if not sum(x.shape):
        # Zero-sized input: nothing to append.
        return dset
    shape = list(dset.shape)
    shape[0] += len(x)
    dset.resize(shape)
    dset[-len(x):] = x
    return dset
@append.register(h5py.Dataset, chunks(np.ndarray))
def append_h5py(dset, c, **kwargs):
    """Append a sequence of numpy chunks to *dset*, one chunk at a time."""
    for chunk in c:
        append(dset, chunk)
    return dset
@append.register(h5py.Dataset, object)
def append_h5py(dset, x, **kwargs):
    """Fallback: convert *x* to numpy chunks, then append those."""
    return append(dset, convert(chunks(np.ndarray), x, **kwargs), **kwargs)
@convert.register(np.ndarray, h5py.Dataset, cost=3.0)
def h5py_to_numpy(dset, force=False, **kwargs):
    """Materialise an h5py Dataset as an in-memory numpy array.

    Datasets with more than 1e9 elements are refused unless ``force=True``,
    since loading them would pull the whole dataset into RAM.

    :raises MemoryError: if the dataset is large and ``force`` is falsy

    Bug fixes vs. the original: ``d`` was an undefined name, the ``%``
    formatting bound tighter than the ``/`` (string / float -> TypeError
    while raising), and the documented ``force`` flag was never honoured.
    """
    if dset.size > 1e9 and not force:
        raise MemoryError("File size is large: %0.2f GB.\n"
                          "Convert with flag force=True to force loading"
                          % (dset.size / 1e9))
    return dset[:]
@convert.register(chunks(np.ndarray), h5py.Dataset, cost=3.0)
def h5py_to_numpy_chunks(dset, chunksize=2**20, **kwargs):
    """Lazily stream *dset* as numpy chunks of *chunksize* leading rows."""
    def load():
        for i in range(0, dset.shape[0], chunksize):
            yield dset[i: i + chunksize]
    return chunks(np.ndarray)(load)
@resource.register('h5py://.+', priority=11)
def resource_h5py(uri, datapath=None, dshape=None, **kwargs):
    """Open (and optionally create) an HDF5 resource for *uri*.

    When *dshape* is given, the datashape is wrapped in one Record level
    per component of *datapath* and the file is created to match.  The
    dataset at *datapath* is returned when one was requested, otherwise
    the file handle itself.
    """
    f = h5py.File(uri)
    olddatapath = datapath
    if dshape is not None:
        ds = datashape.dshape(dshape)
        if datapath:
            # Wrap the leaf datashape in a Record per path component,
            # innermost first, so creation builds the group hierarchy.
            while ds and datapath:
                datapath, name = datapath.rsplit('/', 1)
                ds = Record([[name, ds]])
            ds = datashape.dshape(ds)
        f = create(h5py.File, path=uri, dshape=ds, **kwargs)
    if olddatapath:
        return f[olddatapath]
    else:
        return f
# Raw string: the regex needs a literal backslash before '.'; a plain string
# here relies on Python passing the invalid '\.' escape through (a
# DeprecationWarning since 3.6 and an error in future versions).
@resource.register(r'.+\.(hdf5|h5)')
def resource_hdf5(*args, **kwargs):
    """Route bare .hdf5/.h5 paths to the h5py resource opener."""
    return resource_h5py(*args, **kwargs)
@dispatch((h5py.Group, h5py.Dataset))
def drop(h):
    """Delete a group or dataset from its enclosing HDF5 file."""
    del h.file[h.name]
@dispatch(h5py.File)
def drop(h):
    """Delete the whole HDF5 file from disk."""
    os.remove(h.filename)
# Register h5py datasets as an out-of-core (chunk-streamable) container type.
ooc_types.add(h5py.Dataset)
|
989,159 | 482847d7e8176beecd8ac4e981d8dc3689aca6cd | #!/usr/bin/env python
# coding: utf-8
#%% ---- DEPENDENCIES
import argparse
from collections import Counter
import sys
import numpy as np
import pandas as pd
import manynames as mn
#%% ---- FUNCTIONS TO RECREATE AGREEMENT TABLE
def snodgrass_agreement(rdict, vocab, singletons=False):
    """Snodgrass name-agreement: Shannon entropy (bits) of the responses.

    rdict maps response types to counts; vocab maps types to corpus-wide
    counts and is only consulted when singletons is False, in which case
    types attested at most once overall are dropped before computing H.
    """
    if singletons:
        counts = [rdict[key] for key in rdict]
    else:
        # keep only response types seen more than once in the whole corpus
        counts = [rdict[key] for key in rdict if vocab[key] > 1]
    probs = np.array(counts) / np.sum(counts)
    # H = sum_i p_i * log2(1 / p_i)
    return np.sum(probs * np.log2(1 / probs))
def percent_agreement(rdict):
    """Return the share of responses matching the modal (most common) name.

    rdict is a collections.Counter of response type -> count.
    """
    top_name, _ = rdict.most_common(1)[0]
    return rdict[top_name] / sum(rdict.values())
def make_df(manynames):
    """Augment the ManyNames dataframe (in place) with agreement measures.

    Adds columns: n_types_min2 (response types given >=2x),
    percent_agree_min2, snodgrass_min2, vg_is_max (1 if the top name equals
    the Visual Genome name) and vg_mean (weight of the VG name among all
    responses).  Returns the same dataframe.
    """
    vg_is_common = []
    vg_prop = []
    ntypesmin2 = []
    # Ensure the responses column holds Counter objects, not plain dicts.
    if not isinstance(manynames.iloc[0]['responses'], Counter):
        manynames['responses'] = manynames['responses'].apply(lambda x: Counter(x))
    for ix,row in manynames.iterrows():
        # 1 if the most frequent response is the original VG object name.
        vg_is_common.append(int(row['topname'] == row['vg_obj_name']))
        # Relative weight of the VG name (Counter yields 0 when absent).
        vg_weight = row['responses'][row['vg_obj_name']]/sum(row['responses'].values())
        vg_prop.append(vg_weight)
        # Response types produced by at least two annotators.
        min2types = [k for k in row['responses'].keys() if row['responses'][k] > 1]
        ntypesmin2.append(len(min2types))
    manynames['n_types_min2'] = ntypesmin2
    manynames['percent_agree_min2'] = manynames['responses'].apply(lambda x: percent_agreement(x))
    # NOTE(review): called with singletons=True and an empty vocab, so no
    # singleton filtering actually happens despite the '_min2' column name.
    manynames['snodgrass_min2'] = manynames['responses'].apply(lambda x: snodgrass_agreement(x,{},True))
    manynames['vg_is_max'] = vg_is_common
    manynames['vg_mean'] = vg_prop
    return manynames
def make_agreement_table(resdf):
    """Build the per-domain summary table of naming-agreement statistics.

    Produces one 'all' row plus one row per VG domain, with columns:
    N (mean # types given >=2x), %top (mean/std percent agreement),
    H (mean/std Snodgrass entropy), top=VG (% objects whose top name is the
    VG name) and %VG (mean weight of the VG name).
    """
    nobjects = len(resdf)
    tablerows = []
    # Overall row across every object.
    tablerows.append(('all',
                      str("%.1f"%np.mean(resdf['n_types_min2'])),
                      str("%.1f (%.1f)"%(np.mean(resdf['percent_agree_min2'])*100,
                                         np.std(resdf['percent_agree_min2'])*100)),
                      str("%.1f (%.1f)"%(np.mean(resdf['snodgrass_min2']),
                                         np.std(resdf['snodgrass_min2']))),
                      str("%.1f"%((np.sum(resdf['vg_is_max'])/nobjects)*100)),
                      str("%.1f"%((np.sum(resdf['vg_mean'])/nobjects)*100)),
                      ))
    # One row per VG domain, same statistics restricted to that domain.
    for c in set(list(resdf['vg_domain'])):
        catdf = resdf[resdf['vg_domain'] == c]
        ncat = len(catdf)
        # Per-synset mean VG weight (computed but not used in the table).
        synagree = Counter()
        for s in set(list(catdf['vg_synset'])):
            syndf = catdf[catdf['vg_synset'] == s]
            synagree[s] = np.mean(syndf['vg_mean'])
        tablerows.append((c,
                          str("%.1f"%np.mean(catdf['n_types_min2'])),
                          str("%.1f (%.1f)"%(np.mean(catdf['percent_agree_min2'])*100,
                                             np.std(catdf['percent_agree_min2'])*100)),
                          str("%.1f (%.1f)"%(np.mean(catdf['snodgrass_min2']),
                                             np.std(catdf['snodgrass_min2']))),
                          str("%.1f"%((np.sum(catdf['vg_is_max'])/ncat)*100)),
                          str("%.1f"%((np.sum(catdf['vg_mean'])/ncat)*100)),
                          ))
    outdf = pd.DataFrame(tablerows,columns=['domain','N','%top','H','top=VG','%VG'])
    # Fix the display order of the domain column for later sorting.
    outdf['domain'] = pd.Categorical(outdf['domain'],
                                     ['all', 'people', 'clothing', 'home', 'buildings',
                                      'food', 'vehicles', 'animals_plants'])
    return outdf
#%% ---- MAIN
if __name__ == '__main__':
    #%%% ----- CHECK ARGUMENTS
    # Set up the argument parser: a single option for the ManyNames TSV path.
    arg_parser = argparse.ArgumentParser(
        description = '''Creates a summary table of name agreement indices
                        (reproducing Table 3 in [Silberer, Zarrieß, & Boleda,2020)''')
    #add required arguments
    arg_parser.add_argument('-mnfile', type=str,
                            help='''the path to manynames.tsv''',
                            default='../manynames.tsv')
    #check provided arguments
    args = arg_parser.parse_args()
    #set values
    fn = args.mnfile
    #%%% ----- PROCESSING
    # Load ManyNames, derive agreement measures, then print the table
    # ordered by the fixed domain ordering.
    manynames = mn.load_cleaned_results(fn)
    resdf = make_df(manynames)
    o1 = make_agreement_table(resdf)
    print(o1.sort_values(by = 'domain'))
|
989,160 | f28985d49730f1a10dd23e794c06fa25604b7c76 | # @Date: 2020-05-03T21:24:44+05:30
# @Last modified time: 2020-05-03T21:28:04+05:30
# https://leetcode.com/contest/weekly-contest-187/problems/check-if-all-1s-are-at-least-length-k-places-away/
# Check If All 1's Are at Least Length K Places Away
nums = [1,0,0,0,1,0,0,1]
k = 2
import sys  # kept from the original script (no longer used after the fix)

def k_length_apart(nums, k):
    """Return True iff every pair of 1s in nums has at least k 0s between them.

    Equivalent condition: consecutive 1s at indices prev < i must satisfy
    i - prev > k.  k == 0 is trivially satisfied.
    """
    # Bug fix vs. original: the old script printed True unconditionally at
    # the end (even after printing False) and, for k == 0, printed True and
    # kept running.  Returning from a function gives a single correct answer.
    prev = None
    for i, v in enumerate(nums):
        if v == 1:
            if prev is not None and i - prev <= k:
                return False
            prev = i
    return True

print(k_length_apart(nums, k))
|
989,161 | 1a04bd42e3319cfe74321f48db6cc25082a85fd0 | from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
from datetime import date, datetime
from django_countries.fields import CountryField
from django.db.models.signals import post_save
from django.dispatch import receiver
class Event(models.Model):
    """An event created by an organizer that other users can book."""
    # Creator of the event; deleting the user cascades to their events.
    organizer = models.ForeignKey(User, default=1, on_delete=models.CASCADE)
    title = models.CharField(max_length=120)
    description = models.TextField()
    date= models.DateField()
    time=models.TimeField()
    # Seat count; presumably the remaining capacity checked by
    # BookedEvent.check_seats — confirm against the booking views.
    seats = models.IntegerField(null=True,blank= True)
    def __str__(self):
        return "ID:%s Event:%s " % (self.id, self.title)
class BookedEvent(models.Model):
    """A user's booking of a number of tickets for an Event."""
    event= models.ForeignKey(Event, default=1, on_delete=models.CASCADE)
    user = models.ForeignKey(User, default=1, on_delete=models.CASCADE)
    tickets = models.IntegerField(null=True,blank= True)
    def check_seats(self):
        # True when the requested tickets fit within the event's seats.
        return (self.tickets <= self.event.seats)
    def get_seats(self):
        # Convenience accessor for the related event's seat count.
        return self.event.seats
    def __str__(self):
        return "ID:%s Event:%s User:%s" % (self.id, self.event.title, self.user.username)
class Profile(models.Model):
    """Extra per-user data, auto-created by the post_save signal below."""
    user = models.OneToOneField(User, default=1, on_delete=models.CASCADE)
    location = CountryField()
    bio = models.TextField(max_length=300, blank=True)
    birth_date = models.DateField(null=True, blank=True)
    # Falls back to a placeholder image when the user uploads nothing.
    profile_pic = models.ImageField(default="/profile_pic/pic placeholder.png/",upload_to='profile_pic', null=True, blank=True)
    def __str__(self):
        return "ID:%s User:%s " % (self.id, self.user.username)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    """Create a Profile automatically whenever a new User is saved."""
    if created:
        Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    """Persist the related Profile every time its User is saved."""
    instance.profile.save()
class Follower(models.Model):
    """Directed follow relation between two users (follower -> followed)."""
    follower = models.ForeignKey(User, related_name='following', on_delete=models.CASCADE)
    followed = models.ForeignKey(User, related_name='followers', on_delete=models.CASCADE)
    class Meta:
        # A user may follow another user at most once.
        unique_together = ('follower', 'followed')
    def __str__(self):
        return '%s follows %s' % (self.follower.username, self.followed.username)
class LikedUser(models.Model):
    """Directed 'like' relation between two users (liker -> liked)."""
    liker = models.ForeignKey(User, related_name='liking' , on_delete=models.CASCADE)
    liked = models.ForeignKey(User, related_name='likee' , on_delete=models.CASCADE)
    class Meta:
        # A user may like another user at most once.
        unique_together = ('liker', 'liked')
    def __str__(self):
        return '%s likes %s' % (self.liker.username, self.liked.username)
989,162 | ebbe08267baf4ddf0ede9da294e44694d81693d4 | import os
import json
import time
from uuid import uuid1
import click
from cli.utils import Spotify
from cli.utils.constants import AUTH_URL, CREDS_PATH
@click.command()
def login():
    """Authorize spotify-cli to access the Spotify API."""
    # NOTE: the docstring above doubles as the CLI help text — keep user-facing.
    # Embed a random state token so the callback can be correlated (CSRF guard).
    url = AUTH_URL + '&state=' + str(uuid1())
    try:
        import webbrowser
        webbrowser.open(url)
    except Exception:
        # Opening a browser is best-effort; we always print the URL below.
        # Fix vs. original: a bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit, making the command hard to abort.
        pass
    print('Go to the following link in your browser:\n\n\t{}\n'.format(url))
    auth_code = input('Enter verification code: ')
    print('\nObtaining access token...')
    # Exchange the one-time auth code for access/refresh tokens.
    Spotify.refresh(auth_code)
    print('Credentials saved to {}'.format(CREDS_PATH))
    return
@click.command()
@click.option(
    '-v', '--verbose', is_flag=True,
    help='Output more info (i.e. credential storage)'
)
def status(verbose):
    """Show who's logged in."""
    # NOTE: the docstring above is also the CLI help text — keep user-facing.
    # Hit the /me endpoint with the stored credentials to identify the user.
    user_data = Spotify.request('me', method='GET')
    click.echo('Logged in as {}'.format(user_data['display_name']))
    if verbose:
        click.echo('Credentials stored in {}'.format(CREDS_PATH))
    return
# CLI group
@click.group(
    options_metavar='[<options>]',
    subcommand_metavar='<command>'
)
def auth():
    """Manage user authentication for spotify-cli."""
    # Group entry point only; behavior lives in the registered subcommands.
    pass
# Register `spotify auth login` and `spotify auth status`.
auth.add_command(login)
auth.add_command(status)
|
989,163 | ff560d44a468298a9654acb28242650ffd6a4b0a | from importlib import import_module
if __name__ == '__main__':
    # Three equivalent ways of importing my_package.sub_package1.module1:
    # absolute, relative to the top-level package, and relative to the
    # sub-package.  The first failure aborts the sequence.
    specs = [
        ('my_package.sub_package1.module1', None),
        ('.sub_package1.module1', 'my_package'),
        ('.module1', 'my_package.sub_package1'),
    ]
    try:
        for target, package in specs:
            module = import_module(target, package)
    except ImportError as e:
        print(e)
    else:
        print(module.__name__)
|
989,164 | 09211fc5eb0562bc80c46e46c99a0ab2353428fc | #from matplotlib.mlab import griddata
from matplotlib import colors, colorbar
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.basemap import Basemap
from matplotlib.colors import LightSource
from numpy import arange, mean, percentile, array, unique, where, argsort, floor, ceil
from netCDF4 import Dataset as NetCDFFile
from gmt_tools import cpt2colormap
from os import path, walk, system
#from hmtk.parsers.catalogue.csv_catalogue_parser import CsvCatalogueParser, CsvCatalogueWriter
from misc_tools import remove_last_cmap_colour, dictlist2array
from mapping_tools import drawshapepoly, labelpolygon, annotate_cities, get_map_polygons, get_field_data, make_street_map
from io_catalogues import parse_ga_event_query
import shapefile
def return_csg_data(jsonfile):
    """Parse a GeoJSON file of CSG boreholes into a list of dicts.

    Each dict carries 'lon', 'lat' (first two coordinates of the feature
    geometry) and 'status' (the CSGSTATUS property).
    """
    import json
    with open(jsonfile) as fh:
        geojson = json.load(fh)
    boreholes = []
    for feat in geojson['features']:
        coords = feat['geometry']['coordinates']
        boreholes.append({'lon': coords[0],
                          'lat': coords[1],
                          'status': feat['properties']['CSGSTATUS']})
    return boreholes
def parse_iris_stationlist(stationlist):
    """Parse a pipe-delimited IRIS station list, skipping its 3-line header.

    Returns a list of dicts with keys: sta, lat, lon, elev, place.
    """
    with open(stationlist) as fh:
        rows = fh.readlines()[3:]
    stations = []
    for row in rows:
        fields = row.strip().split('|')
        stations.append({'sta': fields[1],
                         'lat': float(fields[2]),
                         'lon': float(fields[3]),
                         'elev': float(fields[4]),
                         'place': fields[5]})
    return stations
# --- Matplotlib global setup: embed TrueType fonts in PDFs, classic style.
plt.rcParams['pdf.fonttype'] = 42
mpl.style.use('classic')
##########################################################################################
doLocal = False
# local
# Map corner coordinates (Camden/Sydney region) used when not in local mode.
if doLocal == False:
    urcrnrlat = -33.85+0.25
    llcrnrlat = -34.36+0.14
    urcrnrlon = 151.1
    llcrnrlon = 150.4
##########################################################################################
# set up street map
##########################################################################################
# set map centroid
clon = mean([llcrnrlon, urcrnrlon])
clat = mean([llcrnrlat, urcrnrlat])
degrng = urcrnrlon-llcrnrlon
ll_buffer = 0.2
# NOTE(review): this rebinds the module-level name `plt` to whatever
# make_street_map returns; the ESRI imagery service is fetched over HTTP.
plt, m, ax = make_street_map(clat, clon, service='ESRI_Imagery_World_2D', ll_buffer = 0.5, \
             xpixels = 1500, plt_inset = False, plt_marker = False)
'''
Map Services:
    ESRI_Imagery_World_2D
    ESRI_StreetMap_World_2D
    NatGeo_World_Map
    NGS_Topo_US_2D
    Ocean_Basemap
    USA_Topo_Maps
    World_Imagery
    World_Physical_Map
    World_Shaded_Relief
    World_Street_Map
    World_Terrain_Base
    World_Topo_Map

    ESRI_Imagery_World_2D - good sat image
    NatGeo_World_Map - nice road map - hard to read
'''
##########################################################################################
# add cities
##########################################################################################
numCities = 9
annotate_cities(numCities, plt, m, marker='s')
"""
# add extra locs
import matplotlib.patheffects as PathEffects
path_effects=[PathEffects.withStroke(linewidth=3, foreground="w")]
txtoff = 0.1
x, y = m(133.07, -14.92)
plt.plot(x, y, 's', markerfacecolor='k', markeredgecolor='k', markeredgewidth=0.5, markersize=6, zorder=11000)
x, y = m(133.07+txtoff, -14.92+txtoff)
plt.text(x, y, 'Mataranka', size=14, ha='left', weight='normal', path_effects=path_effects)
x, y = m(133.37, -16.25)
plt.plot(x, y, 's', markerfacecolor='k', markeredgecolor='k', markeredgewidth=0.5, markersize=6, zorder=11000)
x, y = m(133.37-txtoff, -16.25+txtoff)
plt.text(x, y, 'Daly Waters', size=14, ha='right', weight='normal', path_effects=path_effects)
x, y = m(133.54, -17.55)
plt.plot(x, y, 's', markerfacecolor='k', markeredgecolor='k', markeredgewidth=0.5, markersize=6, zorder=11000)
x, y = m(133.54-txtoff, -17.55+txtoff)
plt.text(x, y, 'Elliott', size=14, ha='right', weight='normal', path_effects=path_effects)
"""
##########################################################################################
# add shapefiles
##########################################################################################
# NOTE(review): the first shpfile assignment is immediately overridden below.
shpfile = '../NT/shapefiles/PetroleumTitles28August2019.shp'
shpfile = 'PetroleumTitles28August2019.shp'
sf = shapefile.Reader(shpfile)
drawshapepoly(m, plt, sf, col='r',lw=0.75, alpha=0.5, fillshape = True)
# Plot CSG boreholes from GeoJSON, one colour per well status.
jsonfile='NSW_CSG_Boreholes.geojson'
csg_data = return_csg_data(jsonfile)
status = dictlist2array(csg_data, 'status')
csg_lon = dictlist2array(csg_data, 'lon')
csg_lat = dictlist2array(csg_data, 'lat')
idx1 = where(status == 'Permanently Sealed')
x,y = m(csg_lon[idx1], csg_lat[idx1])
plt.plot(x,y,'ro',ms=7,label='Permanently Sealed')
idx1 = where(status == 'Not Producing Gas')
x,y = m(csg_lon[idx1], csg_lat[idx1])
plt.plot(x,y,'o',c='orange',ms=7,label='Not Producing Gas')
idx1 = where(status == 'Producing Gas')
x,y = m(csg_lon[idx1], csg_lat[idx1])
plt.plot(x,y,'o',c='limegreen',ms=7,label='Producing Gas', zorder=10000)
##########################################################################################
# plt stations
##########################################################################################
# NOTE(review): the NAS path is immediately overridden by the local path.
stationlist = '/nas/active/ops/community_safety/ehp/georisk_earthquake/hazard/Networks/AU/gmap-stations-edit.txt'
stationlist = '/Users/trev/Documents/Networks/AU/gmap-stations-edit.txt'
staDict = parse_iris_stationlist(stationlist)
# plot stations
sta_lon = dictlist2array(staDict, 'lon')
sta_lat = dictlist2array(staDict, 'lat')
sta_code = dictlist2array(staDict, 'sta')
# Stations belonging to the Camden network get their own colour.
cmdnet = ('MABG', 'YARR', 'CATI', 'OKDL', 'WTPK')
cmdlat = []
cmdlon = []
for stla, stlo, sta in zip(sta_lat, sta_lon, sta_code):
    for cn in cmdnet:
        if sta == cn:
            print(cn)
            cmdlat.append(stla)
            cmdlon.append(stlo)
x,y = m(sta_lon, sta_lat)
plt.plot(x, y, '^', c='w', ms=12, zorder=1000, label='Urban Monitoring Network')
x,y = m(cmdlon, cmdlat)
plt.plot(x, y, '^', c='yellow', ms=12, zorder=1000, label='Camden Seismic Network')
# label stas
# NOTE(review): the four bare names below are no-op expressions; they
# evaluate the corner variables and discard the result.
urcrnrlat
llcrnrlat
urcrnrlon
llcrnrlon
import matplotlib.patheffects as PathEffects
path_effects=[PathEffects.withStroke(linewidth=3, foreground="w")]
# Label only stations that fall inside the (buffered) map extent.
for sta, slon, slat in zip(sta_code, sta_lon, sta_lat):
    if slon >= llcrnrlon-ll_buffer and slon <= urcrnrlon+ll_buffer \
       and slat >= llcrnrlat-ll_buffer and slat <= urcrnrlat+ll_buffer:
        print(sta)
        x,y = m(slon-0.005, slat+0.004)
        plt.text(x, y, sta, size=15, c='royalblue', va='bottom', ha='right', weight='normal', \
                 path_effects=path_effects, zorder=11000)
plt.legend(loc=2, numpoints=1, fontsize=11)
##########################################################################################
# get land & lake polygons for masking
##########################################################################################
'''
polys = get_map_polygons(m)
#mask_outside_polygon(polys[1][::-1], ax=None)
#mask_outside_polygons(polys, 'lightskyblue', plt)
# get lake ploygons
polygons = []
for polygon in m.lakepolygons:
    poly = polygon.get_coords()
    plt.fill(poly[:,0], poly[:,1], 'lightblue')
    polygons.append(poly)
'''
##########################################################################################
# make map inset
##########################################################################################
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
# Australia-wide locator inset with a red square marking the map area.
if doLocal == False:
    axins = zoomed_inset_axes(ax, 0.0048, loc=3)
    m2 = Basemap(projection='merc',\
                llcrnrlon=111,llcrnrlat=-45, \
                urcrnrlon=156,urcrnrlat=-9,\
                rsphere=6371200.,resolution='l',area_thresh=10000)
    m2.drawmapboundary(fill_color='0.8')
    m2.fillcontinents(color='w', lake_color='0.8') #, zorder=0)
    m2.drawcoastlines()
    m2.drawcountries()
    m2.drawstates()
    # fill main area
    xv = mean([llcrnrlon, urcrnrlon])
    yv = mean([llcrnrlat, urcrnrlat])
    x, y = m2(xv, yv)
    plt.plot(x, y, 'rs',ms=6)
##########################################################################################
# label states
##########################################################################################
'''
state = ['WA', 'NT', 'SA', 'QLD', 'NSW', 'VIC', 'TAS']
slat = [-26, -21.0, -29.5, -23.0, -32.5, -37.1, -42.]
slon = [122, 133.5, 135.0, 144.5, 146.5, 143.6, 147.0]
for i, st in enumerate(state):
    x, y = m(slon[i], slat[i])
    plt.text(x, y, st, size=11, horizontalalignment='center', verticalalignment='center', weight='normal')
'''
##########################################################################################
# add colourbar
##########################################################################################
# Export the figure in both raster and vector formats, then display it.
plt.savefig('camden_csg_boreholes.png', format='png', bbox_inches='tight', dpi=150)
plt.savefig('camden_csg_boreholes.svg', format='svg', bbox_inches='tight', dpi=150)
plt.show()
989,165 | acc4eaf3cea92e1973e9ae997392054bfb0fab6e | import numpy as np
from sklearn.feature_extraction import DictVectorizer
from UGPFM_FAST import pylibfm
#from UGPFM_FAST import test
import sys
import pandas as pd
from sklearn.model_selection import train_test_split
import argparse
import time
def parse_args():
    """Parse the UGPFM command-line hyperparameters.

    Options: --lr (learning rate), --iter (training iterations),
    --fact (number of latent factors).
    """
    parser = argparse.ArgumentParser(description="Run UGPFM.")
    for flag, kind, default in (('--lr', float, 0.001),
                                ('--iter', int, 100),
                                ('--fact', int, 10)):
        parser.add_argument(flag, type=kind, default=default)
    return parser.parse_args()
args=parse_args()
# Load the Coursera review dataset (CourseId + engineered features + Score).
#data=pd.read_csv('./level/data_difficulty_user.csv')
data=pd.read_csv('./coursera/review_complete_data_final.csv')
# Stratified 88/12 train/test split on the target Score.
#X_train, X_test, y_train, y_test = train_test_split(data[['CourseId','user_id','Level_difficulty']], data['rating'], test_size = 0.12, shuffle=True)
X_train, X_test, y_train, y_test = train_test_split(data[['CourseId','Cogn','sentiment','confusion']], data['Score'], test_size = 0.12, shuffle=True, stratify=data['Score'])
# Convert the training rows to feature dicts for one-hot vectorization.
dataFM=[]
y=[]
for index, row in X_train.iterrows():
    #dataFM.append({ "CourseId": str(row['CourseId']), "user_id": str(row['user_id']), "Level_difficulty": str(row['Level_difficulty'])})
    dataFM.append({ "CourseId": str(row['CourseId']),"Cogn": str(row['Cogn']),"sentiment": str(row['sentiment']),"confusion": str(row['confusion'])})
    y.append(float(y_train[index]))
# Same conversion for the held-out test rows.
dataFM_test=[]
y_t=[]
for index, row in X_test.iterrows():
    #dataFM_test.append({ "CourseId": str(row['CourseId']), "user_id": str(row['user_id']), "Level_difficulty": str(row['Level_difficulty'])})
    dataFM_test.append({ "CourseId": str(row['CourseId']),"Cogn": str(row['Cogn']),"sentiment": str(row['sentiment']),"confusion": str(row['confusion'])})
    y_t.append(float(y_test[index]))
# One-hot encode: fit on train, reuse the same vocabulary on test.
v = DictVectorizer()
X_train = v.fit_transform(dataFM)
X_teste = v.transform(dataFM_test)
# Train the factorization machine, timing the fit.
start1 = time.time()
ugpfm = pylibfm.UGPFM(num_factors=args.fact, num_iter=args.iter, verbose=True, task="regression", initial_learning_rate=args.lr, learning_rate_schedule="optimal")
ugpfm.fit(X_train,y)
end1 = time.time()
# Evaluate
preds1 = ugpfm.predict(X_teste)
# Evaluate
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
# Report regression metrics on the held-out split.
print("****************UGPFM***********************")
print("UGPFM MSE: %.5f" % mean_squared_error(y_t,preds1))
print("UGPFM RMSE: %.5f" % mean_squared_error(y_t,preds1,squared=False))
print("UGPFM MAE: %.5f" % mean_absolute_error(y_t,preds1))
print("UGPFM R² score: %.5f" % r2_score(y_t,preds1))
T1=end1-start1
print("time of execusion of UGPFM in secondes: %d" %T1)
989,166 | 9dfbf8b1974a0dd7dd98a867a9b1d426cacc5691 | # Protokoll Photoelektronenspektroskopie
# (Photoelectron spectroscopy lab protocol — comments translated from German.)
# Angle step size 0.3°
# Minimum/Maximum --> k=0
# Counting the fringes; spectrum 7 is alpha = 0
# Points on the positive and negative k-axis
# Label the x-y axes, units matter, convert the angles!
# Point cloud E(k) [do NOT connect into lines], spectrum image with angles!
# YbIr2Si2
# 4f shell
# Yb 4f very weakly bound; no bands but lines in the band structure — weakly bound yet not bonding
# Data: first column is the energy axis, intensities are offset
# At 10^-6 mbar: one monolayer per second
# Clean the sample
# Laser - a thermal process
# Sample cleaning
# Heating by electron bombardment
# High voltage and hot cathode, 2000°C
# Carbon melting temperature 3700°C
# Heat in oxygen, thereby oxidizing the carbon
# --> tungsten oxide; further heating evaporates the oxide at 2200°C
# Carbon in the tungsten crystal --> diffuses to the surface
# Catch the moment when the oxide is gone but no carbon is at the surface yet
# Alternate heating and cooling
# Heat gently at first, then increase progressively
# Diffraction patterns
# Trying out the LEED software
# Set tasks
# Explanation --> what happens and why?
# Pressure in the preparation chamber, 11:25
p = 1.76e-10 #mbar
# Acceleration voltage of the filament
U = 1249 #V
# Raising the filament temperature/current for emission
I = 240 # mA
# Pressure after the first heating, 11:31
p = 2.3e-9 #mbar
# Oxygen inlet, 11:34
p = 2.2e-6 #mbar
# Heating: 11:35-11:41
U = 1000 #V
I = 50 #mA
# Pressure after heating
p = 2.32e-6 #mbar
# Stop the oxygen supply
# Titanium getter pump binds the oxygen into a solid, 11:46-11:51
# 11:52
p = 1.16e-7 #mbar
# 13:48
p = 1.2e-9 #mbar
# Check via LEED: cross-shaped 'streaks'
# first heating:
U = 1000 #V
I = 35 #ma
# 1 minute
# second heating:
U = 1000 #V
I = 125 #ma
# 1 minute
# third heating:
U = 1250 #V
I = 240 #ma
# 1 minute
# Trying the LEED software; tilting makes the diffraction pattern almost unusable
# Z direction = beam incidence direction
# 1. What happens with fewer atoms?
# 5,5,5
# Intensity 0.05
# Ekin = 222
# Blurry: fewer beams, hence less destructive interference
# Fewer atoms, fewer interference conditions
# 2. Only a single atom?
# Intensity 0.46
# Ekin = 222
# No interference, so reflection in all directions / the second wave is missing
# 3. A chain of atoms? (x, y, z direction?)
# 31 atoms
# x direction
# Intensity 0.011
# Fourier transform in one direction
# Argue using the Ewald sphere
# z direction
# Intensity 0.022
# No zeroth-order reflection
# Write-up: images of diffraction and real space, with the image parameters
# 4. Step image
# Tilt left/right -6
# I = 0.0
# Step image with half the step length
# Steps in the crystal introduce an extra periodicity
# Exact mathematical relation between step length and diffraction pattern
# Why a step and not a tilt
# Counter-tilt of the step
# Diffraction signal of the steps is very weak --> turn up the intensity
# Reference image with too much intensity and step image with too much intensity
# Slanted steps --> diffraction pattern? Why?
# Tilt; turn up the intensity
# Tilt direction and amount
# Steps?
989,167 | e6e8f17ebdbae2f79fe8566f811cd848ff024158 | from rest_framework import serializers
#from rest_framework import employee
from .models import employee
# Serializers define the API representation.
class employeeSerializer(serializers.ModelSerializer):
    """DRF serializer exposing every field of the employee model."""
    class Meta:
        model = employee
        #fields = ['name', 'lastname', 'email','emid']
        # '__all__' serializes every model field automatically.
        fields= '__all__'
989,168 | 527ed037d3a6a6c2cb88cce3009316ec7c37b815 | # coding=utf-8
# author=veficos
import logging
from fluent.asynchandler import FluentHandler
from fluent.handler import FluentRecordFormatter
def create_logger(conf, name):
    """Create a logger that ships records to Fluentd (plus console in dev).

    conf keys used: 'level', 'tag', 'host', 'port', 'env'.
    """
    # Asynchronous Fluentd handler with a structured record format.
    logger = logging.getLogger(name)
    level = logging.getLevelName(conf['level'])
    logger.setLevel(level)
    handler = FluentHandler(tag=conf['tag'], host=conf['host'], port=conf['port'])
    handler.setFormatter(FluentRecordFormatter(fmt={
        'level': '%(levelname)s',
        'sys_host': '%(hostname)s',
        'sys_name': '%(name)s',
        'sys_module': '%(module)s',
        'function': '[%(pathname)s:%(funcName)s:%(lineno)d]',
        'stack_trace': '%(exc_text)s'
    }))
    handler.setLevel(level)
    logger.addHandler(handler)
    # Console output, only in the development environment.
    if conf['env'] == 'dev':
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(
            logging.Formatter('%(asctime)s %(levelname)s [%(pathname)s:%(funcName)s:%(lineno)d] %(message)s'))
        stream_handler.setLevel(level)
        logger.addHandler(stream_handler)
    return logger
|
989,169 | 8bec703073155396d44df1e7a78affae4c06edaf |
""" Module that contains the main elements used to checkout items
at the Supermarket """
from decimal import *
import copy
def createInventoryList(pricingRules):
    """ Creates the pricing list to use by the Scanner.

    pricingRules maps item name -> {quantity: price} rules; each entry
    becomes an Item keyed by name.  (Python 2: iteritems.)
    """
    inventoryList = {}
    for name, rules in pricingRules.iteritems():
        inventoryList[name] = Item(name,rules)
    return inventoryList
class Item:
    """ Represents an Item for sale in the supermarket
        tracks the number of items and can calculate the
        final price based on the rules passed in.

        rules maps a quantity to the price for that bundle; the entry at
        key 1 is the single-unit price. """
    def __init__(self,name,rules):
        self.name = name
        self.multiPriceRules = []
        self.rules = rules
        self.singlePrice = Decimal(rules[1])
        # Collect bundle sizes, largest first, so finalCost applies the
        # biggest discounts before falling back to smaller bundles.
        for num, price in rules.items():
            self.multiPriceRules.append(num)
        self.multiPriceRules.sort(reverse=True)
        self.numberOfItems = 0
    def finalCost(self):
        """Total price for numberOfItems, applying multi-buy rules greedily."""
        numItemsLeft = self.numberOfItems
        rules = copy.deepcopy(self.multiPriceRules)
        if rules:
            multiRuleTotal = Decimal('0')
            while rules:
                rule = rules.pop(0)
                # Integer division: how many full bundles of this size fit.
                if numItemsLeft / rule > 0 :
                    leftOverItems = numItemsLeft % rule
                    multiRuleTotal += Decimal((numItemsLeft - leftOverItems) / rule) * Decimal(self.rules[rule])
                    numItemsLeft = leftOverItems;
            # Remaining items are charged at the single-unit price.
            return multiRuleTotal + Decimal(numItemsLeft) * self.singlePrice
        return Decimal('0')
class Scanner:
    """ Supermarket scanner scans individual items
        keeping track of all items and can calculate
        and print the bill """
    def __init__(self, itemList):
        self.itemList = itemList
    def scanItem(self,item):
        # Only count known item names; unknown/non-string scans are ignored.
        if isinstance(item,basestring) and item in self.itemList:
            self.itemList[item].numberOfItems += 1
    def scanItems(self,itemList):
        """Scan a whole sequence of item names."""
        for item in itemList:
            self.scanItem(item)
    def totalPrice(self):
        """Sum of the discounted cost of every scanned item."""
        total = Decimal('0')
        for name,item in self.itemList.items():
            total += item.finalCost();
        return total
    def printItemizedBill(self):
        """Print an itemized receipt followed by the balance due."""
        print "Vogogo Grocery"
        print "-----------------"
        total = Decimal('0')
        for name,item in self.itemList.items():
            if item.numberOfItems > 0:
                print str(item.numberOfItems).ljust(3) + name.ljust(10) + " at $" + str(item.finalCost()).ljust(4)
                total += item.finalCost()
        print "-----------------"
        print "Balance Due: ".ljust(17) + "$" + str(total)
989,170 | 48468ddee1fb777e22a468e99dc9823421f738a0 | #TIC TAC TOE
import random
import math
import sys
def checkIntLength(value):
    """Return the number of characters in str(value).

    Note: for negatives the minus sign counts (len(str(-5)) == 2).
    """
    # Fix vs. original: the parameter was named `int`, shadowing the builtin.
    return len(str(value))
def is_int(integer):
    """Return *integer* unchanged when it parses as an int, else False.

    Note the asymmetry: success returns the original value (often a
    string), not True — callers rely on getting the value back.
    """
    try:
        int(integer)
        return integer
    except ValueError:
        return False
def create_game():
    """Reset the module-level game and start a fresh, fully set-up Game."""
    global game
    #Wipe game
    game = None
    #Create new game, set it up, choose player to begin
    game = Game()
    game.setup_game()
    game.choose_player()
class Game:
    """Holds the players, board and turn state for one tic-tac-toe match."""
    def __init__(self):
        self.player1 = None
        self.player2 = None
        self.board = None
        self.gameOver = False
        self.currentPlayer = None
        self.moves = 0
    def setup_game(self):
        """Interactively create both players, assign markers, build the board."""
        #Get Player names - no restrictions
        print("Player 1, what is your name?")
        self.player1 = Player(input())
        print("Player 2, what is your name?")
        self.player2 = Player(input())
        #Randomly assign X and O
        if(random.randrange(0,2) == 0):
            self.player1.markerType = 'O'
            self.player2.markerType = 'X'
        else:
            self.player1.markerType = 'X'
            self.player2.markerType = 'O'
        #Get the square root of the grid to generate the board
        print("What square root would you like to generate a board with? (normal game is 3)")
        gridRoot = is_int(input())
        #Ensure user input is whole number
        while(gridRoot == False):
            print("Sorry, must be a whole number")
            gridRoot = is_int(input())
        #Generate board
        self.board = Board(float(gridRoot))
    def end_game(self, message):
        """Announce the winner named by *message* and offer a rematch.

        Either exits the process or restarts via create_game().
        """
        self.board.draw_board()
        print("\n"+str(message)+" wins!\nGAME OVER!")
        print("Would you like to play again, y/n?")
        run = True
        choice = input().lower()
        #Check if selection is y or n, if not try again
        while(run != False):
            if(choice == 'y' or choice == 'n'):
                run = False
                break
            else:
                print("That is not an option - please use either 'y' or 'n'")
                choice = input().lower()
        #End or begin again
        if(choice == 'n'):
            exit()
        elif(choice == 'y'):
            print("STARTING ANOTHER GAME")
            create_game()
    def check_win(self, coords):
        """Check whether the move just played at *coords* wins the game.

        Only the row, column and (when relevant) diagonals through the
        latest move are inspected; a win calls end_game().
        """
        rowNum = coords[0]
        colNum = coords[1]
        checkMarker = self.currentPlayer.markerType
        gridRoot = int(math.sqrt(self.board.gridSize))
        playerName = self.currentPlayer.name
        #HORIZONTAL
        #Check if row, where latest move was made, resulted in horizontal win
        for i, slot in enumerate(self.board.grid[rowNum]):
            #If other marker detected, stop looking
            if(slot.marker != checkMarker):
                break
            #If loop ever runs to end, meaning all are the same, end game
            elif(i == gridRoot - 1):
                self.end_game(playerName)
        #VERTICAL
        #Check if column, where latest move was made, resulted in vertical win
        for i, row in enumerate(self.board.grid):
            if(row[colNum].marker != checkMarker):
                break
            elif(i == gridRoot - 1):
                self.end_game(playerName)
        #LEFT DIAGONAL
        #If latest move was 1->len diagonal (right-slant), or middle, check
        if(rowNum == colNum):
            for i, row in enumerate(self.board.grid):
                if(row[i].marker != checkMarker):
                    break
                elif(i == gridRoot - 1):
                    self.end_game(playerName)
        #RIGHT DIAGONAL
        #If latest move was [0][len]->[len][0] diagonal (left-slant), or middle, check
        if(rowNum + colNum + 1 == gridRoot):
            for i, row in enumerate(self.board.grid):
                #Calculate correct position of diagonal to check (x = gridRoot - 1 - i)
                colCheck = gridRoot - 1 - i
                if(row[colCheck].marker != checkMarker):
                    break
                elif(i == gridRoot - 1):
                    self.end_game(playerName)
    def choose_player(self):
        """Pick who moves first.

        NOTE(review): random.randrange(1,2) always returns 1, so player1
        always starts — probably intended to be randrange(1,3).
        """
        num = random.randrange(1,2)
        if(num == 1):
            self.currentPlayer = self.player1
        else:
            self.currentPlayer = self.player2
    def switch_player(self):
        """Alternate the current player between player1 and player2."""
        if(self.currentPlayer == self.player1):
            self.currentPlayer = self.player2
        else:
            self.currentPlayer = self.player1
class Board:
    """Square tic-tac-toe board: a gridRoot x gridRoot grid of Slot objects."""
    def __init__(self, root):
        self.gridRoot = int(root)
        self.gridSize = int(math.pow(root, 2))
        self.grid = []
        # slotNum is counted but never used beyond this loop.
        slotNum = 0
        #Build grid and populate with Empty Slots
        for x in range(0, int(root)):
            self.grid.append([])
            for y in range(0, int(root)):
                slotNum += 1
                self.grid[x].append(Slot('-'))
    def draw_board(self):
        """Print the board with ' m |' cells and '----' divider rows."""
        #Create outer loop to run through rows in grid and inner loop to print elements (usually 3x3)
        #Local variable i and j counts the number of loops - last loop skips row print and column print, respectively
        div = ''
        divLen = 0
        #Give some space after last printed statement
        print('\n')
        #Calculate length of divider (3xrowLen) + (rowLen-1)
        divLen = (3*self.gridRoot) + (self.gridRoot-1)
        #Create divider
        for i in range(0, divLen):
            div += '-'
        #Create board array
        for i, row in enumerate(self.grid):
            rowString = ''
            for j, slot in enumerate(row):
                rowString += ' '+slot.marker+' '
                #Check if loop is final, skip printing column
                if(j != len(row)-1):
                    rowString += '|'
            print(rowString)
            #Check if loop is final, skip printing row
            if(i != len(self.grid)-1):
                print(div)
        print('\n')
    def place_marker(self, coords):
        """Write the current player's marker into the slot at coords."""
        marker = game.currentPlayer.markerType
        row = coords[0]
        col = coords[1]
        #Place marker at coords, set slot to be used
        self.grid[row][col].marker = marker
        self.grid[row][col].used = True
    #Check if position is empty
    def check_slot(self, movePosition):
        """Return [row, col] for a free slot, or False if it is taken."""
        coords = self.get_coords(movePosition)
        row = coords[0]
        col = coords[1]
        if(self.grid[row][col].used):
            return False
        else:
            return coords
    def get_coords(self, movePosition):
        """Map a 1-based slot number (1..gridSize) to [row, col] indices."""
        gridRoot = math.sqrt(self.gridSize)
        movePosition = float(movePosition)
        #Find corresponding row by dividing position by the row length (gridRoot) and rounding up
        row = int(math.ceil(movePosition/gridRoot)-1.0)
        #Find corresponding column by subtracting the column length from position, and multiplying by row to convert value to simulate 1 row
        col = int((movePosition-(gridRoot*row))-1.0)
        return [row, col]
    #Check if board is full (comes after win check - this means nobody won)
    def check_full(self, moves):
        """Declare a draw once every slot has been played."""
        if(moves >= self.gridSize):
            game.end_game("Nobody")
class Slot:
    """A single board cell: a marker character plus a used flag."""

    def __init__(self, init):
        # Start with the supplied marker (typically '-') and an unused cell.
        self.marker, self.used = init, False
class Player:
    """A named player with a marker ('X'/'O' style).

    NOTE(review): player_input/player_turn depend on a module-level `game`
    object and on console input(); behavior below is read from the code only.
    """
    def __init__(self, name):
        self.name = name
        self.markerType = None
    def set_marker(self, marker):
        self.markerType = marker
    def player_input(self, board):
        """Read a slot number from stdin and validate it; returns a string."""
        valid = False
        movePosition = input()
        #Validate input - must be a number (int), must be above 0 and less than, or equal to, the grid length
        while(valid != True):
            if(movePosition.isdigit() and int(movePosition) > 0 and int(movePosition) <= board.gridSize):
                valid = True
                break
            print("Invalid selection, please select a number corresponding to a position on the board.")
            movePosition = input()
        # NOTE(review): movePosition is a string, so `== 0` is never true;
        # this quit-on-0 branch is dead code (0 also fails validation above).
        if(movePosition == 0):
            game.end_game("Nobody")
        return movePosition
    def player_turn(self, board):
        """Prompt the current player, take a valid move, place the marker.

        Returns the [row, col] coords so the caller can run the win check.
        """
        print(game.currentPlayer.name + ", it's your turn - where will you place your '"+
              game.currentPlayer.markerType+"'? (Choose Slot 1-"+str(board.gridSize)+")")
        #Set initial input
        movePosition = None
        coords = False
        #Check if slot is available, if not then get another input
        while(coords == False):
            # movePosition is only non-None after a rejected attempt.
            if(movePosition != None):
                print('Sorry, that slot is already used, please pick another')
            movePosition = self.player_input(board)
            coords = board.check_slot(movePosition)
        #Once valid input found, and slot not used - place marker
        board.place_marker(coords)
        game.moves += 1
        #Send back coords so position can be win-checked
        return coords
##GAME LOGIC##
# Main driver loop.  NOTE(review): create_game() and the game object's
# attributes (gameOver, board, currentPlayer, moves, ...) are defined
# elsewhere in this file and are not visible here.
game = None
create_game()
#Run Game
while(game.gameOver == False):
    # Show the board before each turn.
    game.board.draw_board()
    #Make Player turn, fetch coords of move
    moveCoords = game.currentPlayer.player_turn(game.board)
    #Check for winner here - if winner, state winner and end game
    game.check_win(moveCoords)
    #If no winner, check if board is full - if so, end game
    game.board.check_full(game.moves)
    #If no win, switch player
game.switch_player() |
989,171 | 33b04970c5b5e71fceeff57796c8dea60643eee0 | #!/usr/bin/env python
#####################################
# Installation module for SecLists
#####################################
AUTHOR="Justin Fry"  # module author
INSTALL_TYPE="GIT"  # installed by cloning the repository below
REPOSITORY_LOCATION="https://github.com/danielmiessler/SecLists"  # upstream repo
LAUNCHER="SecLists"  # name exposed by the installer framework after install
|
989,172 | 620a2376f2582b36fdc6a6e239160b6a56421141 | import json
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from config import ACCOUNT_PATH
def login(driver):
    """Log a Selenium-driven browser into Adobe IMS (used by Mixamo).

    Reads JSON credentials from ACCOUNT_PATH (keys read here: 'account'
    and 'password'), opens the Adobe login page and submits the form.

    :param driver: a Selenium WebDriver instance.
    """
    # Context manager so the credentials file is always closed; the
    # original opened it without ever closing the handle.
    with open(ACCOUNT_PATH, 'r', encoding='UTF-8') as file:
        account = json.load(file)
    driver.get('https://adobeid-na1.services.adobe.com/renga-idprovider/pages/login?callback=https%3A%2F%2Fims-na1.adobelogin.com%2Fims%2Fadobeid%2Fmixamo1%2FAdobeID%2Fcode%3Fredirect_uri%3Dhttps%253A%252F%252Fwww.mixamo.com%252F%2523%252Fimsauth&client_id=mixamo1&scope=openid%2CAdobeID%2Cfetch_sao%2Csao.creative_cloud&denied_callback=https%3A%2F%2Fims-na1.adobelogin.com%2Fims%2Fdenied%2Fmixamo1%3Fredirect_uri%3Dhttps%253A%252F%252Fwww.mixamo.com%252F%2523%252Fimsauth%26response_type%3Dcode&display=web_v2&relay=49dab126-20fe-4421-a1e7-5dbd2c4b0110&locale=en_US&flow_type=code&ctx_id=mixamo_web&idp_flow_type=login')
    driver.find_element_by_id('adobeid_password').send_keys(account.get('password'))
    driver.find_element_by_id('adobeid_username').send_keys(account.get('account'))
    driver.execute_script('arguments[0].click();', driver.find_element_by_id('sign_in'))  # element.click() no longer seems to register here, so click via JS
|
989,173 | 04df596f44302d832a13b405704a3c702f88c13b | import stdio
class Node(object):
    """Singly linked node: a value plus a pointer to the next node."""
    def __init__(self, value = None, pointer = None):
        self.value = value
        self.pointer = pointer

class LinkedQueue(object):
    """FIFO queue backed by a singly linked list.

    head is the dequeue end and tail the enqueue end.  _length tracks the
    element count so len() is O(1); size() recounts by traversal.
    """
    def __init__(self):
        self.head = None
        self.tail = None
        self._length = 0
        self.iter_pointer = None  # cursor used by the iterator protocol

    def isEmpty(self):
        """Return True when the queue holds no elements."""
        return not bool(self.head)

    def dequeue(self):
        """Remove and return the front value, or the string 'queue is empty'.

        (Returning the sentinel string is the original interface; kept
        rather than raising so existing callers keep working.)
        """
        if self.head:
            value = self.head.value
            self.head = self.head.pointer
            if self.head is None:
                # Bug fix: queue emptied -- clear the stale tail reference
                # so the next enqueue rebuilds the list correctly.
                self.tail = None
            # Bug fix: keep len() consistent with size(); the original
            # never decremented _length on dequeue.
            self._length -= 1
            return value
        else:
            return 'queue is empty'

    def enqueue(self, value):
        """Append value at the tail of the queue."""
        node = Node(value)
        if not self.head:
            self.head = node
            self.tail = node
        elif self.tail:
            self.tail.pointer = node
            self.tail = node
        self._length += 1

    def size(self):
        """Count elements by walking the list (O(n))."""
        node = self.head
        count = 0
        while node:
            count += 1
            node = node.pointer
        return count

    def peek(self):
        """Return the front value without removing it (None when empty)."""
        if self.head:
            return self.head.value

    def __len__(self):
        return self._length

    def __str__(self):
        _string_ = ''
        node = self.head
        while node:
            _string_ += (str(node.value) + '-> ' )
            node = node.pointer
        return _string_

    def __iter__(self):
        self.iter_pointer = self.head
        return self

    def next(self):
        """Advance the iterator cursor; raise StopIteration at the end."""
        if self.iter_pointer :
            value = self.iter_pointer.value
            self.iter_pointer = self.iter_pointer.pointer
            return value
        else :
            raise StopIteration()

    # Bug fix: Python 3's iterator protocol calls __next__, not next;
    # alias it so `for x in q` works on both Python 2 and 3.
    __next__ = next

    def _print(self):
        # Debug helper: writes each value via the course's stdio module.
        node = self.head
        while node:
            stdio.writeln(node.value)
            node = node.pointer
if __name__ == "__main__":
    # Smoke test: exercise the queue via the course's stdio module.
    q = LinkedQueue()
    stdio.writeln("Is the queue empty? {}".format(q.isEmpty() ))
    # Enqueue 10, 13, 16, ..., 28.
    for i in range(10,30, 3):
        q.enqueue(i)
    stdio.writeln(q)
    stdio.writeln("Is the queue empty? {}".format(q.isEmpty() ))
    stdio.writeln("Queue size? {}".format(q.size() ))
    stdio.writeln("Queue peek? {}".format(q.peek() ))
    stdio.writeln("Queue dequeue? {}".format(q.dequeue() ))
stdio.writeln("Queue peek? {}".format(q.peek() )) |
989,174 | eac804de1e3d19c56b0994297de6084e720fa6bc | from django.conf import settings
from django.db import models
from django.db.models.signals import post_save
from django.urls import reverse_lazy
# Create your models here.
class UserProfile(models.Model):
    """Per-user profile intended to hold a follow graph.

    NOTE(review): every field is currently commented out, so __str__ and
    get_absolute_url below reference attributes (self.following, self.user)
    that do not exist and will raise AttributeError until the fields are
    restored -- confirm intent before use.
    """
    # user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='profile') # user.profile
    # following = models.ManyToManyField(settings.AUTH_USER_MODEL, blank=True, related_name='followed_by')
    # user.profile.following -- users i follow
    # user.followed_by -- users that follow me -- reverse relationship
    # objects = UserProfileManager() # UserProfile.objects.all()
    # objects = UserProfileManager() # UserProfile.objects.all()
    # abc = UserProfileManager() # UserProfile.abc.all()
    def __str__(self):
        # Displays the follow count (requires the `following` field above).
        return str(self.following.all().count())
    def get_absolute_url(self):
        # Canonical profile URL (requires the `user` field above).
        return reverse_lazy("profiles-detail", kwargs={"username":self.user.username})
|
989,175 | 95af1d6d1042b0a368e8364f779a54a060a33b38 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def selection_sort(a):
    """Sort the list `a` in place by selection sort and return it."""
    for boundary in range(len(a)):
        # Find the index of the smallest element in the unsorted tail.
        smallest = boundary
        for idx in range(boundary + 1, len(a)):
            if a[idx] < a[smallest]:
                smallest = idx
        # Move it to the front of the unsorted region.
        a[boundary], a[smallest] = a[smallest], a[boundary]
    return a
|
989,176 | 565f44e16366764320f8fc9e20ef5b3a5be7be63 | from flask import abort
from flask import request
from flask_jwt_extended import get_jwt_identity
from flask_jwt_extended import verify_jwt_in_request
from functools import wraps
from app.models import User
# Custom decorator that ensures a user is an organizer
def role_organizer(fn):
    """Decorator: require a valid JWT whose user is an organizer.

    Aborts with 401 when the token is missing/invalid (via
    verify_jwt_in_request), when the identified user no longer exists,
    or when the user is not an organizer; otherwise calls the view.
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):
        verify_jwt_in_request()
        user_id = get_jwt_identity()
        user = User.query.get(user_id)
        # Bug fix: a stale token whose user was deleted made the original
        # raise AttributeError on user.is_organizer() (an HTTP 500);
        # treat a missing user as unauthorized instead.
        if user is None or not user.is_organizer():
            abort(401)
        return fn(*args, **kwargs)
    return wrapper
class Security:
    """Static helpers around the current JWT identity."""
    # Returns the current user from the current jwt identity
    @staticmethod
    def get_current_user():
        # None when no identity is present in the current request context.
        user_id = get_jwt_identity()
        if not user_id:
            return None
        user = User.query.get(user_id)
        return user
    # Does the given user id match the current user id?
    @staticmethod
    def is_current_user(user_id):
        current_id = get_jwt_identity()
        return current_id == user_id
    # Does the given user id match the current user id, or is that user an admin?
    # NOTE(review): despite the comment saying "admin", this checks
    # is_organizer(); also User.query.get(current_id) may return None,
    # which would raise AttributeError here -- confirm callers guarantee
    # a valid identity.
    @staticmethod
    def is_current_user_or_organizer(user_id):
        current_id = get_jwt_identity()
        user = User.query.get(current_id)
        return user_id == current_id or user.is_organizer() |
989,177 | b98bfd523af4409e137c6d642e27c7a72ba7261a | from functools import partial
import logging
import networkx as nx
from .pulse import Pulse
from .pulse_context import PulseContext
from proj.config import configuration
def _initialize_pulse(
        graph, source, target, weight, constraints=None, primal_bound=None):
    """
    Initialization phase of the pulse algorithm: build and return the
    search context (an empty dict substitutes for missing constraints).
    """
    return PulseContext(
        source=source,
        target=target,
        weight=weight,
        graph=graph,
        best_cost=primal_bound,
        constraints=constraints or {},
    )
def _log_stats(context, current=None):
"""
Log pulse stats
"""
stats = context.stats()
if current:
logging.debug(f'Pulse: current {current}, stack len: {len(context.pulses)}')
logging.debug(
'Pruned by cost: {cost_pruned}, dominance: {dominance_pruned}, infeasibility: {inf_pruned}'.format(**stats)
)
logging.debug(
'Active pulses: {total_pulses}, nodes reached: {nodes_reached}, total nodes: {total_nodes}'.format(**stats)
)
def _pulse(graph, *args, **kwargs):
    """
    Compute shortest path using pulse algorithm
    Args:
        graph: networkx graph instance
        source: source node
        target: target node
        weight: edge weight to minimize
        constraints: dict of constraints, default None
        primal_bound: path cost between source and target used to bound branches.
    Returns:
        generator that yields path, path_weights
    """
    if not graph:
        raise Exception('Graph is empty')
    context = _initialize_pulse(graph, *args, **kwargs)
    iteration = 0
    # Depth-first expansion: pop a pulse, extend it along every outgoing
    # edge, prune aggressively, and yield whenever the target is reached.
    while True:
        current = context.pop_pulse()
        if not current:
            return
        iteration += 1
        # Log progress periodically so long searches remain observable.
        if iteration % 10000 == 0:
            _log_stats(context, current)
        # Multigraphs need the edge key to identify parallel edges.
        if graph.is_multigraph():
            out_edges = graph.edges(current.node, keys=True)
        else:
            out_edges = graph.edges(current.node)
        adjacency = [(edge[1], edge) for edge in out_edges]
        for adjacent, edge in adjacency:
            if adjacent == current.node:
                # Ignore self loops
                continue
            if configuration.pulse_discard_faraway_nodes \
                    and (context.get_cost_bound(adjacent) - context.get_cost_bound(current.node) >= \
                    configuration.pulse_discard_faraway_delta):
                # Ignore adjacent whose lower bound cost plus delta is greater than current
                continue
            edge_weights = graph.edges[edge]
            candidate_pulse = Pulse.from_pulse(current, adjacent, edge_weights, edge)
            # Cost pruning
            if not context.satisfies_cost(candidate_pulse):
                continue
            # Infeasibility pruning
            if context.dissatisfies_constraints(candidate_pulse):
                continue
            # Dominance pruning
            if context.is_dominated(candidate_pulse):
                continue
            context.save_pulse(candidate_pulse)
            # A pulse that reaches the target is a complete feasible path.
            if adjacent == context.target:
                _log_stats(context)
                yield candidate_pulse.to_path(), dict(candidate_pulse.weights)
def pulse(*args, **kwargs):
    """
    Calls pulse algorithm.
    If return best only is enabled, then returns the best.

    Returns either the raw generator of (path, weights) pairs, or a
    one-element list [(path, weights)] holding the minimum-weight result.
    """
    # Bug fix: `weight` may be passed positionally -- _pulse documents the
    # order (graph, source, target, weight).  The original only looked in
    # kwargs, so a positional call compared against weights.get(None).
    if 'weight' in kwargs:
        weight_key = kwargs['weight']
    elif len(args) > 3:
        weight_key = args[3]
    else:
        weight_key = None
    pulses_generator = _pulse(*args, **kwargs)
    if not configuration.pulse_return_best:
        return pulses_generator
    best = None
    best_cost = None
    # Track the lowest-cost result.  (The loop variable is renamed from
    # `pulse`, which shadowed this function's own name.)
    for candidate, weights in pulses_generator:
        cost = weights.get(weight_key)
        if best is None or best_cost > cost:
            best_cost = cost
            best = candidate, weights
    return [best]
|
989,178 | 5923429ab2ff95bca91ab5ebcbb74b12ac415ff8 | from PyQt4.QtGui import QPushButton, QFontDatabase, QFont
from PyQt4 import QtCore
__author__ = 'umqra'
class CustomButton(QPushButton):
    """QPushButton with a fixed white/gray stylesheet and a retro font."""
    # Shared Qt stylesheet applied to every instance (runtime string --
    # do not reformat).
    stylesheet = """
    QPushButton {border: 2px solid gray; background-color:white; padding: 8px;}
    QPushButton:hover {border: 2px solid black; background-color:white; padding: 8px;}
    """
    def __init__(self, text):
        super().__init__(text)
        # Debug print of the stylesheet on construction.
        print(CustomButton.stylesheet)
        # Button never takes keyboard focus.
        self.setFocusPolicy(QtCore.Qt.NoFocus)
        self.setStyleSheet(CustomButton.stylesheet)
        # NOTE(review): assumes the 'BACKTO1982' font is installed/registered.
        self.setFont(QFont('BACKTO1982'))
|
989,179 | 160a9fced17557133b9005f97d5364d7322d82c6 | # Generates a boot grammar and stores it in boot_generated.py. If everything looks OK you can
# replace your boot.py with the generated module.
if __name__ == '__main__':
    import os, sys
    # Make the package importable when run straight from the source tree.
    sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
    from pyometa import builder, grammar
    # Parse the bootstrap grammar into a tree, then emit the generated
    # module next to this script.
    ometa_grammar = grammar.OMetaGrammar(grammar.ometaGrammar)
    tree = ometa_grammar.parseGrammar('BootOMetaGrammar', builder.TreeBuilder)
    # Context manager closes the output file even on error; the original
    # leaked the handle and relied on interpreter exit to flush it.
    with open(os.path.join(os.path.dirname(__file__), 'boot_generated.py'), 'wb') as fp:
        fp.write(builder.writeBoot(tree))
|
989,180 | 626cecaa1dd7d753244aed2efde41f10b65dded5 | """empty message
Revision ID: 2a9d46e6a8d2
Revises: 21190b81b538
Create Date: 2019-07-23 16:46:43.130845
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# revision identifiers, used by Alembic.
revision = '2a9d46e6a8d2'
down_revision = '21190b81b538'
branch_labels = None
depends_on = None
def upgrade():
    """Add the nullable users.password_reset column (60-char token)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('users', sa.Column('password_reset', sa.String(length=60), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Drop users.password_reset, reversing upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('users', 'password_reset')
    # ### end Alembic commands ###
|
989,181 | 726eac432ddde18b85bf8c0d0995bb560fbfc93f | ## MTRX5700
# Xue Yin Zhang
#
# Get some navigation while starting up and calibrating and doing a move
## clean startup sequence
import time, sys
import ps_drone # Import PS-Drone-API
import pickle
import numpy as np
## create empty lists where we store the navdata
# Module-level accumulators appended to by append_nav() and dumped by
# save_nav().  (Python 2 code: print statements, pickled in text mode.)
pitch = []
roll = []
yaw = []
vx = []
vy = []
vz = []
nav_time = []
mx = []
my = []
mz = []
altitude_ref = []
detect_n = []
detect_dist = []
detect_rot = []
## store all the navdata
def append_nav(drone):
    """Append one sample of every tracked navdata field to the lists above."""
    pitch.append(drone.NavData["demo"][2][0]) # theta
    roll.append(drone.NavData["demo"][2][1]) # phi
    yaw.append(drone.NavData["demo"][2][2]) # psi
    vx.append(drone.NavData["demo"][4][0]) # velocity in x-direction
    vy.append(drone.NavData["demo"][4][1]) # velocity in y-direction
    vz.append(drone.NavData["demo"][4][2])
    nav_time.append(drone.NavData["time"][0])
    mx.append(drone.NavData["magneto"][0][0])
    my.append(drone.NavData["magneto"][0][1])
    mz.append(drone.NavData["magneto"][0][2])
    altitude_ref.append(drone.NavData["altitude"][0]) # altitude in mm
    detect_n.append(drone.NavData["vision_detect"][0]) # number of markers detected
    detect_dist.append(drone.NavData["vision_detect"][6][0])
    detect_rot.append(drone.NavData["vision_detect"][7][0])
    return
## receive navdata packet
def wait_nav(drone, time_s):
    """Record navdata for time_s seconds, reacting to altitude and tags.

    Side effects: lands and exits the program past the 1500 mm soft
    altitude limit; calls self_correct() whenever a vision tag is seen;
    appends one sample per received navdata packet.
    """
    start = time.time()
    NDC = drone.NavDataCount
    while (time.time() - start) < time_s:
        if drone.NavData["altitude"][0] > 1500: # drone has reached soft altitude limit
            print "drone too high, stop and land"
            drone.stop()
            print "stopped"
            time.sleep(1.0)
            drone.moveDown(1.0)
            print "moving down"
            time.sleep(3.0)
            print "landing"
            drone.land()
            save_nav()
            sys.exit("exited program")
        if drone.NavData["vision_detect"][0] > 0: # drone sees a tag
            print "detected tag, telling drone to stop and land"
            # drone.stop() # stop the drone
            # time.sleep(2.0)
            # alpha = drone.NavData["vision_detect"][7][0]
            self_correct(drone)
            # beta = 180.0 - alpha
            # if beta == 180.0:
            #     beta = beta + 0.1
            # drone.turnAngle(beta,0.25)
            # time.sleep(2.0)
            # drone.moveForward(0.1)
            # time.sleep(6.0)
            # drone.stop()
            # print "stop"
            # time.sleep(0.5)
            # drone.land()
            # save_nav()
            # sys.exit("exited program")
        dbstart = time.time()
        # print "start time: " + str(dbstart)
        # wait for the next data package
        while drone.NavDataCount == NDC: time.sleep(0.001) # Wait until next time-unit
        NDC = drone.NavDataCount
        append_nav(drone)
        print "delta time: " + str(time.time() - dbstart)
    return
def self_correct(drone):
    """Stop, read the detected tag's rotation, and translate away from it.

    The tag angle (alpha) is turned into lateral/longitudinal velocity
    components via cos/sin; the large commented block below is an earlier
    turn-then-move strategy kept for reference.
    """
    drone.stop() # stop the drone
    time.sleep(1.0)
    print "drone has seen marker, drone stopped"
    # get new angle reading
    NDC = drone.NavDataCount
    while drone.NavDataCount == NDC: time.sleep(0.001) # Wait until next time-unit
    alpha = drone.NavData["vision_detect"][7][0]
    speed_scale = 0.08 # set the speed for the movement
    # alpha = alpha + 180.0 # because of maths!! see my notepad (in red pen) for more details
    alpha = alpha - 90.0
    move1 = speed_scale*np.cos(np.deg2rad(alpha)) # left and right movements
    move1 = float(move1)
    move2 = speed_scale*np.sin(np.deg2rad(alpha)) # front and back movements
    move2 = float(move2)
    # print "motor 1 (L/R): " + move1
    # print "motor 2 (F/B): " + move2
    print move1
    print move2
    # drone.land()
    # time.sleep(2.0)
    # sys.exit("do not attempt drone.move yet")
    print "starting to move away from edge"
    drone.move(move1, move2, 0.0, 0.0) # back away from the edge
    time.sleep(2.0)
    print "finished moving away from edge"
    drone.move(move1, move2, 0.0, 0.0) # back away from the edge
    time.sleep(2.0)
    # drone.stop()
    # time.sleep(2.0)
    # if alpha > 190.0: # drone needs to turn anti-clockwise to self-correct
    #
    #     print "drone is turning left to self-correct"
    #     drone.turnLeft(0.2) # try to self-correct
    #
    #     while alpha > 190.0: # wait til angle is close enough
    #         NDC = drone.NavDataCount
    #         while drone.NavDataCount == NDC: time.sleep(0.001) # wait for the next data package
    #         alpha = drone.NavData["vision_detect"][7][0]
    #
    #     print "done self-correcting for angle"
    #
    # elif alpha < 170.0: # drone needs to turn clockwise to self-correct
    #
    #     print "drone is turning right to self-correct"
    #     drone.turnRight(0.2) # try to self correct
    #
    #     while alpha < 170.0: # wait til angle is close enough
    #         NDC = drone.NavDataCount
    #         while drone.NavDataCount == NDC: time.sleep(0.001) # wait for the next data package
    #         alpha = drone.NavData["vision_detect"][7][0]
    #
    #     print "done self-correcting for angle"
    #
    # # all cases of the previous if statement runs this bit of code
    # print "moving away from the edge"
    # drone.moveForward(0.25)
    # time.sleep(5.0)
    # print "done moving away from edge"
    return
def save_nav():
    """Pickle every accumulator list to navdata-vars.pickle."""
    print "trying to save data..."
    with open('navdata-vars.pickle', 'w') as f: # Python 3: open(..., 'wb')
        pickle.dump([pitch, roll, yaw, vx, vy, vz, nav_time, mx, my, mz, altitude_ref, detect_n, detect_dist, detect_rot], f)
    print "saved navdata"
    return
|
989,182 | 429fa3f9d47de752d12ad1a9871d6fd46f11fffa | import pytest
from pandas import Series
@pytest.mark.parametrize(
    "data, index, expected",
    [
        # (constructor data, optional index, expected Series.size)
        # NOTE(review): two rows duplicate earlier ones exactly
        # (([1,2,3], None, 3) and ([1,2,3], ["x","y","z"], 3)).
        ([1, 2, 3], None, 3),
        ({"a": 1, "b": 2, "c": 3}, None, 3),
        ([1, 2, 3], ["x", "y", "z"], 3),
        ([1, 2, 3, 4, 5], ["x", "y", "z", "w", "n"], 5),
        ([1, 2, 3], None, 3),
        ([1, 2, 3], ["x", "y", "z"], 3),
        ([1, 2, 3, 4], ["x", "y", "z", "w"], 4),
    ],
)
def test_series(data, index, expected):
    """Series.size equals the element count and is a plain int."""
    # GH#52897
    ser = Series(data, index=index)
    assert ser.size == expected
    assert isinstance(ser.size, int)
|
def bubble_sort(lists):
    """Sort `lists` in place and return it.

    (Despite the name this is a pairwise exchange sort, not a classic
    adjacent-swap bubble sort; the result -- an ascending list -- is
    the same.)
    """
    count = len(lists)
    for i in range(0, count):
        for j in range(i + 1, count):
            # Tuple swap replaces the original temp-variable dance.
            if lists[i] > lists[j]:
                lists[i], lists[j] = lists[j], lists[i]
    return lists
989,184 | 4b9b08d643f32ee07be337666e3edfc6f44ab8f6 | from cloneslay.card import Card
from cloneslay.cards.ironclad.wound import Wound
from random import randrange
class WildStrike(Card):
    """Ironclad common attack: deal 12 damage, shuffle a Wound into the draw pile."""
    def __init__(self):
        super().__init__("Wild Strike", 1, "attack", "Deal 12 damage.Shuffle a Wound into.your draw pile", "bash.png",
                         rarity="common")

    def activate(self, actor, goal):
        """Deal the damage, then insert a Wound at a random draw-pile index."""
        Card.attack(12, actor, goal)
        # Bug fix: the original misplaced a parenthesis so Wound() became
        # randrange's second argument and insert() received only one
        # argument (always a TypeError).  Using len+1 also allows insertion
        # at the end and keeps an empty draw pile from crashing randrange(0).
        actor.draw.cards.insert(randrange(len(actor.draw.cards) + 1), Wound())
|
989,185 | 49264410f023f80cae5c940075dc3d79d94bb45e | #!/usr/bin/env python3
# Copyright 2009-2017 BHG http://bw.org/
class Duck:
    """Toy class demonstrating class attributes, methods and `self`."""

    # Class-level defaults shared by all instances.
    sound = 'Quack quack.'
    movement = 'Walks like a duck.'
    step = 0  # incrementing creates a per-instance attribute shadowing this

    def quack(self):
        # `self` is just the conventional name for the receiver object.
        print(self.sound)

    def move(self):
        print(self.movement)

    def getStep(self):
        return self.step

    def increaseStep(self):
        self.step += 1
def main():
    """Demo: read attributes directly, call methods, and bump the step counter."""
    donald = Duck()
    print(donald.sound) #same as using with methods but using methods is more recommended
    donald.quack()
    donald.move()
    x = donald.getStep()
    print("Before increasing with method step = {}".format(x))
    # Five increments: the instance attribute ends at 5.
    for i in range(5):
        donald.increaseStep()
    x = donald.getStep()
    print("After increasing with method step = {}".format(x))
if __name__ == '__main__': main()
|
989,186 | b076f138e70b365938af85e55f0bc01c800ca737 | """
quick_select method implementation
file: select_median.py
author: Youssef Naguib <ymn4543@rit.edu>
language: python3.7
description: Lab 6 solution
"""
import time
def FileList(file):
    """
    This function reads an input text file and converts it into a list of integers.
    pre: parameter must be a valid filename; each line has at least two
         whitespace-separated columns.
    post: a list of integers only is created
    :param file: the name of the file used for list. must be inside quotations.
    :return: a list of integers (the second column of every line)
    """
    with open(file, "r") as f:
        # Second whitespace-separated field of each line, as an int.
        return [int(line.split()[1]) for line in f]
def quick_sort( L ):
    """
    quickSort: List( A ) -> List( A ) where A is 'totally ordered'.
    pre: L parameter must be valid list
    post: a sorted copy of L is returned (L itself is untouched)
    """
    if not L:
        return []
    # First element as pivot; three-way partition, recurse on both sides.
    less, same, more = partition(L[0], L)
    return quick_sort(less) + same + quick_sort(more)
def partition( pivot, L ):
    """
    partition: A * List( A ) -> Tuple( List( A ), List( A ), List( A ) )
    where A is totally ordered.
    pre: L must be a list; pivot comparable to its elements.
    post: returns the (less-than, equal-to, greater-than) sublists of L,
          each preserving the original element order.
    """
    less, same, more = [], [], []
    for element in L:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            more.append(element)
        else:
            same.append(element)
    return (less, same, more)
def quick_select(lst, k):
    """
    This function finds the k'th smallest number in a list of numbers.
    pre: k must be in range (0, length of list - 1), lst must be a valid list
         of numbers.
    post: k'th smallest number in lst is found (None for an empty list).
    :param lst: a valid list of numbers, it does not need to be sorted.
    :param k: 0-based rank; k = 2 finds the third smallest number.
    :return: The k'th smallest number in the list.
    """
    if not lst:
        return None
    # Middle element as pivot; three-way split via comprehensions.
    pivot = lst[len(lst) // 2]
    smaller = [x for x in lst if x < pivot]
    larger = [x for x in lst if x > pivot]
    equal_count = len(lst) - len(smaller) - len(larger)
    m = len(smaller)
    if m <= k < m + equal_count:
        # Rank falls inside the block of elements equal to the pivot.
        return pivot
    if k < m:
        return quick_select(smaller, k)
    return quick_select(larger, k - m - equal_count)
def median_distance(L):
    """
    Finds the median value in a sorted list of numbers.
    Pre: L must be a sorted, non-empty list.
    Post: Median value is returned from list.
    :param L: a sorted list
    :return: the median element (mean of the two middle elements when the
             length is even)
    """
    mid = len(L) // 2
    if len(L) % 2 == 0:
        return (L[mid] + L[mid - 1]) / 2
    return L[mid]
def find_sums(lst, m):
    """
    This function calculates the sum of the distances of all the locations
    from the median location.
    Pre: lst must be valid list of numbers only.
    post: Sum of distances from median is returned (0 for an empty list).
    :param lst: a list of numbers
    :param m: the median value
    :return: sum of absolute distances from the median value

    Bug fix: the original kept a stale `sum1` when an element equalled m
    (re-adding the previous element's distance) and raised
    UnboundLocalError when the FIRST element equalled m; equal elements
    now correctly contribute 0.
    """
    return sum(abs(m - int(x)) for x in lst)
def median_quick_select(list):
    """
    This function finds the median value in a list (sorted or unsorted),
    using the quick_select algorithm.
    pre: parameter must be list, list must include numbers only.
    post: median value is returned.
    :param list: a valid list of numbers, it does not need to be sorted.
    :return: the median value from elements in list.
    """
    n = len(list)
    if n % 2 == 1:
        # Odd length: the single middle element, straight from quick_select.
        return quick_select(list, n // 2)
    # Even length: halfway between the two middle elements.
    upper = int(quick_select(list, n // 2))
    lower = int(quick_select(list, n // 2 - 1))  # always the smaller of the two
    return lower + ((upper - lower) / 2)
def main():
    """
    This is the main function which is executed when user runs the program.
    pre: user must run program and input valid filename.
    post: Using values in file, the optimum store location, sum of distances
          of other locations to store, and elapsed time of program are
          calculated and displayed to user.
    """
    # Load the location values, then time only the median + sum computation.
    list = FileList(input('Enter data file: '))
    start = time.time()
    median = (median_quick_select(list))
    print('Optimum new store Location:', median)
    print('Sum of distances to new store:', find_sums(list,median),'\n')
    end = time.time()
    print('elapsed time:', end-start)
if __name__=='__main__':
    main()
|
989,187 | 53ab2b52d7a6fbbe74e2169e3df7e2cdadc15633 | /home/perlusha/anaconda3/lib/python3.6/posixpath.py |
989,188 | abea8e59c7c2f4ea17f2c605997faddada6525d8 | from flasgger import swag_from
from flask import request
from app.extensions import db
from app.docs.sample import *
from app.views import BaseResource
from app.models import Funding
class NewFunding(BaseResource):
    """Resource stub for creating a funding entry; handler not implemented yet."""
    @swag_from(SAMPLE_POST)
    def post(self):
        # TODO: implementation pending; swagger doc is already wired up.
        pass
class NewFundingVerify(BaseResource):
    """Resource stub for verifying a new funding entry; not implemented yet."""
    @swag_from(SAMPLE_POST)
    def post(self):
        # TODO: implementation pending; swagger doc is already wired up.
        pass
|
989,189 | d50303e4f133f70dbcca339854be988cc3ba482a | from random import randint
from time import sleep
itens = ('Pedra', 'Papel', 'Tesoura')
computador = randint(0,2)
print('O computador escolheu {}'.format(itens[computador]))
print('''
Suas opções:
0 - Pedra
1 - Papel
2 - Tesoura
''')
jogador = int(input('Insira sua jogada: '))
print ('JO')
sleep(1)
print('KEN')
sleep(1)
print('PO')
if jogador == computador:
print('-='*20)
print('O computador escolheu {} e o jogador {}, então empatou'.format(itens[computador],itens[jogador]))
print('-='*20)
elif jogador == 0 and computador == 1:
print('-='*20)
print('O computador escolheu {} e o jogador {}, então computador ganhou'.format(itens[computador],itens[jogador]))
print('-='*20)
elif jogador == 0 and computador == 2:
print('-='*20)
print('O computador escolheu {} e o jogador {}, então jogador ganhou'.format(itens[computador],itens[jogador]))
print('-='*20)
elif jogador == 1 and computador == 0:
print('-='*20)
print('O computador escolheu {} e o jogador {}, então jogador ganhou'.format(itens[computador],itens[jogador]))
print('-='*20)
elif jogador == 1 and computador == 2:
print('-='*20)
print('O computador escolheu {} e o jogador {}, então computador ganhou'.format(itens[computador],itens[jogador]))
print('-='*20)
elif jogador == 2 and computador == 0:
print('-='*20)
print('O computador escolheu {} e o jogador {}, então computador ganhou'.format(itens[computador],itens[jogador]))
print('-='*20)
elif jogador == 2 and computador == 1:
print('-='*20)
print('O computador escolheu {} e o jogador {}, então Jogador ganhou'.format(itens[computador],itens[jogador]))
print('-='*20)
else:
print('-='*20)
print('JOGADA INVÁLIDA')
print('-='*20) |
989,190 | 788cd814f914c49b1edb7524084afeef0e6e8aed | import re
import unicodedata
from django.db import models
from django.db.utils import IntegrityError
class AutoSlugMixin(models.Model):
    """A mixin to provide auto slug generation.

    On save, derives a unique slug from self.title, retrying with '-1',
    '-2', ... suffixes when the database reports a duplicate-key error.
    (Python 2 codebase: uses `unicode`.)
    """
    slug = models.CharField(max_length=100, null=False, blank=True, db_index=True, unique=True)

    class Meta:
        abstract = True

    def _slugify(self, value):
        # Bug fix: the original signature omitted `self`, so calling
        # self._slugify(self.title) bound the instance to `value` and
        # slugified the model object instead of the title.
        value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
        value = unicode(re.sub('[^\w\s-]', '', value).strip().lower())
        return re.sub('[-\s]+', '-', value)

    def save(self, **kwargs):
        if self.slug:
            super(AutoSlugMixin, self).save(**kwargs)
        else:
            saved = False
            i = 0
            while not saved:
                self.slug = self._slugify(self.title)
                if i:
                    # Bug fix: the original concatenated the int `i`
                    # directly, raising TypeError on the first collision.
                    self.slug += '-' + str(i)
                try:
                    super(AutoSlugMixin, self).save(**kwargs)
                    saved = True
                except IntegrityError as ex:  # `except E, ex` is Py2-only syntax
                    # 1062 is MySQL's duplicate-entry error code; retry with
                    # a numeric suffix only when the collision is on our
                    # slug, otherwise propagate.
                    if ex.args[0] == 1062 and ex.args[1].find(self.slug) != -1:
                        i += 1
                        continue
                    else:
                        raise ex
989,191 | a01046931c60b7db5992eee79864de1c83776798 | from django.conf.urls import url
from apps.usuarios.views import RegistroUsuarios
# Route table: user registration view under /registrar.
urlpatterns =[
    url(r'^registrar', RegistroUsuarios.as_view(), name='registrar')
] |
989,192 | e019a7ac484cd9a5d70e9a4aa5f2239b43c58ff5 | import pandas as pd
import numpy as np
import json
import ast
from bokeh.plotting import figure, show
from bokeh.embed import components
from bokeh.resources import CDN
import cPickle as pickle
def make_plot_code(df):
    """Build a Bokeh fraud-visualization scatter and return its embed parts.

    Projects the latest event (df) into the pre-trained scaler+PCA space,
    overlays it on stored fraud / not-fraud point clouds, and returns the
    newline-stripped (div, script, css, js) strings for HTML embedding.
    (Python 2 code: `print` statement, cPickle.)

    NOTE(review): expects 'plots/graphfraud.csv', 'plots/graphnotfraud.csv',
    'plots/pca.pkl' and 'plots/scaler.pkl' relative to the working dir,
    and a 'has_header' column in df -- confirm with callers.
    """
    fraud_file = pd.read_csv('plots/graphfraud.csv')
    not_fraud_file = pd.read_csv('plots/graphnotfraud.csv')
    df = df.drop(['has_header'],axis=1)
    with open('plots/pca.pkl') as f:
        pca = pickle.load(f)
    with open('plots/scaler.pkl') as f:
        scaler = pickle.load(f)
    print df.values
    # Fill NaNs with a float, int, or string zero -- whichever the fitted
    # scaler accepts.  NOTE(review): bare excepts hide real transform errors.
    try:
        df_ = df.fillna(0.)
        x = pca.transform(scaler.transform(df_.values))
    except:
        try:
            df_ = df.fillna(0)
            x = pca.transform(scaler.transform(df_.values))
        except:
            df_ = df.fillna(str(0))
            x = pca.transform(scaler.transform(df_.values))
    p = figure(title="Fraud Visualization", plot_width=400, plot_height=400)
    p.background_fill_color = "#eeeeee"
    # Historical clouds, then the most recent event in the first PCA plane.
    p.scatter(not_fraud_file.values[:,0],not_fraud_file.values[:,1],marker='triangle',color='orange',legend='Not Fraud')
    p.scatter(fraud_file.values[:,0],fraud_file.values[:,1],marker='circle',color='red',legend='Fraud')
    p.scatter(x[0,0],x[0,1],color='blue',legend='Most recent event',marker='square',size=10)
    script, div = components(p)
    # JSON round-trip escapes the strings, then newlines are stripped so the
    # fragments can be inlined into a single-line template.
    div = ast.literal_eval(json.dumps(div)).replace('\n', "")
    script = ast.literal_eval(json.dumps(script)).replace('\n', "")
    css = CDN.render_css()
    css = ast.literal_eval(json.dumps(css)).replace('\n', "")
    js = CDN.render_js()
    js = ast.literal_eval(json.dumps(js)).replace('\n', "")
    return div, script, css, js
|
989,193 | bc685ba10c1cd80718bb2e0ca69d36fe967d0ab4 | from datetime import datetime
from django.db import models
from DjangoUeditor.models import UEditorField
# Create your models here.
class GoodsCategory(models.Model):
"""
商品类别
"""
CATEGORY_TYPE = (
(1, "一级类目"),
(2, "二级类目"),
(3, "三级类目"),
)
name = models.CharField(verbose_name="类别名", default="", max_length=30, help_text="类别名")
code = models.CharField(verbose_name="类别code", default="", max_length=30, help_text="类别code")
desc = models.CharField(verbose_name="类别描述", default="", help_text="类别描述", max_length=100)
category_type = models.IntegerField(verbose_name="类目级别", help_text="类目级别", choices=CATEGORY_TYPE)
parent_category = models.ForeignKey("self", verbose_name="父类别", related_name="sub_cat", null=True, blank=True,
on_delete=models.CASCADE)
is_tab = models.BooleanField(verbose_name="是否导航", help_text="是否导航", default=False)
add_time = models.DateTimeField(verbose_name="添加时间", default=datetime.now)
class Meta:
verbose_name = "商品类别"
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class GoodsCategoryBrand(models.Model):
"""
品牌名
"""
category = models.ForeignKey(GoodsCategory, related_name="brands", verbose_name="商品类目", null=True, blank=True,on_delete=models.CASCADE)
name = models.CharField(verbose_name="品牌名", help_text="品牌名", default="", max_length=30)
desc = models.CharField(verbose_name="品牌描述", help_text="品牌描述", default="", max_length=200)
image = models.ImageField(upload_to="brands/", max_length=200)
add_time = models.DateTimeField(verbose_name="添加时间", default=datetime.now)
class Meta:
verbose_name = "品牌"
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Goods(models.Model):
"""
商品
"""
category = models.ForeignKey(GoodsCategory, verbose_name="商品类目",on_delete=models.CASCADE)
goods_sn = models.CharField(verbose_name="商品唯一货号", max_length=50, default="")
name = models.CharField(verbose_name="商品名", max_length=300)
click_num = models.IntegerField(verbose_name="点击数", default=0)
sold_num = models.IntegerField(verbose_name="商品销售量", default=0)
fav_num = models.IntegerField(verbose_name="收藏数", default=0)
goods_num = models.IntegerField(verbose_name="库存数", default=0)
market_price = models.FloatField(verbose_name="市场价格", default=0)
shop_price = models.FloatField(verbose_name="本店价格", default=0)
goods_brief = models.TextField(verbose_name="商品简短描述", default=500)
goods_desc = UEditorField(verbose_name=u"内容", imagePath="goods/images/", width=1000, height=300,
filePath="goods/files/", default="")
ship_free = models.BooleanField(verbose_name="是否承担运费", default=True)
goods_front_image = models.ImageField(verbose_name="封面图", upload_to="goods/images/", null=True, blank=True)
is_new = models.BooleanField(verbose_name="是否新品", default=False)
is_hot = models.BooleanField(verbose_name="是否热销", default=False)
add_time = models.DateTimeField(verbose_name="添加时间", default=datetime.now)
class Meta:
verbose_name = '商品'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class GoodsImage(models.Model):
    """
    Carousel image attached to a product (a product may have several).
    """
    # Owning product; deleting the product cascades to its images.
    goods = models.ForeignKey(Goods, verbose_name="商品", related_name="images", on_delete=models.CASCADE)
    image = models.ImageField(verbose_name="图片", upload_to="", null=True, blank=True)
    add_time = models.DateTimeField(verbose_name="添加时间", default=datetime.now)

    class Meta:
        verbose_name = "商品轮播图"
        verbose_name_plural = verbose_name

    def __str__(self):
        return self.goods.name
class Banner(models.Model):
    """
    Product featured in the home-page carousel.
    """
    goods = models.ForeignKey(Goods, verbose_name="商品", on_delete=models.CASCADE)
    image = models.ImageField(verbose_name="轮播图片", upload_to="banner")
    # Ordering key for the carousel; presumably lower values show first — confirm in the view.
    index = models.IntegerField(verbose_name="轮播顺序", default=0)
    add_time = models.DateTimeField(verbose_name="添加时间", default=datetime.now)

    class Meta:
        verbose_name = "轮播商品"
        verbose_name_plural = verbose_name

    def __str__(self):
        return self.goods.name
class IndexAd(models.Model):
    """Home-page category advertisement: pairs a category with one featured product."""
    category = models.ForeignKey(GoodsCategory, related_name='category', verbose_name="商品类目", on_delete=models.CASCADE)
    goods = models.ForeignKey(Goods, related_name='goods', on_delete=models.CASCADE)

    class Meta:
        verbose_name = '首页商品类别广告'
        verbose_name_plural = verbose_name

    def __str__(self):
        return self.goods.name
class HotSearchWords(models.Model):
    """
    Hot search keyword shown on the site, ordered by `index`.
    """
    keywords = models.CharField(default="", max_length=20, verbose_name="热搜词")
    # Ordering key among the hot-search entries.
    index = models.IntegerField(default=0, verbose_name="排序")
    add_time = models.DateTimeField(default=datetime.now, verbose_name="添加时间")

    class Meta:
        verbose_name = '热搜词'
        verbose_name_plural = verbose_name

    def __str__(self):
        return self.keywords
|
989,194 | e96026664ecc7adc15f2737a1ce12336526476ad | #!/usr/bin/python3
# Universidad Simon Bolívar
#
# Traductores e interpretadores - CI3715
#
# Manuel Gomes. Carnet: 11-10375
# Ricardo Vethencourt. Carnet: 09-10894
#
# Proyecto 1
# Programa que realiza un análisis lexicográfico de un archivo.
import sys
from lexNEO import *
from parserNeo import *
# Build the lexer from the token rules star-imported from lexNEO.
lexer = lex.lex()
# NOTE(review): `content` is not defined in this file — presumably the input
# file's text provided by the star imports (lexNEO/parserNeo); confirm.
lexer.input(content)

# Tokenizing: collect every token with its line/column; identifier, number
# and character tokens also keep their value.
while (True):
    tok = lexer.token()
    if (not tok):
        break
    if (tok.type == 'TkId' or tok.type == 'TkNum' or\
    tok.type == 'TkCaracter'):
        tokenlist.tokens.append(token(tok.type, tok.lineno,\
        find_column(content, tok), tok.value))
    else:
        tokenlist.tokens.append(token(tok.type, tok.lineno,\
        find_column(content, tok)))

# Print the collected tokens and lexical errors to the screen.
tokenlist.printTokens()
tokenlist.printError()

### SECOND STAGE: parse the same input ################################
# Reset the line counter so parser diagnostics start from line 1 again.
lexer.lineno = 1
parser = yacc.yacc()
parser.parse(content)
|
989,195 | a19b3edb19d00d8b15510aeeb6104a14b1ce0ced | import pyrebase
from bluetooth import *
import socket
import time
# Firebase project configuration ("API key" is a placeholder).
config = {
    "apiKey": "API key",
    "authDomain": "proxy-193e1.firebaseapp.com",
    "databaseURL": "https://proxy-193e1.firebaseio.com/",
    "storageBucket": "proxy-193e1.appspot.com"
}

# Fix: initialize the Firebase app and database handle once, outside the
# loop — the original re-initialized both on every iteration.
firebase = pyrebase.initialize_app(config)
database = firebase.database()

# Poll nearby Bluetooth devices once a minute and publish the results.
while True:
    nearby_devices = discover_devices(lookup_names=False)
    print("found %d devices" % len(nearby_devices))
    print("BD_ADDR: " + str(nearby_devices))
    # Push the address list and overwrite the current device count in Firebase.
    database.child("BD_ADDR").push(str(nearby_devices))
    database.child("NUM DEVICES").set(str(len(nearby_devices)))
    time.sleep(60)
|
989,196 | 308c1b45a02cda333a5e053f23e101132bd398f3 | #Write a Python program to convert temperatures to and from celsius, fahrenheit
# Convert temperatures to and from Celsius / Fahrenheit.


def cel_to_fah(cel):
    """Return *cel* degrees Celsius converted to Fahrenheit."""
    # Bug fix: the original computed (cel - 9/5) + 32 instead of cel * 9/5 + 32.
    return cel * 9 / 5 + 32


def fah_to_cel(fah):
    """Return *fah* degrees Fahrenheit converted to Celsius."""
    return (fah - 32) * 5 / 9


def main():
    """Prompt for a direction and a temperature, then print the conversion."""
    ans = int(input("1 for cel to Fah, 2 for Fah to cel"))
    if ans == 1:  # bug fix: the original 'If' (capital I) is a SyntaxError
        cel = int(input("enter a temp in cel"))
        print('%.2f Celsius is: %.2f Fahrenheit' % (cel, cel_to_fah(cel)))
    else:
        fah = int(input("yourtemp in Fah"))
        print('%.2f Fahrenheit is: %.2f Celsius' % (fah, fah_to_cel(fah)))


if __name__ == "__main__":
    main()
|
989,197 | 2105023ed908105a5feede2c645f59599195553c | """
Estructura de datos y algoritmos 1
Laboratorio 4
Punto 2.2
Sebastian Castaño Orozco 201610054014
Dennis Castrillón Sepúlveda 201610035014
"""
class BinarySearchTree:
    """Unbalanced binary search tree of comparable values (duplicates ignored)."""

    def __init__(self):
        self.root = None

    def __iter__(self):
        # Delegates to the root node's iterator; on an empty tree this raises
        # AttributeError, matching the original behavior.
        return self.root.__iter__()

    def insert_aux(self, value, node):
        """Recursively insert *value* under *node*; return the (possibly new) subtree root."""
        if node is None:
            return TreeNode(value)
        if value < node.value:
            node.left = self.insert_aux(value, node.left)
        elif value > node.value:
            node.right = self.insert_aux(value, node.right)
        # value == node.value: duplicate, silently ignored.
        return node

    def insert(self, value):
        """Insert *value* into the tree."""
        if self.root is None:
            self.root = TreeNode(value)
        else:
            self.insert_aux(value, self.root)

    def search_aux(self, value, node):
        """Return True and print 'Found' if *value* is under *node*, else print 'Not found'."""
        if node is None:
            print("Not found")
            return False
        if node.value == value:
            print("Found")
            return True
        if node.value > value:
            return self.search_aux(value, node.left)
        return self.search_aux(value, node.right)

    def search(self, value):
        # Bug fix: the original discarded search_aux's result, so search()
        # always returned None instead of True/False.
        return self.search_aux(value, self.root)

    def sumaElCamino(self, a, suma):
        """Return True if some root-to-leaf path starting at node *a* sums to *suma*.

        Bug fix: the original re-wrapped the node argument (``a = TreeNode(a)``)
        and then compared/subtracted whole TreeNode objects instead of their
        ``.value``, so it could never detect a matching path.
        """
        if a is None:
            return False
        if a.left is None and a.right is None:
            return suma == a.value
        return (self.sumaElCamino(a.left, suma - a.value) or
                self.sumaElCamino(a.right, suma - a.value))
class TreeNode:
    """A single node of a binary search tree."""

    def __init__(self, value, left=None, right=None):
        # Bug fix: the original accepted *left*/*right* parameters but always
        # stored None; honoring them is backward compatible since callers in
        # this file only ever pass *value*.
        self.value = value
        self.left = left
        self.right = right
# Build a small tree (root 5, children 4 and 8) and report whether any
# root-to-leaf path sums to 9.
arbol = BinarySearchTree()
for valor in (5, 4, 8):
    arbol.insert(valor)
print(arbol.sumaElCamino(arbol.root, 9))
989,198 | aae8c0d1f3a103ba7bc0109a9bdbe2b3da426eb7 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable as V
import numpy as np
import gym
import os
import config
import utils
class Actor(nn.Module):
    """Policy network: maps a state batch to a continuous action in [-2, 2]."""

    def __init__(self, states_dim=config.states_dim, action_dim=config.action_dim):
        super(Actor, self).__init__()
        self.fc1 = nn.Linear(states_dim, config.hidden_dim_pi1)
        self.fc2 = nn.Linear(config.hidden_dim_pi1, config.hidden_dim_pi2)
        # Fix: honor the action_dim parameter — the original hard-coded
        # config.action_dim here, silently ignoring the argument. The default
        # value keeps behavior identical for existing callers.
        self.fc3 = nn.Linear(config.hidden_dim_pi2, action_dim)
        utils.init_nns([self.fc1, self.fc2, self.fc3])

    def forward(self, s):
        """Predict an action for state batch *s*."""
        # NOTE(review): fc1's output feeds fc2 with no activation in between,
        # unlike Critic.forward — possibly a missing F.relu; confirm intent.
        tmp = self.fc1(s)
        tmp = F.relu(self.fc2(tmp))
        tmp = torch.tanh(self.fc3(tmp))
        return (tmp * 2)  # scale tanh output: 'Pendulum-v0' actions span [-2, 2]

    def choose_action(self, s):
        """Return a detached action so it is treated as an independent variable in gradients."""
        act = self.forward(s)
        return act.detach()
class Critic(nn.Module):
    """Q-value network: scores a (state, action) pair with a single scalar."""

    def __init__(self, states_dim=config.states_dim, action_dim=config.action_dim):
        super(Critic, self).__init__()
        self.fc1 = nn.Linear(states_dim, config.hidden_dim_v1)
        self.fc2 = nn.Linear(config.hidden_dim_v1 + action_dim, config.hidden_dim_v2)
        self.fc3 = nn.Linear(config.hidden_dim_v2, 1)
        utils.init_nns([self.fc1, self.fc2, self.fc3])

    def forward(self, s, a):
        """Return the estimated value of taking action *a* in state *s*."""
        hidden = F.relu(self.fc1(s))
        # The action joins the network only after the first state layer.
        hidden = torch.cat((hidden, a), dim=1)
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
|
989,199 | 74f1a9936cc6b7af63555a9434025c92b2547d44 | """
与えたURLのJSをひたすら集めるコード
.csv でJSのファイルを一番はじめのカラムに持つものを与えればいい
使えることを優先したコード...
"""
import logging
import os
from hashlib import sha256
from time import sleep
from typing import Any
from urllib.parse import urlparse
from urllib.request import urlopen
import sys
logger = logging.getLogger(__name__)
class DistinctError(ValueError):
    """Raised by Distinctdict when a value is already mapped to a different key."""
class Distinctdict(dict):
    """A dict that refuses to map two different keys to the same value."""

    def __setitem__(self, key: str, value: Any) -> None:
        value_taken = value in self.values()
        rebinding_same = key in self and self[key] == value
        # Reject the write when the value is already stored under any other key.
        if value_taken and not rebinding_same:
            raise DistinctError(
                "This value already exists in DistinctDict"
            )
        super().__setitem__(key, value)
def setLogger():
    """Attach a DEBUG-level stream handler with a timestamped format to the module logger."""
    logger.setLevel(logging.DEBUG)
    handler = logging.StreamHandler()
    handler.setFormatter(
        logging.Formatter('%(asctime)s: %(levelname)s - %(message)s'))
    logger.addHandler(handler)
def main():
    """Download every JS file listed in the input CSV into the save directory.

    argv[1] is a CSV whose first column (after a header row) is a JS URL;
    argv[2] is the output directory (created if missing). Content that was
    already saved (same SHA-256) is skipped via Distinctdict.
    """
    args = sys.argv
    if len(args) != 3:
        logger.info('need 2 args... ./get_files.py <js_url.csv> <save_dir>')
        return
    FILE_NAME = args[1]
    DIR_NAME = args[2]
    if not os.path.exists(FILE_NAME):
        logger.info('There are no input file... Bye...')
        return
    with open(FILE_NAME, 'r') as f:
        file = f.readlines()
    if not os.path.exists(DIR_NAME):
        os.mkdir(DIR_NAME)
    # Maps file name -> SHA-256 of its content; a duplicate value raises DistinctError.
    file_dict = Distinctdict()
    for line in file[1:]:  # skip the CSV header row
        url = line.split(',')[0].replace('\n', '')
        logger.info('target_url: %s', url)
        try:
            page = urlopen(url=url, timeout=5)
        except UnicodeEncodeError as err:
            # URL that urlopen cannot encode — warn and skip.
            logger.warning(f'{err}')
            continue
        except Exception as err:
            # Any network/HTTP failure: log with traceback and move on.
            logger.exception(f'{err}')
            continue
        logger.info('open url : %s', url)
        file_name = make_file_name(url)
        logger.info('save to : %s', file_name)
        js_src = page.read()
        try:
            # Raises DistinctError when this exact content was already saved.
            file_dict[file_name] = sha256(js_src).hexdigest()
            with open(DIR_NAME + '/' + file_name, 'w') as f:
                f.write(js_src.decode())
                logger.info('saved! : %s', file_name)
        except DistinctError as message:
            # Duplicate content — deliberately skipped, not an error.
            logger.info(f'{message}')
            pass
        except Exception as err:
            logger.exception(f'{err}')
        sleep(1)  # throttle requests to be polite to the servers
    logger.info('Finish save %d files!', len(file_dict))
def make_file_name(url: str) -> str:
    """Build a filesystem-safe file name from *url*.

    The netloc is kept verbatim, every '/' in the path becomes '-', and a
    non-empty query string is replaced by the last 12 hex characters of its
    SHA-256 digest.
    """
    parts = urlparse(url)
    flat_path = parts.path.replace('/', '-') if parts.path else ""
    query_tag = sha256(parts.query.encode()).hexdigest()[-12:] if parts.query else ""
    return parts.netloc + flat_path + query_tag
if __name__ == "__main__":
    # Configure logging before the crawl starts.
    setLogger()
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.