blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
39bfc099bc688fabf0c71e5df5cdc5a462b08d3d | 29f01d669b179591380decaf74332856d712be67 | /MCB.py | b09afab447b721d9b5efb64fd05f9ef6de38c9a2 | [] | no_license | madhawabk/IcyPlanetMassRadiusRelation | 982b776ae56ec409309b3daa5fe33c9a13e3e993 | d277b9e794a4fcd62033f21308a18383a2eb0f2d | refs/heads/master | 2023-06-25T20:11:49.765760 | 2021-07-30T16:20:03 | 2021-07-30T16:20:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,868 | py | import pylab
from math import *
from scipy.constants import G
from scipy import optimize
import matplotlib.pyplot as plt
import numpy as np
def runkut(n, x, y, h):
    """Advance the ODE system defined by the module-level derivs() one step,
    from x to x+h, using the Runge-Kutta-Gill variant of classical RK4.

    n : number of equations; components y[1]..y[n] are used (y[0] is ignored).
    x : independent variable (radius, in km, in this script).
    y : state vector; mutated in place and also returned.
    h : step size.

    Returns the tuple (x + h, y).

    The decimal constants are the Gill coefficients: 0.2071067811 ~ (sqrt(2)-1)/2,
    0.2928932188 ~ 1 - 1/sqrt(2), 0.7071067811 ~ 1/sqrt(2),
    1.7071067811 ~ 1 + 1/sqrt(2), 0.5857864376 ~ 2 - sqrt(2),
    3.4142135623 ~ 2 + sqrt(2), 0.16666666667 ~ 1/6.
    """
    y0 = y[:]  # shallow copy of the state at the start of the step
    k1 = derivs(n, x, y)
    for i in range(1, n+1): y[i] = y0[i]+0.5*h*k1[i]
    k2 = derivs(n, x+0.5*h, y)
    for i in range(1, n+1): y[i] = y0[i]+h*(0.2071067811*k1[i]+0.2928932188*k2[i])
    k3 = derivs(n, x+0.5*h, y)
    for i in range(1, n+1): y[i] = y0[i]-h*(0.7071067811*k2[i]-1.7071067811*k3[i])
    k4 = derivs(n, x+h, y)
    for i in range(1, n+1):
        a = k1[i]+0.5857864376*k2[i]+3.4142135623*k3[i]+k4[i]
        y[i] = y0[i]+0.16666666667*h*a
    x += h
    return (x, y)
#----------------------------------------------------------------------------
# B-M-Stixrude parameters for Mg-post-perovskite (1st-lower mantle) and Mg-perovskite (2nd-upper mantle)
K_0 = [252.58, 128.690]       # Reference bulk modulus (GPa)
K_0_prime = [4.080, 4.264]    # Pressure derivative of the bulk modulus
rho_0 = [4256.95, 3407.43]    # Reference density (kg/m^3)
V_0 = [24.43, 44.52]          # Reference molar volume (cm^3/mol)
theta_0 = [887.96, 765.067]   # Reference Debye temperature (K)
gamma_0 = [1.584, 0.996]      # Reference Grueneisen parameter
beta = 2.382                  # Volume exponent of the Grueneisen parameter
p_ini = 5.0          # Initial pressure at the core-mantle boundary (Pcmb at r_cmb), GPa
T_ini = 1766.37      # Initial temperature at the core-mantle boundary (Tcmb at r_cmb), K
m_ini = 7.37059856383e+24   # Mass enclosed below r_ini (kg)
g_ini = 12.04        # Gravity at r_ini (m/s^2)
r_ini = 6390.30      # Starting radius (km)
q_ini = 0.00106      # Initial heat-flux state component
T_0 = 300.0          # Reference temperature in kelvins
n0 = 1.0             # Mole count used in the thermal-pressure terms
R = 8.314            # Gas constant (J/(mol K))
epsilon = 7.38 * (10**(-11))  # Internal heating rate per unit mass used in dq/dr
# NOTE: the original code first bound `density = 3542.82` and immediately
# rebound the name to a list; the dead scalar assignment has been removed.
density = []         # Densities recorded at every derivs() evaluation
kappa = 8.000        # Thermal conductivity used in dT/dr
def integrate(f, a, b, n):
    """Composite trapezoidal quadrature of f over [a, b] using n subintervals."""
    step = float(b - a) / n
    total = (0.5 * f(a)) + (0.5 * f(b))
    for k in range(1, n):
        total += f(a + (k * step))
    return total * step
def differentiate(f, a, var='volume', h=0.01):
    """Central finite difference of f at a.

    var='volume':      `a` is the reduced volume x = V/V_0[1]; the step h is
                       applied in *physical* volume units (cm^3/mol), so the
                       result is df/dV, not df/dx.
    var='temperature': plain central difference df/dT with step h.

    Raises ValueError for any other `var` (the original silently fell through
    to an UnboundLocalError in that case).
    """
    if var == 'volume':
        val = (a * V_0[1])
        t1 = f((val + h) / V_0[1])
        t2 = f((val - h) / V_0[1])
    elif var == 'temperature':
        t1 = f(a + h)
        t2 = f(a - h)
    else:
        raise ValueError("var must be 'volume' or 'temperature', got %r" % (var,))
    return (t1 - t2) / (2.0 * h)
def th(t):
    """Debye energy integrand t^3/(e^t - 1); the tiny 1e-7 offset in the
    denominator guards against division by zero at t = 0."""
    return (t**3.0) / (float(exp(t)+0.0000001) - 1.0)

def c_v(z):
    """Heat-capacity integrand z^4 e^z/(e^z - 1)^2, with the same 1e-7 guard."""
    return ((z**4.0) * exp(z)) / (((exp(z)+0.0000001) - 1.0)**2.0)
def f_BM_Stixrude(x, K_0, K_0_prime, theta_0, gamma_0, p, T):
    """Third-order Birch-Murnaghan-Stixrude equation of state written as a
    root-finding residual: returns P(x, T) - p (GPa) so that brentq can solve
    for the reduced volume x = V/V_0 at pressure p and temperature T.

    x               : reduced volume V/V_0 (dimensionless)
    K_0, K_0_prime  : reference bulk modulus (GPa) and its pressure derivative
    theta_0         : reference Debye temperature (K)
    gamma_0         : reference Grueneisen parameter
    p               : target pressure (GPa)
    T               : temperature (K)
    """
    f = (1.0 / 2.0) * ((x**(-2.0 / 3.0)) - 1.0)  # Eulerian finite strain from x = V / V_0
    gamma = gamma_0 * (x**beta)  # volume-dependent Grueneisen parameter
    # Squared Debye temperature from the Stixrude expansion in f; it can go
    # negative for extreme strains, hence the sign guard below.
    theta_x = (theta_0**2.0) * (1 + (6.0 * gamma_0 * f) + (((-6.0 * gamma_0) + (18.0 * (gamma_0**2.0)) - (9.0 * beta * gamma_0))* (f**2.0)))
    if theta_x < 0:
        theta = ((-1.0 * theta_x))**0.5  # NOTE(review): uses |theta_x|**0.5 to stay real -- confirm intended
    else:
        theta = (theta_x)**0.5
    # Cold (static) part of the 3rd-order Birch-Murnaghan pressure.
    ta = (3.0 * K_0 * f) * ((1.0 + (2.0 * f))**(5.0 / 2.0))
    tb = (1.0 + ((3.0 / 2.0) * (K_0_prime - 4.0) * f))
    V = x * V_0[1]
    # Thermal pressure (Debye integrals evaluated between T_0 and T).
    diff = ((T**4.0) * (integrate(th, 0.0, (theta/T), 600))) - ((T_0**4.0) * (integrate(th, 0.0, (theta/T_0), 600)))
    Pth = (((9.0 * gamma * n0 * R) / (V * (theta**3.0))) * diff)
    # Pth / 1000.0 brings the thermal term onto the same GPa scale as the cold term.
    return (ta * tb) + (Pth / 1000.0)- p
def K_BM(x, K_0, K_0_prime, theta_0, gamma_0, T):
    """Isothermal bulk modulus (GPa) for the Birch-Murnaghan-Stixrude EOS:
    the cold 3rd-order BM part plus a thermal correction.

    BUG FIX: the original computed diff_b (the heat-capacity Debye integral
    over c_v) and then never used it -- C_v was built from diff_a, the energy
    integral over th.  C_v now uses diff_b, matching the integrand it is
    derived from.
    """
    f = (1.0 / 2.0) * ((x**(-2.0 / 3.0)) - 1.0)  # Eulerian finite strain
    gamma = gamma_0 * (x**beta)                   # Grueneisen parameter at x
    theta_x = (theta_0**2.0) * (1 + (6.0 * gamma_0 * f) + (((-6.0 * gamma_0) + (18.0 * (gamma_0**2.0)) - (9.0 * beta * gamma_0))* (f**2.0)))
    theta = (theta_x)**0.5                        # Debye temperature at x
    # Cold (static) 3rd-order Birch-Murnaghan bulk modulus.
    ext = ((1.0 + (2.0 * f))**(5.0 / 2.0))
    tc = K_0 + ((3.0 * K_0 * K_0_prime) - (5.0 * K_0)) * f
    td = (27.0 / 2.0) * ((K_0 * K_0_prime) - (4.0 * K_0)) * (f**2.0)
    K_i = ext * (tc + td)
    V = x * V_0[1]
    # Thermal correction terms (Debye integrals between T_0 and T).
    diff_a = ((T**4.0) * (integrate(th, 0.0, (theta/T), 600))) - ((T_0**4.0) * (integrate(th, 0.0, (theta/T_0), 600)))
    E_har = ((9.0 * gamma * n0 * R) / (V * (theta**3.0))) * diff_a
    diff_b = ((T**4.0) * (integrate(c_v, 0.0, (theta/T), 600))) - ((T_0**4.0) * (integrate(c_v, 0.0, (theta/T_0), 600)))
    C_v = ((9.0 * (gamma**2.0) * n0 * R) / (V * (theta**3.0))) * diff_b
    K_th = ((gamma + 1.0 - beta) * E_har) - C_v
    return K_i + K_th
def p_th(x, K_0, K_0_prime, theta_0, gamma_0, T):
    """Thermal pressure (GPa) of the BM-Stixrude EOS at reduced volume x and
    temperature T; the Debye integral is taken between T_0 and T.
    """
    f = (1.0 / 2.0) * ((x**(-2.0 / 3.0)) - 1.0)  # Eulerian finite strain
    gamma = gamma_0 * (x**beta)                   # Grueneisen parameter at x
    theta_x = (theta_0**2.0) * (1 + (6.0 * gamma_0 * f) + (((-6.0 * gamma_0) + (18.0 * (gamma_0**2.0)) - (9.0 * beta * gamma_0))* (f**2.0)))
    theta = (theta_x)**0.5                        # Debye temperature at x
    V = x * V_0[1]
    # Thermal pressure for the BM-Stixrude regime.
    diff = ((T**4.0) * (integrate(th, 0.0, (theta/T), 600))) - ((T_0**4.0) * (integrate(th, 0.0, (theta/T_0), 600)))
    Pth = (((9.0 * gamma * n0 * R) / (V * (theta**3.0))) * diff)
    return Pth / 1000.0  # convert to the GPa scale used by the callers
def derivs(n, x, y):
    """Right-hand side of the planetary-interior ODE system, called by runkut.

    State vector (1-indexed): y[1]=P (GPa), y[2]=m (kg), y[3]=g (m/s^2),
    y[4]=T (K), y[5]=q (heat-flux component).  x is the radius in km.
    Returns dy with dy[i] = d(y[i])/dr; dy[0] is unused.

    Side effects: appends the local density to the module-level `density`
    list and prints it.
    """
    dy = [0 for i in range(0, n+1)]
    # Invert the BM-Stixrude EOS for the reduced volume s2 = V/V_0 at the
    # local pressure y[1] and temperature y[4] (upper-mantle parameters, index 1).
    s2 = optimize.brentq(f_BM_Stixrude, 0.01, 6.0, args=(K_0[1], K_0_prime[1], theta_0[1], gamma_0[1], y[1], y[4]))
    V = s2 * V_0[1]
    rho = (1 / s2) * rho_0[1]          # density from the reduced volume
    gamma_m = gamma_0[1] * (s2**beta)  # Grueneisen parameter at s2
    f = (1.0 / 2.0) * ((s2**(-2.0 / 3.0)) - 1.0)
    theta_x = (theta_0[1]**2.0) * (1 + (6.0 * gamma_0[1] * f) + (((-6.0 * gamma_0[1]) + (18.0 * (gamma_0[1]**2.0)) - (9.0 * beta * gamma_0[1]))* (f**2.0)))
    theta = (theta_x)**0.5
    # dPth/dT by forward difference; then C_v = (V/gamma) * dPth/dT.
    tx = p_th(s2, K_0[1], K_0_prime[1], theta_0[1], gamma_0[1], y[4])
    ty = p_th(s2, K_0[1], K_0_prime[1], theta_0[1], gamma_0[1], (y[4] + 0.01))
    dydx_2 = (ty - tx) / 0.01
    Cv = dydx_2 * (V / gamma_m)
    # Isothermal bulk modulus K_T = -V * dP/dV at constant T (p=0 residual).
    fg = lambda xs: f_BM_Stixrude(xs, K_0[1], K_0_prime[1], theta_0[1], gamma_0[1], 0.0, y[4])
    dydx = differentiate(fg, s2, var='volume')
    K_T = dydx * (-1.0) * (s2 * V_0[1])
    alpha = (gamma_m * Cv) / (K_T * V)            # thermal expansivity
    K_s = K_T * (1.0 + (alpha * gamma_m * y[4]))  # adiabatic bulk modulus (not used below)
    print("density is", rho)
    density.append(rho)
    # Structure equations; the powers of 10 convert between km/GPa/SI units.
    dy[1] = (-1.0) * (10**(-6.0))*(rho) * (y[3]) #dy[1] is dP
    dy[2] = (10**9.0) * 4.0 * pi * (x**2) * (rho) #dy[2] is dm
    dy[3] = (10**3.0) * (4.0 * pi * G * (rho)) - ((2.0 * y[3])/ x ) #dy[3] is dg
    dy[4] = (-1.0) * (10**(5.0)) * (y[5] / kappa) #dy[4] is dT
    dy[5] = ((rho * epsilon) - ((2.0 * y[5]) / (x * 1.000))) #dy[5] is dq
    return dy
#----------------------------------------------------------------------------
# Integration driver: march the structure equations outward from r_ini.
N = 10  # Inverse step size: runkut advances with h = 1.0/N km per step
# x = r (radius, km); y = [unused, P (GPa), m (kg), g (m/s^2), T (K), q]
# m = y[2]; radius r is in km (IMPORTANT)
mass = []
radius = []
pressure = []
gravity = []
Temperature = []
x = r_ini; y = [0.0, p_ini, m_ini, g_ini, T_ini, q_ini]  # Sets boundary conditions at the core-mantle boundary
while x < 6440.30:
    (x, y) = runkut(5, x, y, 1.0/N)
    print("mass is", y[2], "Kg")
    print("radius is", x, "Km")
    print("pressure is", y[1], "GPa")
    print("gravity is", y[3])
    print("Temperature is", y[4], "Kelvin")
    print("Q is", (y[5] * 100))
    mass.append(y[2])
    radius.append(x)
    pressure.append(y[1])
    gravity.append(y[3])
    Temperature.append(y[4])
import csv
# Persist the profiles as tab-separated files.  The file handles were renamed
# from `x` to `out` so they no longer shadow the radius variable above.
with open('Proxima_conductive_Upmantle_1.txt', 'w+') as out:
    writer = csv.writer(out, delimiter='\t')
    writer.writerows(zip(pressure, Temperature, radius))
with open('Proxima_conductive_Upmantle_2.txt', 'w+') as out:
    writer = csv.writer(out, delimiter='\t')
    writer.writerows(zip(mass, gravity, radius))
with open('Proxima_conductive_Upmantle_3.txt', 'w+') as out:
    writer = csv.writer(out, delimiter='\t')
    writer.writerows(zip(density))
# Quick-look plots of the integrated profiles.
plt.plot(radius, pressure, color='blue')
plt.xlabel('Radius (Km)')
plt.ylabel('Pressure (GPa)')
plt.show()
plt.plot(radius, gravity, color='red')
plt.xlabel('Radius (Km)')
plt.ylabel('Gravity (m/s$^2$)')
plt.show()
print("Done")
| [
"sasiruravihansa@gmail.com"
] | sasiruravihansa@gmail.com |
d5b1789b3dd0c839b86dd726c2e71effbdd484ab | 371277a2586e85337cd50a0e2889a962b89fbca0 | /Semana 5/Subida de Archivos Flask - Portfolio/models/conocimiento.py | e4e67176849e22c32a4fec64a7569de0c8b0abe9 | [] | no_license | Jesuscueva/Virtual-Back-5 | eca62561f19a3028880e3a68868ff4f1c271d579 | 2f4557a6cdae91c9fd4f22103b5bdd473845d5a4 | refs/heads/main | 2023-04-02T13:47:58.161874 | 2021-04-10T02:09:01 | 2021-04-10T02:09:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,142 | py | from config.base_datos import bd
from sqlalchemy import Column, types
from sqlalchemy.schema import ForeignKey
class ConocimientoModel(bd.Model):
    """SQLAlchemy model for a portfolio 'knowledge/skill' entry.

    Maps to table t_conocimiento: a title, a score with one decimal place,
    thumbnail/large image URLs, a description, and a FK to its category.
    """
    __tablename__ = 't_conocimiento'
    conocimientoId = Column(
        name='conocimiento_id',
        type_=types.Integer,
        primary_key=True,
        unique=True,
        autoincrement=True,
        nullable=False
    )
    conocimientoTitulo = Column(
        name='conocimiento_titulo',
        type_=types.String(45),
        nullable=False
    )
    conocimientoPuntuacion = Column(
        name='conocimiento_puntuacion',
        type_=types.DECIMAL(2,1),
        nullable=False
    )
    conocimientoImagenTN = Column(
        name='conocimiento_imagen_thumbnail',
        type_=types.TEXT,
        nullable=False
    )
    conocimientoImagenLarge = Column(
        name='conocimiento_imagen_large',
        type_=types.TEXT,
        nullable=False
    )
    conocimientoDescripcion = Column(
        name='conocimiento_descripcion',
        type_=types.String(200),
        nullable=False
    )
    # Foreign key to the owning category row.
    categoria = Column(
        ForeignKey('t_categoria.cat_id'),
        name='cat_id',
        type_=types.Integer,
        nullable=False
    )

    def __init__(self, titulo, puntuacion, imagentn, imagenl, descripcion, categoria):
        self.conocimientoTitulo = titulo
        self.conocimientoPuntuacion = puntuacion
        self.conocimientoImagenTN = imagentn
        # BUG FIX: the original line ended with a stray trailing comma, which
        # stored a one-element tuple instead of the URL string.
        self.conocimientoImagenLarge = imagenl
        self.conocimientoDescripcion = descripcion
        self.categoria = categoria

    def save(self):
        """Insert/update this row and commit the session."""
        bd.session.add(self)
        bd.session.commit()

    def json(self):
        """Serializable dict representation (score rendered as a string)."""
        return {
            'conocimiento_id': self.conocimientoId,
            'conocimiento_titulo': self.conocimientoTitulo,
            'conocimiento_puntuacion': str(self.conocimientoPuntuacion),
            'conocimiento_imagen_thumbnail': self.conocimientoImagenTN,
            'conocimiento_imagen_large': self.conocimientoImagenLarge,
            'conocimiento_descripcion': self.conocimientoDescripcion,
            'cat_id': self.categoria,
        }
"ederiveroman@gmail.com"
] | ederiveroman@gmail.com |
93239fc5a08525d57708c1f823bde02ddff2f792 | 2ede3d8003efd70bff2ff4935f22eddacb9a7f53 | /ebsa/migrations/0002_auto_20160216_1735.py | c765617f1ed6684489c65484355341068f23accf | [] | no_license | sreilly/ebsa | ab25d163ce4b794db775399a3ff8a026812aeae1 | 2760a9095dbcfefcc1a9ccb73a84f72749c2ba47 | refs/heads/master | 2021-01-24T20:25:20.279593 | 2016-03-04T11:52:52 | 2016-03-04T11:52:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,368 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-02-16 17:35
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.9, 2016-02-16) for the `ebsa` app.

    Creates the Category model, adds active/opening-balance fields to
    Account and Bank, tightens unique constraints on several fields, and
    links Transaction to Category (existing rows backfilled with pk 1).
    """

    dependencies = [
        ('ebsa', '0001_initial'),
    ]

    operations = [
        # New Category model: named, typed (expense/income), self-referencing parent.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('type', models.CharField(choices=[('e', 'Expense'), ('i', 'Income')], max_length=1)),
                ('parent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ebsa.Category')),
            ],
        ),
        # Transactions are listed in date order by default from now on.
        migrations.AlterModelOptions(
            name='transaction',
            options={'ordering': ['date']},
        ),
        migrations.AddField(
            model_name='account',
            name='active',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='account',
            name='opening_balance',
            field=models.FloatField(default=0),
        ),
        migrations.AddField(
            model_name='bank',
            name='active',
            field=models.BooleanField(default=True),
        ),
        migrations.AlterField(
            model_name='account',
            name='name',
            field=models.CharField(max_length=32, unique=True),
        ),
        migrations.AlterField(
            model_name='account',
            name='type',
            field=models.CharField(choices=[('0', 'CHECKING'), ('S', 'SAVINGS'), ('C', 'CCARD')], max_length=1),
        ),
        migrations.AlterField(
            model_name='bank',
            name='name',
            field=models.CharField(max_length=64, unique=True),
        ),
        migrations.AlterField(
            model_name='transaction',
            name='refnum',
            field=models.CharField(max_length=64, unique=True),
        ),
        # Existing transaction rows are backfilled with category pk 1.
        migrations.AddField(
            model_name='transaction',
            name='category',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='ebsa.Category'),
            preserve_default=False,
        ),
    ]
| [
"alexis@ww.net"
] | alexis@ww.net |
ecea23ae37a6a0fd12e5da215e3f82d59b6c2318 | 6f9ba1982122c87047ec7cf5dc7ba7319f3e788e | /lianjia/items.py | 853eb2a9dd349b4f8f34b73e36f55557ae83c25d | [] | no_license | mortyxu/git-lianjia | c8504215112770e4a3388851a9e6d8104bf80a75 | 5ff785fe25069dc96d0230ef0f3bf615277e507c | refs/heads/master | 2020-03-31T06:58:49.268292 | 2018-10-08T02:11:06 | 2018-10-08T02:11:06 | 152,002,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 996 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
from scrapy import Item, Field
class LianjiaItem(Item):
    """Scrapy item holding one Lianjia second-hand-housing listing."""
    region = Field()         # administrative district
    href = Field()           # listing URL
    name = Field()           # listing name
    style = Field()          # layout / room structure
    area = Field()           # residential compound
    orientation = Field()    # facing direction
    decoration = Field()     # renovation state
    elevator = Field()       # elevator availability
    floor = Field()          # floor level
    build_year = Field()     # year built
    sign_time = Field()      # contract signing date
    unit_price = Field()     # price per square metre
    total_price = Field()    # total price
    fangchan_call = Field()  # property type
    school = Field()         # nearby schools
    subway = Field()         # nearby subway stations
| [
"38303498+mortyxu@users.noreply.github.com"
] | 38303498+mortyxu@users.noreply.github.com |
e85935828e845a8db316fa404e80e29a58be5ec2 | 2d27176ea061a237f8451ccbecc20efb7c01b155 | /ex_influxdb/connect.py | 53c7c39624450b2be82c0a0024b15c117cd69c31 | [] | no_license | WISE-PaaS/example-db-connection | 7ae0603c26534772e02b5b3ee08476e850e0b215 | 8fb2342cf76b9ec4a7d4345cfa857b400aa00a3c | refs/heads/main | 2023-02-11T23:04:59.091196 | 2020-12-31T10:13:14 | 2020-12-31T10:13:14 | 324,118,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,136 | py | import os
import json
from influxdb import InfluxDBClient
def testInflux():
    """Verify connectivity to the InfluxDB instance bound to this app.

    Reads the service credentials from the ENSAAS_SERVICES environment
    variable (WISE-PaaS service-binding JSON), connects, and returns the
    JSON-encoded list of databases; on failure returns an error string.
    """
    # Load 'ENSAAS_SERVICES' from environment variable and parse the JSON.
    ensaas_services = os.getenv('ENSAAS_SERVICES')
    ensaas_services = json.loads(ensaas_services)
    # NOTE(review): the bare excepts below swallow every error (including
    # typos) and return strings instead of raising -- consider narrowing.
    try:
        influx_database = ensaas_services['influxdb'][0]['credentials']['database']
        # [:-5] presumably strips a 5-character ':port' suffix from the host -- TODO confirm
        influx_host = ensaas_services['influxdb'][0]['credentials']['externalHosts'][:-5]
        influx_port = ensaas_services['influxdb'][0]['credentials']['port']
        influx_user = ensaas_services['influxdb'][0]['credentials']['username']
        influx_password = ensaas_services['influxdb'][0]['credentials']['password']
    except:
        return "Cannot get InfluxDB credentials from $ENSAAS_SERVICES environment variable!"
    try:
        client = InfluxDBClient(host=influx_host, port=influx_port, username=influx_user, password=influx_password)
    except:
        return "Unable to connect to InfluxDB instance!"
    # Print the list of databases to verify connection.
    dbList = client.get_list_database()
    print(dbList)
    client.close()
    return json.dumps(dbList)
| [
"stanley.yeh@advantech.com.tw"
] | stanley.yeh@advantech.com.tw |
2652ccb0f7fd0e86cf77b13991aabd3efa46c48d | c134c473eb21bcb0eb8d6255e5467c11fd043859 | /Q106_ConstructBinaryTreeFromInorderAndPostorderTraversal.py | 119405f711ccf4e1692339a89b70800be8498f00 | [] | no_license | fivemoons/LeetCode_Python | 758efff7a669adf655e772d077134c5a02205542 | 2ef448771ce48dd5475f329474e7cc2b0bb51cc3 | refs/heads/master | 2021-01-17T20:55:33.482122 | 2016-09-20T10:09:21 | 2016-09-20T10:09:21 | 64,149,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 914 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def buildTree(self, inorder, postorder, inl=0, inr=None, postl=0, postr=None):
        """LeetCode 106: rebuild a binary tree from inorder + postorder traversals.

        inl/inr and postl/postr are the inclusive index windows of the current
        subtree inside `inorder` and `postorder`; both default to the whole
        lists on the outermost call.

        :type inorder: List[int]
        :type postorder: List[int]
        :rtype: TreeNode

        NOTE(review): inorder.index() makes this O(n^2) worst case and assumes
        unique values; a value->index dict would make it O(n).
        """
        inr = inr if inr != None else len(inorder) - 1
        postr = postr if postr != None else len(postorder) - 1
        if (inr - inl) < 0 or (postr - postl) < 0:
            return None
        else:
            # The last element of the postorder window is the subtree root.
            idx = inorder.index(postorder[postr])
            root = TreeNode(postorder[postr])
            # Left subtree spans inorder[inl..idx-1], which is idx-inl elements
            # of the postorder window starting at postl; the right subtree
            # takes the remainder up to postr-1.
            root.left = self.buildTree(inorder, postorder, inl, idx-1, postl, idx-inl+postl-1)
            root.right = self.buildTree(inorder, postorder, idx+1, inr, idx-inl+postl, postr-1)
            return root
"liuyuyao1992@qq.com"
] | liuyuyao1992@qq.com |
6648dc83958d98e09181be589c965e5d5083dbc0 | e2604baf3baddbbebf8597c7a3a76bac988efb41 | /venv/bin/wsdump.py | 6f6c90fbda8537d5bc52a71ac71732e9a1879bcb | [] | no_license | Surajgupta5/Django-WIth-Docker | 2fe1037451c113feba72c50d5425d4461c2f40be | ca879e43af043dccba6b325f89ac3c6f495dbe56 | refs/heads/master | 2022-07-19T18:55:48.243626 | 2020-05-11T05:53:52 | 2020-05-11T05:53:52 | 262,951,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,437 | py | #!/home/workspace/project2/venv/bin/python
import argparse
import code
import sys
import threading
import time
import ssl
import gzip
import zlib
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
    """Return the lower-cased encoding of stdin, defaulting to utf-8."""
    enc = getattr(sys.stdin, "encoding", "")
    return enc.lower() if enc else "utf-8"
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)  # opcodes that carry payload data
ENCODING = get_encoding()  # stdin encoding, resolved once at import time
class VAction(argparse.Action):
    """argparse action turning ``-v``, ``-vv``, ``-v 2`` ... into an integer
    verbosity level stored on the namespace."""

    def __call__(self, parser, args, values, option_string=None):
        raw = "1" if values is None else values
        try:
            level = int(raw)
        except ValueError:
            # "-vvv" arrives here as the string "vv"; each extra v bumps the level.
            level = raw.count("v") + 1
        setattr(args, self.dest, level)
def parse_args():
    """Build the CLI for the dump tool and parse sys.argv into a Namespace."""
    parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
    parser.add_argument("url", metavar="ws_url",
                        help="websocket url. ex. ws://echo.websocket.org/")
    parser.add_argument("-p", "--proxy",
                        help="proxy url. ex. http://127.0.0.1:8080")
    # -v counts "v"s (see VAction): 1 shows opcodes, 2 also traces the module.
    parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
                        dest="verbose",
                        help="set verbose mode. If set to 1, show opcode. "
                        "If set to 2, enable to trace websocket module")
    parser.add_argument("-n", "--nocert", action='store_true',
                        help="Ignore invalid SSL cert")
    parser.add_argument("-r", "--raw", action="store_true",
                        help="raw output")
    parser.add_argument("-s", "--subprotocols", nargs='*',
                        help="Set subprotocols")
    parser.add_argument("-o", "--origin",
                        help="Set origin")
    parser.add_argument("--eof-wait", default=0, type=int,
                        help="wait time(second) after 'EOF' received.")
    parser.add_argument("-t", "--text",
                        help="Send initial text")
    parser.add_argument("--timings", action="store_true",
                        help="Print timings in seconds")
    parser.add_argument("--headers",
                        help="Set custom headers. Use ',' as separator")

    return parser.parse_args()
class RawInput:
    """Console-input mixin that normalizes prompting and encoding across
    Python 2 and Python 3 (via six)."""

    def raw_input(self, prompt):
        if six.PY3:
            line = input(prompt)
        else:
            line = raw_input(prompt)

        # Re-encode to UTF-8 when stdin uses another encoding (Py2 byte path),
        # otherwise encode unicode text to UTF-8 bytes.
        if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
            line = line.decode(ENCODING).encode("utf-8")
        elif isinstance(line, six.text_type):
            line = line.encode("utf-8")

        return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
    """Interactive console: incoming messages are printed in blue above a
    persistent '> ' prompt using ANSI escape sequences."""

    def write(self, data):
        sys.stdout.write("\033[2K\033[E")  # erase the current prompt line
        sys.stdout.write("\033[34m< " + data + "\033[39m")  # blue "< message"
        sys.stdout.write("\n> ")  # redraw the prompt
        sys.stdout.flush()

    def read(self):
        return self.raw_input("> ")
class NonInteractive(RawInput):
    """Plain stdout/stdin console used with --raw (no ANSI decoration)."""

    def write(self, data):
        sys.stdout.write(data)
        sys.stdout.write("\n")
        sys.stdout.flush()

    def read(self):
        return self.raw_input("")
def main():
    """Entry point: connect to the WebSocket URL given on the command line,
    echo received frames to the console (decompressing gzip/zlib payloads
    when detected) on a background thread, and forward console input to the
    server until EOF or Ctrl+C."""
    start_time = time.time()
    args = parse_args()
    if args.verbose > 1:
        websocket.enableTrace(True)
    options = {}
    if args.proxy:
        p = urlparse(args.proxy)
        options["http_proxy_host"] = p.hostname
        options["http_proxy_port"] = p.port
    if args.origin:
        options["origin"] = args.origin
    if args.subprotocols:
        options["subprotocols"] = args.subprotocols
    opts = {}
    if args.nocert:
        # -n/--nocert: skip certificate and hostname verification.
        opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
    if args.headers:
        options['header'] = list(map(str.strip, args.headers.split(',')))
    ws = websocket.create_connection(args.url, sslopt=opts, **options)
    if args.raw:
        console = NonInteractive()
    else:
        console = InteractiveConsole()
        print("Press Ctrl+C to quit")

    def recv():
        # Receive one frame, answering pings/closes per the protocol; returns
        # (opcode, data) with data=None on close.
        try:
            frame = ws.recv_frame()
        except websocket.WebSocketException:
            return websocket.ABNF.OPCODE_CLOSE, None
        if not frame:
            raise websocket.WebSocketException("Not a valid frame %s" % frame)
        elif frame.opcode in OPCODE_DATA:
            return frame.opcode, frame.data
        elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
            ws.send_close()
            return frame.opcode, None
        elif frame.opcode == websocket.ABNF.OPCODE_PING:
            ws.pong(frame.data)
            return frame.opcode, frame.data

        return frame.opcode, frame.data

    def recv_ws():
        # Reader loop, run on a daemon thread: decode/decompress each frame
        # and print it (with timing/opcode prefixes when requested).
        while True:
            opcode, data = recv()
            msg = None
            if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
                data = str(data, "utf-8")
            if isinstance(data, bytes) and len(data)>2 and data[:2] == b'\037\213':  # gzip magic bytes
                try:
                    data = "[gzip] " + str(gzip.decompress(data), "utf-8")
                except:
                    pass
            elif isinstance(data, bytes):
                # Fall back to raw-deflate decompression; leave untouched on failure.
                try:
                    data = "[zlib] " + str(zlib.decompress(data, -zlib.MAX_WBITS), "utf-8")
                except:
                    pass

            if isinstance(data, bytes):
                data = repr(data)

            if args.verbose:
                msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
            else:
                msg = data

            if msg is not None:
                if args.timings:
                    console.write(str(time.time() - start_time) + ": " + msg)
                else:
                    console.write(msg)

            if opcode == websocket.ABNF.OPCODE_CLOSE:
                break

    thread = threading.Thread(target=recv_ws)
    thread.daemon = True
    thread.start()

    if args.text:
        ws.send(args.text)

    # Writer loop: read console lines and send them until Ctrl+C/EOF.
    while True:
        try:
            message = console.read()
            ws.send(message)
        except KeyboardInterrupt:
            return
        except EOFError:
            time.sleep(args.eof_wait)
            return
try:
main()
except Exception as e:
print(e)
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
ab4d5831098fe2e9a2d15ee4a894914b8f70c9e7 | d893c8bb1f15d52811d860c2e59c2b84b0b0b751 | /ai/train.py | 959d1654751d1a2d3f552daefe6c019bf4fbae9b | [
"MIT"
] | permissive | bajcmartinez/kickass-chess | acd258f6c6e4e16a4af692839f7277a1eb5b7683 | 397d2f40b403e3b2b590cd54b708e97a4108316a | refs/heads/master | 2020-04-17T10:30:33.654778 | 2019-01-21T18:09:35 | 2019-01-21T18:09:35 | 166,503,723 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,106 | py | import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from keras.optimizers import Adam
from keras.losses import categorical_crossentropy
import keras
if __name__ == '__main__':
    batch_size = 128
    epochs = 100

    # Load the entire dataset (states X, two-class labels Y).
    with np.load("datasets/states.npz") as f:
        X = f['arr_0']
        Y = f['arr_1']

    # Deterministic shuffle so the split below is reproducible.
    np.random.seed(113)
    indices = np.arange(len(X))
    np.random.shuffle(indices)
    X = X[indices]
    Y = Y[indices]

    # Split data into thirds: train / validation / test.
    # NOTE(review): the `cut + 1` offsets skip the samples at indices `cut`
    # and `2*cut`, so two rows are silently dropped -- confirm intended.
    cut = int(len(X) / 3)
    x_train = X[:cut]
    y_train = Y[:cut]
    x_validation = X[cut + 1:cut + cut]
    y_validation = Y[cut + 1:cut + cut]
    x_test = X[cut + cut + 1:]
    y_test = Y[cut + cut + 1:]

    # One-hot encode the two-class labels for the softmax head.
    y_train = keras.utils.to_categorical(y_train, 2)
    y_test = keras.utils.to_categorical(y_test, 2)
    y_validation = keras.utils.to_categorical(y_validation, 2)

    print("Total records: ", len(X))

    # Build the model: a stack of 1x1 convolutions, flattened into a dense
    # head with a 2-way softmax.
    model = Sequential()
    model.add(Conv2D(16, kernel_size=1, activation='relu', input_shape=X[0].shape))
    model.add(Conv2D(16, kernel_size=1, activation='relu'))
    model.add(Conv2D(32, kernel_size=1, activation='relu'))
    model.add(Conv2D(32, kernel_size=1, activation='relu'))
    model.add(Conv2D(64, kernel_size=1, activation='relu'))
    model.add(Conv2D(64, kernel_size=1, activation='relu'))
    model.add(Conv2D(128, kernel_size=1, activation='relu'))
    model.add(Conv2D(128, kernel_size=1, activation='relu'))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dense(2, activation='softmax'))

    model.compile(loss=categorical_crossentropy,
                  optimizer=Adam(),
                  metrics=['accuracy'])

    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              validation_data=(x_validation, y_validation))

    # Final held-out evaluation, then persist the trained model.
    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])

    model.save("datasets/model.h5")
| [
"bajcmartinez@gmail.com"
] | bajcmartinez@gmail.com |
2e3a99a2a2f8ea1a985a74d8d043771932f25fde | 8a2cb7bf6b7e819ea554781a5775fc9411ac3054 | /study_flask/app/web/book.py | d1d706c9c11638b44f284b133013a8dc8b295bd0 | [] | no_license | Foxgeek36/lagou_data_analysis | 9aca8f9d6bc9be7fcd86ba1d0d1b8c4b8c0901a5 | 14b1b325c8af81f13b6e90ad4d6ec6da22a00739 | refs/heads/master | 2020-07-20T20:33:40.956532 | 2019-06-24T02:44:22 | 2019-06-24T02:44:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,498 | py | #Email:dazhuang_python@sina.com
from flask import request, render_template
from flask.json import jsonify
from app.forms.book_forms import SearchForm
from app.view_models.book_view_model import BookCollection
from app.web.blue_print import web_blue
from app.libs.helper import isbn_or_key
from app.spider.yushu_book import YuShuBook
import json
@web_blue.route("/")
def index():
    """Render the demo page with a small sample dict."""
    r = {
        "name":"dazhuang",
        "age":20
    }
    # Render the HTML template via render_template.
    return render_template('test1.html',data=r)
# Route note: with the trailing slash in the rule, browser requests to the
# path without a slash are 302-redirected to the slashed path.
# Query parameters: q (keyword or ISBN) and page.
# Registered on the blueprint; the blueprint itself is registered on the app.
@web_blue.route("/book/search/")
# View function for book search.
def search():
    """Search books by ISBN or keyword via the YuShu API and return JSON."""
    form = SearchForm(request.args)
    books = BookCollection()
    # Run the form validators before touching the query values.
    if form.validate():
        q = form.q.data.strip()
        page = form.page.data
        # Decide whether the incoming q is an ISBN or an ordinary keyword.
        isbn_or_key_value = isbn_or_key(q)
        yushu_book = YuShuBook()
        if isbn_or_key_value == 'isbn':
            yushu_book.search_by_isbn(isbn=q)
        else:
            yushu_book.search_by_keyword(keyword=q,page=page)
        books.fill(yushu_book=yushu_book,keyword=q)
        # Serialize the view model; the default hook exposes plain __dict__.
        return json.dumps(books,default = lambda o:o.__dict__)
    else:
        return jsonify({"msg":"传入参数有误"})
"1780292537@qq.com"
] | 1780292537@qq.com |
82ea99ed4413fd07403f69621525bcf0df58dc1c | ed8ed9d4d6a8a53891544427df2fe3ffe25efc31 | /train/train.py | 78b7e57383ce274fa8b244cb77a36fe8e20a0799 | [] | no_license | atkatchev/deploying-sentiment-analysis-model | 62dc0c58f9a567da253916024da4a66313b98c8b | bb147e9d662c74deb5c3107f802bdd8a091e7bae | refs/heads/master | 2020-10-01T09:31:41.740721 | 2019-12-12T04:13:36 | 2019-12-12T04:13:36 | 227,509,644 | 0 | 0 | null | 2019-12-12T04:13:36 | 2019-12-12T03:12:07 | HTML | UTF-8 | Python | false | false | 6,335 | py | import argparse
import json
import os
import pickle
import sys
import sagemaker_containers
import pandas as pd
import torch
import torch.optim as optim
import torch.utils.data
from model import LSTMClassifier
def model_fn(model_dir):
    """Load the PyTorch model from the `model_dir` directory.

    SageMaker inference entry point: reconstructs the LSTMClassifier from the
    saved hyperparameters (model_info.pth), loads its weights (model.pth) and
    vocabulary (word_dict.pkl), and returns the model in eval mode on the
    available device.
    """
    print("Loading model.")

    # First, load the parameters used to create the model.
    model_info = {}
    model_info_path = os.path.join(model_dir, 'model_info.pth')
    with open(model_info_path, 'rb') as f:
        model_info = torch.load(f)

    print("model_info: {}".format(model_info))

    # Determine the device and construct the model.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])

    # Load the stored model parameters (weights).
    model_path = os.path.join(model_dir, 'model.pth')
    with open(model_path, 'rb') as f:
        model.load_state_dict(torch.load(f))

    # Load the saved word_dict (token -> integer id mapping).
    word_dict_path = os.path.join(model_dir, 'word_dict.pkl')
    with open(word_dict_path, 'rb') as f:
        model.word_dict = pickle.load(f)

    model.to(device).eval()

    print("Done loading model.")
    return model
def _get_train_data_loader(batch_size, training_dir):
    """Build a DataLoader over train.csv (column 0 = label, rest = word ids)."""
    print("Get train data loader.")

    train_data = pd.read_csv(os.path.join(training_dir, "train.csv"), header=None, names=None)

    train_y = torch.from_numpy(train_data[[0]].values).float().squeeze()    # labels
    train_X = torch.from_numpy(train_data.drop([0], axis=1).values).long()  # word-id sequences

    train_ds = torch.utils.data.TensorDataset(train_X, train_y)

    return torch.utils.data.DataLoader(train_ds, batch_size=batch_size)
def train(model, train_loader, epochs, optimizer, loss_fn, device):
    """
    Training loop called by the PyTorch training script.

    model        -- the PyTorch model to train.
    train_loader -- DataLoader yielding (inputs, labels) batches.
    epochs       -- total number of passes over the data.
    optimizer    -- optimizer stepping the model parameters.
    loss_fn      -- loss function used for training.
    device       -- torch.device the batches are moved to (gpu or cpu).
    """
    for epoch in range(1, epochs + 1):
        model.train()
        running_loss = 0
        for batch_X, batch_y in train_loader:
            batch_X = batch_X.to(device)
            batch_y = batch_y.to(device)
            # Reset gradients, forward pass, backward pass, parameter step.
            model.zero_grad()
            predictions = model.forward(batch_X)
            batch_loss = loss_fn(predictions, batch_y)
            batch_loss.backward()
            optimizer.step()
            running_loss += batch_loss.data.item()
        print("Epoch: {}, BCELoss: {}".format(epoch, running_loss / len(train_loader)))
if __name__ == '__main__':
    # All of the model parameters and training parameters are sent as arguments
    # when the script is executed. Set up an argument parser to access them.
    parser = argparse.ArgumentParser()

    # Training Parameters
    parser.add_argument('--batch-size', type=int, default=512, metavar='N',
                        help='input batch size for training (default: 512)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')

    # Model Parameters
    parser.add_argument('--embedding_dim', type=int, default=32, metavar='N',
                        help='size of the word embeddings (default: 32)')
    parser.add_argument('--hidden_dim', type=int, default=100, metavar='N',
                        help='size of the hidden dimension (default: 100)')
    parser.add_argument('--vocab_size', type=int, default=5000, metavar='N',
                        help='size of the vocabulary (default: 5000)')

    # SageMaker Parameters, injected by the container via environment variables.
    parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))
    parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])
    parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
    parser.add_argument('--data-dir', type=str, default=os.environ['SM_CHANNEL_TRAINING'])
    parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])

    args = parser.parse_args()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Using device {}.".format(device))

    torch.manual_seed(args.seed)

    # Load the training data.
    train_loader = _get_train_data_loader(args.batch_size, args.data_dir)

    # Build the model and attach the saved vocabulary.
    model = LSTMClassifier(args.embedding_dim, args.hidden_dim, args.vocab_size).to(device)

    with open(os.path.join(args.data_dir, "word_dict.pkl"), "rb") as f:
        model.word_dict = pickle.load(f)

    print("Model loaded with embedding_dim {}, hidden_dim {}, vocab_size {}.".format(
        args.embedding_dim, args.hidden_dim, args.vocab_size
    ))

    # Train the model.
    optimizer = optim.Adam(model.parameters())
    loss_fn = torch.nn.BCELoss()

    train(model, train_loader, args.epochs, optimizer, loss_fn, device)

    # Save the parameters used to construct the model (read back by model_fn).
    model_info_path = os.path.join(args.model_dir, 'model_info.pth')
    with open(model_info_path, 'wb') as f:
        model_info = {
            'embedding_dim': args.embedding_dim,
            'hidden_dim': args.hidden_dim,
            'vocab_size': args.vocab_size,
        }
        torch.save(model_info, f)

    # Save the word_dict
    word_dict_path = os.path.join(args.model_dir, 'word_dict.pkl')
    with open(word_dict_path, 'wb') as f:
        pickle.dump(model.word_dict, f)

    # Save the model parameters (moved to CPU so they load anywhere).
    model_path = os.path.join(args.model_dir, 'model.pth')
    with open(model_path, 'wb') as f:
        torch.save(model.cpu().state_dict(), f)
| [
"alextkatchev17@gmail.com"
] | alextkatchev17@gmail.com |
7a5019f032d3564ba4d7d1adff38dcfc7b6dad35 | d109b64bfa8c80a6ec7d647beeadf9fe1c667fac | /class1101/LED11.py | ef6615d3a9cc7c3e69773a9d62dd8e9899f26d39 | [] | no_license | jumbokh/micropython_class | d34dd0a2be39d421d3bbf31dbb7bfd39b5f6ac6f | 950be81582dba970e9c982e2e06fa21d9e9a0fdd | refs/heads/master | 2022-10-10T22:27:02.759185 | 2022-10-01T14:44:31 | 2022-10-01T14:44:31 | 173,898,623 | 4 | 3 | null | 2020-03-31T09:57:23 | 2019-03-05T07:40:38 | Jupyter Notebook | UTF-8 | Python | false | false | 160 | py | from machine import Pin
# Blink an LED attached to GPIO pin 11: toggle it every second, forever.
import utime
LED = None
LED = Pin(11, Pin.OUT)  # NOTE(review): the `LED = None` line above is redundant
while True:
    LED.value(0)   # drive the pin low (whether this lights the LED depends on wiring)
    utime.sleep(1)
    LED.value(1)   # drive the pin high
    utime.sleep(1)
"jumbokh@gmail.com"
] | jumbokh@gmail.com |
5ffb8cea15446256dcc29283f7b13541b743e7c7 | 2df764eefd511bf85273d88d94ee1b776af1779f | /web010/models/weibo.py | 80f0131f8bff44fed9f5219899e64f175d758e70 | [] | no_license | ymcat626/web_project | 2b241b901f9d083e9333785045ceddeb72749336 | 990d9b4f5b1068e7a928dad7196cc98729f25253 | refs/heads/master | 2020-03-18T19:41:07.518253 | 2018-09-10T10:52:04 | 2018-09-10T10:52:04 | 135,170,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 938 | py | from . import Model
from .user import User
# Weibo (microblog post) model
class Weibo(Model):
    """A microblog post; linked to its owning user through user_id."""

    def __init__(self, form, user_id=-1):
        self.id = form.get('id')
        self.content = form.get('content', '')
        # The association with other data is a plain foreign key: the
        # owning User instance is identified by user_id.
        self.user_id = form.get('user_id', user_id)

    def comments(self):
        """Return every Comment attached to this weibo."""
        return Comment.find_all(weibo_id=self.id)
# Comment model
class Comment(Model):
    """A comment on a weibo; linked to its author and its weibo by id."""

    def __init__(self, form, user_id=-1):
        self.id = form.get('id')
        self.content = form.get('content', '')
        # Plain foreign keys tie the comment to the User who wrote it
        # and to the Weibo it belongs to.
        self.user_id = form.get('user_id', user_id)
        self.weibo_id = int(form.get('weibo_id', -1))

    def user(self):
        """Return the User instance that authored this comment."""
        return User.find_by(id=self.user_id)
"meattimes@live.cn"
] | meattimes@live.cn |
9facc5c16f98982ee060f50e2dc665bfba85eee8 | cb84d6a8100554079e1f816cb6634a001849f29f | /pysac/mhs_atmosphere/parameters/__init__.py | ecbf1d86ac0bddae2fbcb406f50f85148a904568 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | SWAT-Sheffield/pysac | 8d25e09188c852b82d4f1813e0d1cc2784c464ce | 0a92c7bf62ef6ac0530787d03cbc276d66a8273b | refs/heads/master | 2020-04-06T07:06:57.091190 | 2016-10-17T11:27:53 | 2016-10-17T11:27:53 | 25,865,703 | 1 | 3 | null | 2016-02-19T12:25:14 | 2014-10-28T11:15:08 | Python | UTF-8 | Python | false | false | 142 | py | #from units_const import *
#from model_pars import *
#from options import *
import units_const
import model_pars
import options
| [
"fred.gent.ncl@gmail.com"
] | fred.gent.ncl@gmail.com |
d63a8359e7b3a7e5e2b38953a52bd3879d8075ce | bb8df26d7cf5e2a5614a1f1e002cefe9890d6ba3 | /venv/bin/pip | ad8cbe5a58182a4fd9364d6bef8bc5f90ec4cce2 | [
"MIT"
] | permissive | ngenohzephenia/flask-week-2-IP | 80f1ad3faa1c9e3b2435ec1b5a8308aadcd16da1 | 2b314b3098ca8c859344217e660cf937b8d4d9f2 | refs/heads/master | 2023-03-11T22:53:16.972596 | 2021-03-02T20:31:47 | 2021-03-02T20:31:47 | 340,302,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | #!/home/moringaaccess/Desktop/News-API/venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"ngenohzef@gmail.com"
] | ngenohzef@gmail.com | |
99210570127cc5fece4e75eb505550b20b82dd51 | a2d2c2f640f8f50b126b77eeeea2ec0f438fdf0c | /day_2/answer.py | d72e884e596b6e80d84cb28ff846a96ac1c70db5 | [] | no_license | alexagriffith/advent_of_code_20 | 92306b884cc042444ed6d8c08ea2258c1d7374f0 | 9b49ab683109bfaf69beca7b8ef4349d73d92fdd | refs/heads/master | 2023-02-02T01:37:30.543775 | 2020-12-16T04:35:57 | 2020-12-16T04:35:57 | 318,681,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,367 | py | import re
def read_input():
    """Parse input.txt into a list of dicts: {min, max, letter, pwd} per line."""
    # group the min, max, letter, and password
    r = re.compile("([0-9]+)\\-([0-9]+)\\s([a-z])\\:\\s([a-z]+)")
    f = open("input.txt", "r")
    policies_passwords = []
    for line in f:
        # findall yields one 4-tuple (min, max, letter, pwd) per policy line
        grouped = r.findall(line)[0]
        # this isn't efficient but whatever
        policy_password = dict(min=int(grouped[0]), max=int(grouped[1]), letter=grouped[2], pwd=grouped[3])
        policies_passwords.append(policy_password)
    f.close()
    return policies_passwords
def validate_num_occurences(policies_passwords):
    """Count passwords whose required letter occurs between min and max times (inclusive)."""
    return sum(
        1
        for entry in policies_passwords
        if entry['min'] <= entry['pwd'].count(entry['letter']) <= entry['max']
    )
def validate_position_occurnces(policies_passwords):
    """Count passwords where exactly one of the two 1-indexed positions holds the letter."""
    valid = 0
    for entry in policies_passwords:
        first = entry['pwd'][entry['min'] - 1] == entry['letter']
        second = entry['pwd'][entry['max'] - 1] == entry['letter']
        # Exclusive-or: exactly one of the two positions must match.
        if first != second:
            valid += 1
    return valid
if __name__ == '__main__':
policies_passwords = read_input()
print(validate_num_occurences(policies_passwords))
print(validate_position_occurnces(policies_passwords))
| [
"agriffith@bluecore.com"
] | agriffith@bluecore.com |
cc0709ce6018df1945ef22e48c1aa21db809d017 | ebac8f2944d35cf42137fe22ddbb7ecbd552618d | /day1.py | 49cb0c2aeadd552902c7d4441ce6180df3c1442a | [] | no_license | ShubhamCoder007/Coding-problems | f686f7adb02f9d15fe1ad2a5d615439cc6d7f2bf | 5b3c2b3b8babfe7cfeee2a75b3842fb19f93c110 | refs/heads/master | 2020-04-23T08:54:57.063677 | 2019-10-14T17:10:15 | 2019-10-14T17:10:15 | 171,052,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 666 | py | '''Given a list of numbers and a number k, return whether any two numbers from the list add up to k.
For example, given [10, 15, 3, 7] and k of 17, return true since 10 + 7 is 17.
Bonus: Can you do this in one pass?'''
a = [10, 15, 3, 7]
target = int(input("Enter the target: "))
def list_input(n):
    """Prompt for and append integers to the module-level list `a`."""
    # range starts at 1, so only n-1 values are read — presumably `a` is
    # expected to already hold a seed value; confirm intended.
    for i in range(1,n):
        a.append(int(input("Enter the number:")))
def compute(target, a):
    """Return True if any two (distinct) elements of `a` sum to `target`.

    Single pass with a set of previously-seen values, so each element can
    only pair with a *different* element of the list — this fixes the
    original's `i is target` identity comparison (unreliable for ints) and
    its false positives when `target` equals one element or twice an
    element that appears only once.

    Parameters:
        target (int): The desired pair sum.
        a (list of int): The numbers to search.

    Returns:
        bool: True if a[i] + a[j] == target for some i != j, else False.
    """
    seen = set()
    for value in a:
        # If this value's complement was seen earlier, we have a valid pair.
        if target - value in seen:
            return True
        seen.add(value)
    return False
print(compute(target,a)) | [
"noreply@github.com"
] | ShubhamCoder007.noreply@github.com |
ab7f2304caaf730267119f94fb25d35965564aca | 27fe9c0fb5f5459241d11a205bfa7c887d643db1 | /2020/Day03_p2.py | 007290dddfe7de1c5237cb1d88d7f85be574cdf8 | [] | no_license | johnrozmaryn/adventofcode | 21c5fdd8d3a95882ce04103484538a215c49491c | 80d67371cfedb71eda0e35cd9130ee25b6c39091 | refs/heads/master | 2023-01-05T19:21:46.034612 | 2022-12-27T15:13:14 | 2022-12-27T15:13:14 | 225,509,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 929 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 3 16:36:58 2020
@author: jrozmaryn
"""
treelist = []
yval = 0
f=open("Day03.data","r")
contents = f.readlines()
#figure out the dimensions of the map
numlines = len(contents)
numcolumns = len(contents[0])-1 #stripping out the newline
def checkfortrees(xinc, yinc):
    """Count trees hit descending the map with step (xinc, yinc), wrapping horizontally."""
    hits = 0
    x = 0
    y = 0
    # Step first, then test: the starting cell (0, 0) is never checked, and the
    # walk continues while the *previous* row was still on the map.
    while y < numlines:
        x = (x + xinc) % numcolumns
        y += yinc
        hits += (x, y) in treelist
    return hits
#Make a nice list with x/y pairs for the trees
for line in contents:
strippedline = line.rstrip()
xval = 0
for i in strippedline:
if i == '#':
treelist.append((xval,yval))
xval += 1
yval += 1
print(checkfortrees(1,1)*
checkfortrees(3,1)*
checkfortrees(5,1)*
checkfortrees(7,1)*
checkfortrees(1,2))
| [
"JROZMARYN@BECKMAN.COM"
] | JROZMARYN@BECKMAN.COM |
225cf46464767cc87e3de5acdd111b3d2d50c482 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2594/60640/267498.py | 06ef98d3317b58f2f24605f47cf036c4eaa392d4 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 752 | py | """
O(N)
"""
t = int(input())
for i in range(t):
    inp = list(input())
    set_inp = list(set(inp))
    # Every character distinct -> no repeated character, so no enclosed span exists.
    if len(set_inp) == len(inp):
        print(-1)
    else:
        MAX_CHAR = 256
        # firstIndex[c] records where character c first appeared; -1 means never seen.
        firstIndex = [-1 for x in range(MAX_CHAR)]
        res = 0
        for j in range(len(inp)):
            # Position where this character was first seen (or -1).
            start = firstIndex[ord(inp[j])]
            # First occurrence: remember its position.
            if start == -1:
                firstIndex[ord(inp[j])] = j
            # Repeat occurrence at j: the characters strictly between the stored
            # position `start` and j number j - start - 1; keep the maximum.
            else:
                res = max(res, abs(j-start-1))
        print(res)
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
0ba65146cf6659db44dd47b906d4f6f8ea99fa48 | 6e800b3513537622df14bb598abe9c051116106c | /51-100/088MergeEasy.py | 14ead1c1a42033d242f68373f994af50d34f3ddf | [] | no_license | Huxhh/LeetCodePy | fd72f03193d1f0b58c44bffc46a9a59ba9714215 | 6a99e84c5742ca68012b14da362f6c3255e10b21 | refs/heads/master | 2023-06-09T09:23:54.209025 | 2023-05-31T16:29:03 | 2023-05-31T16:29:03 | 148,866,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | # coding=utf-8
def merge(nums1, m, nums2, n):
    """Merge the first n values of nums2 into nums1 (holding m values), in place.

    Fills nums1 from the back so no unread element is overwritten.
    Returns nums1 for convenience.
    """
    write = m + n - 1
    i, j = m - 1, n - 1
    while i >= 0 or j >= 0:
        # Take from nums1 when nums2 is exhausted, or when nums1's
        # candidate is at least as large; otherwise take from nums2.
        if j < 0 or (i >= 0 and nums1[i] >= nums2[j]):
            nums1[write] = nums1[i]
            i -= 1
        else:
            nums1[write] = nums2[j]
            j -= 1
        write -= 1
    return nums1
def merge2(nums1, m, nums2, n):
    """Merge nums2's first n values into nums1's first m values, in place.

    Walks both arrays from the back; any leftover nums2 prefix is copied
    straight into the front of nums1. Returns nums1.
    """
    for write in range(m + n - 1, -1, -1):
        if m == 0 or n == 0:
            break
        if nums1[m - 1] > nums2[n - 1]:
            m -= 1
            nums1[write] = nums1[m]
        else:
            n -= 1
            nums1[write] = nums2[n]
    # Remaining nums2 values (all smaller than anything placed) go up front.
    if n > 0:
        nums1[:n] = nums2[:n]
    return nums1
if __name__ == '__main__':
nums1 = [1, 2, 3, 0, 0, 0]
m = 3
nums2 = [2, 5, 6]
n = 3
print(merge2(nums1, m, nums2, n)) | [
"563255387@qq.com"
] | 563255387@qq.com |
77f53849e9baa5ba00cf4fecb76c44a81b14081f | 173624dde6173ac15d2bf5b9737d3934116dc6cd | /Leetcode题解/101.对称二叉树.py | 1ea68d3d89a6c146ca41e17740ce06b131c69918 | [] | no_license | jiufang7/git_repository | 79bb780e563f1aad5fe91ce53b091abaaf5b9a31 | e3d69ec8640f1caab74f1a4473888022f72de644 | refs/heads/master | 2023-01-21T01:31:18.265359 | 2020-12-01T05:21:31 | 2020-12-01T05:21:31 | 304,177,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | class TreeNode:
    def __init__(self, x):
        self.val = x       # node value
        self.left = None   # left child (TreeNode or None)
        self.right = None  # right child (TreeNode or None)
class Solution:
    """LeetCode 101: decide whether a binary tree is a mirror of itself."""

    def isSymmetric(self, root: TreeNode) -> bool:
        """Return True if the tree rooted at `root` is symmetric around its center."""
        def mirrored(a, b):
            # Two empty subtrees mirror each other; one empty and one not do not.
            if not a and not b:
                return True
            if not a or not b:
                return False
            return (a.val == b.val
                    and mirrored(a.left, b.right)
                    and mirrored(a.right, b.left))
        return mirrored(root, root)
# 2020.10.15
# Mirror-symmetric binary tree: my first thought was that the inorder traversal is a palindrome.
# That turned out to be wrong — a plain inorder traversal would also need the left/right subtree depths to match.
# Final approach: recursion — check that each mirrored pair of subtrees is symmetric.
"646428502@qq.com"
] | 646428502@qq.com |
c91095bd52b791805e852fbb423a0d0a80917e51 | e61cbd9d69454f096bdcbc9a802354b53b643e62 | /BsToPhiMuMu/test/crabConfig_2016B_Charmonium_MINIAOD.py | 690a9327e97dc1b0625930a7d77210e25f967075 | [] | no_license | rishabh-raturi/BsToPhiMuMu_angular_analysis | 47248133b44b56f216c1063ab07b4283d18be1cb | d3199e834055c2c396350d2904baf8e2ae224df3 | refs/heads/master | 2023-04-04T23:42:47.953299 | 2021-04-12T18:29:49 | 2021-04-12T18:29:49 | 305,628,429 | 0 | 2 | null | 2021-04-09T18:20:52 | 2020-10-20T07:41:03 | C++ | UTF-8 | Python | false | false | 2,752 | py | from CRABClient.UserUtilities import config, getUsernameFromSiteDB
config = config()
config.General.requestName = 'job_crab_data_Charmonium_finaljob_MINIAOD_CMSSW10218_16B_v1'
config.General.workArea = 'crab_data_Charmonium_finaljob_MINIAOD_CMSSW10218_16B_v1'
config.General.transferOutputs = True
config.General.transferLogs = False
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'Charmonium_2016B_dataMINI.py'
config.JobType.outputFiles = ['BsToPhiMuMu_2016_Data.root']
config.JobType.inputFiles = ['PileupMC_2016.root','DataPileupHistogram2016_rereco.root']
#config.Data.inputDataset = '/Charmonium/Run2016B-17Jul2018-v1/MINIAOD'
config.Data.inputDataset = '/Charmonium/Run2016B-17Jul2018_ver1-v1/MINIAOD'
config.Data.inputDBS = 'global'
#config.Data.splitting = 'FileBased'
config.Data.splitting = 'LumiBased'
#config.Data.splitting = 'Automatic'
config.Data.unitsPerJob = 10#10
config.JobType.allowUndistributedCMSSW = True
config.Data.ignoreLocality = True
config.Site.whitelist = ['T2_CH_*', 'T2_UK_*', 'T2_IT_*', 'T2_US_*']
config.Data.lumiMask = '/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions16/13TeV/ReReco/Final/Cert_271036-284044_13TeV_ReReco_07Aug2017_Collisions16_JSON.txt'
config.Data.runRange = '272007-275376'# Era B
#config.Data.runRange = '275657-276283'# Era C
#config.Data.runRange = '276315-276811' #Era D
#config.Data.runRange = '276831-277420' #Era E
#config.Data.runRange = '277772-278808' #Era F
#config.Data.runRange = '278820-280385' #Era G
#config.Data.runRange = '280919-284044' #Era H
config.Data.outLFNDirBase = '/store/user/%s/' % (getUsernameFromSiteDB())
config.Data.publication = False
#config.Data.publishDataName = 'CRAB3_BMM4_crabtest_Charmonium_Run2015B'
config.Site.storageSite = 'T2_IN_TIFR' # Your output destination. Useful to us: T2_CH_CSCS, T3_CH_PSI, T2_US_Nebraska
| [
"rr26@iitbbs.ac.in"
] | rr26@iitbbs.ac.in |
995fbd9c36e61986e2b7b52770d1883f929b0dde | dd498f63ccf60f9f6b2708bbe890291a39b52932 | /musixmatch/parser.py | 06a8c5e31fa17487e33ce6cbb5f78409f5cf2ecb | [] | no_license | pgromano/musixmatch | 8d302a28794f81e6f68c165c7b4cd864336dfea8 | 40dec801ea4c15ab89710cc67d4466dc0c9451f3 | refs/heads/master | 2021-05-09T06:27:20.712398 | 2018-01-29T16:23:43 | 2018-01-29T16:23:43 | 119,329,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,107 | py | import json
def jsonp2json(jsonp):
    """Strip the callback padding from a JSONP bytes payload and parse the inner JSON.

    `jsonp` looks like b'callback({...})'. Prints a message and returns
    None when no parenthesised body can be found.
    """
    try:
        start = jsonp.index(b'(') + 1
        end = jsonp.rindex(b')')
    except ValueError:
        print("Input is not in a jsonp format.")
        return None
    return json.loads(jsonp[start:end])
def album(response):
    """Return the single album dict from a Musixmatch JSONP `response`."""
    return jsonp2json(response.content)['message']['body']['album']
def album_list(response):
    """Return the album dicts from a JSONP `response`, unwrapped from their 'album' keys."""
    album_list = jsonp2json(response.content)['message']['body']['album_list']
    return [album['album'] for album in album_list]
def artist(response):
    """Return the single artist dict from a Musixmatch JSONP `response`."""
    return jsonp2json(response.content)['message']['body']['artist']
def artist_list(response):
    """Return the artist dicts from a JSONP `response`, unwrapped from their 'artist' keys."""
    artist_list = jsonp2json(response.content)['message']['body']['artist_list']
    return [artist['artist'] for artist in artist_list]
def lyrics(response):
    """Return the lyrics dict from a Musixmatch JSONP `response`."""
    return jsonp2json(response.content)['message']['body']['lyrics']
def track(response):
    """Return the single track dict from a Musixmatch JSONP `response`."""
    return jsonp2json(response.content)['message']['body']['track']
def track_list(response):
    """Return the track dicts from a JSONP `response`, unwrapped from their 'track' keys."""
    track_list = jsonp2json(response.content)['message']['body']['track_list']
    return [track['track'] for track in track_list]
| [
"pablo.romano42@gmail.com"
] | pablo.romano42@gmail.com |
409fd7bbc3b2fbc4598c33fcc93cab31f356454c | bcf65d8e5ceebd6b92d72a32ddc724343e93bc05 | /myfaceapp/migrations/0001_initial.py | b984559cf2217cf2d6984b8de3301e53015c8494 | [] | no_license | AliHashimi12345/fb-clone4 | 1f01558e0dbb7e3c3a3de6f889e63b3d3810266c | ad0fad1e15ffad3707ad5b90b5e90244d24326fc | refs/heads/master | 2023-09-04T23:29:06.281093 | 2021-11-18T12:10:37 | 2021-11-18T12:10:37 | 429,415,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,761 | py | # Generated by Django 3.2.8 on 2021-11-17 09:27
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='profilemodel',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='ProfilePics')),
('bio', models.CharField(max_length=200)),
('followers', models.ManyToManyField(blank=True, null=True, related_name='followers', to=settings.AUTH_USER_MODEL)),
('following', models.ManyToManyField(blank=True, null=True, to=settings.AUTH_USER_MODEL)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('post', models.ImageField(upload_to='posts')),
('likes', models.ManyToManyField(blank=True, null=True, related_name='likes', to=settings.AUTH_USER_MODEL)),
('profileuser', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to='myfaceapp.profilemodel')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"alihashimivns@gmail.com"
] | alihashimivns@gmail.com |
525b8df092a35354d0bda23dde9fa0101c66e688 | fb2fc8154460c2d02c3719f7447064b4635f1520 | /response_time_analysis.py | 707080ac8f60c886547a521514d46529248db13e | [] | no_license | renzj/porcupine | 6a18baf89926efbbadf6d148467560ff24b39ff7 | 51c87f385e32216220332a475f0cf0cef297b99a | refs/heads/master | 2021-01-15T15:44:11.908226 | 2016-10-25T03:46:44 | 2016-10-25T03:46:44 | 43,235,525 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,120 | py | #encoding=utf8------------------------------------------------------------------
# Name:
# Purpose:
#
# Author: tianmingZhang
#
# Created: 19/04/2016
# Copyright: (c) tianmingZhang 2016
# Licence: <your licence>
#-------------------------------------------------------------------------------
import mysql_utils
import time
def insert_into_detail(resp_id=0):
    """Split each '_'-delimited resp_text row (respid > resp_id) into columns of responsesdetail."""
    # NOTE: SQL is assembled with %-formatting throughout this module; values come
    # from our own DB rows / CLI, but parameterized queries would be safer.
    db_conn,mcur = mysql_utils.connMySQL(host='10.65.7.151')
    mcur.execute("select resp_text from responses where respid> %d"%(resp_id))
    results=mcur.fetchall()
    print 'results to be inserted:',len(results)
    bt = time.time()
    for item in results:
        entries = item[0].split('_')
        # When the file type itself contains '_' underscores:
        # rejoin entries[5] with entries[6..len-3] so the split pieces form one ftype.
        # NOTE(review): the range(8, len)/entries[i-2] offset assumes the last two
        # fields are status and servicetime — confirm against the resp_text layout.
        ftype = entries[5]
        for i in range(8,len(entries)):
            ftype = ftype +'_'+entries[i-2]
        mcur.execute("insert into responsesdetail values(NULL,%d,%d,'%s','%s',%d,'%s','%s',%f)" \
        %(int(entries[0]),int(entries[1]),entries[2],entries[3],int(entries[4]),ftype,\
        entries[-2],float(entries[-1])))
    db_conn.commit()
    mysql_utils.closeConn(db_conn,mcur)
    print 'It costs:',time.time() - bt
def avg_data_by_id(threadid=0,arrive_req_id=0,respde_id=0):
    """Print per-tid averages of active threads, arrival rate, and service time for rows past the given ids."""
    # NOTE(review): the connection/cursor opened here are never closed — confirm
    # whether mysql_utils.closeConn should be called like in insert_into_detail.
    db_conn,mcur = mysql_utils.connMySQL(host='10.65.7.151')
    mcur.execute("select tid,AVG(active_threads) from threadlines where id>%d group by tid"%(threadid))
    avg_active_threads_results = mcur.fetchall()
    mcur.execute("select tid,AVG(avg_arrive_reqs) from arrive_requests where id > %d group by tid"%(arrive_req_id))
    avg_arrive_reqs_results = mcur.fetchall()
    mcur.execute("select tid,AVG(servicetime) from responsesdetail where id > %d group by tid "%(respde_id))
    avg_servicetime_results = mcur.fetchall()
    print 'avg_active_threads:'
    for result in avg_active_threads_results:
        print '\t %d %.3f ' %(result[0],result[1])
    print 'avg_arrive_reqs:'
    for result in avg_arrive_reqs_results:
        print '\t %d %.3f ' %(result[0],result[1])
    print 'avg_servicetime:'
    for result in avg_servicetime_results:
        print '\t %d %.3f ' %(result[0],result[1])
def avg_data_by_time(starttime,endtime):
    """Print per-tid averages of active threads, arrival rate, and service time within [starttime, endtime]."""
    db_conn,mcur = mysql_utils.connMySQL(host='10.65.7.151')
    # NOTE(review): returning here leaks the connection just opened — confirm.
    if starttime ==0 or endtime==0:
        return
    # NOTE: the where-clause is built by string interpolation from values that can
    # come from sys.argv — parameterized queries would be safer.
    wherestatement=' between %s and %s group by tid'%(str(float(starttime)),str(float(endtime)))
    mcur.execute("select tid,AVG(active_threads) from threadlines where checktime %s "%(wherestatement))
    avg_active_threads_results = mcur.fetchall()
    mcur.execute("select tid,AVG(avg_arrive_reqs) from arrive_requests where checktime %s "%(wherestatement))
    avg_arrive_reqs_results = mcur.fetchall()
    mcur.execute("select tid,AVG(servicetime) from responsesdetail where begintime %s "%(wherestatement))
    avg_servicetime_results = mcur.fetchall()
    print 'avg_active_threads:'
    for result in avg_active_threads_results:
        print '\t %d %.3f ' %(result[0],result[1])
    print 'avg_arrive_reqs:'
    for result in avg_arrive_reqs_results:
        print '\t %d %.3f ' %(result[0],result[1])
    print 'avg_servicetime:'
    for result in avg_servicetime_results:
        print '\t %d %.3f ' %(result[0],result[1])
def main(threadid=0,arrive_req_id=0,respde_id=0,resp_id=0,starttime=0,endtime=0):
    """Refresh responsesdetail from raw responses, then print averages for the given time window."""
    insert_into_detail(resp_id=resp_id)
#    avg_data_by_id(threadid=threadid,arrive_req_id=arrive_req_id,respde_id=respde_id)
    avg_data_by_time(starttime,endtime)
if __name__ == '__main__':
    import sys
    # CLI: either four ids (thread, req, respde, response), or
    # starttime endtime resp_id (timestamps are >10 chars long).
    if len(sys.argv) > 4:
        main(threadid=int(sys.argv[1]),arrive_req_id=int(sys.argv[2]),respde_id=int(sys.argv[3]),resp_id=int(sys.argv[4]))
    elif len(sys.argv) > 3 and len(sys.argv[1]) > 10 and len(sys.argv[2]) > 10:
        main(resp_id=int(sys.argv[3]),starttime=sys.argv[1],endtime=sys.argv[2])
    else:
        print 'parameter error you must enter thread ,req,respde,response ids or starttime,endtime'
"renzju@gmail.com"
] | renzju@gmail.com |
1ed7d3aaf68851dbf8dafb254281a1bed12ea1f6 | 4bc51e6724ac31b2f2bcc8b699d790f53ec9516c | /cptac/file_download.py | ba7a933a8b150330fb2df9636fb6af726c245dd0 | [
"Apache-2.0"
] | permissive | MTschool20/cptac | 073e599c0c6609802914a6a45e49acba8dd687bd | 90e7bc2e264e4238ca21d654fd3b559f41b974f8 | refs/heads/master | 2023-05-07T09:31:48.793943 | 2021-05-18T00:45:33 | 2021-05-18T00:45:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,477 | py | # Copyright 2018 Samuel Payne sam_payne@byu.edu
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask, cli, request
from threading import Thread
from pathlib import Path
import webbrowser
import time
import logging
import os
import requests
import getpass
import bs4
from .file_tools import *
from .exceptions import InvalidParameterError, NoInternetError, DownloadFailedError
# Some websites don't like requests from sources without a user agent. Let's preempt that issue.
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:39.0)'
HEADERS = {'User-Agent': USER_AGENT}
# For a rudimentary data sharing between processes
path_here = os.path.abspath(os.path.dirname(__file__))
LOCK_PATH = os.path.join(path_here, "lock.tmp")
CODE_FILE_PATH = os.path.join(path_here, "code.tmp")
def download(dataset, version="latest", redownload=False, _box_auth=False, _box_token=None):
    """Download data files for the specified datasets. Defaults to downloading latest version on server.

    Parameters:
    dataset (str): The name of the dataset to download data for, or "all" to download data for all datasets
    version (str, optional): Which version of the data files to download. Defaults to latest on server.
    redownload (bool, optional): Whether to redownload the data files, even if that version of the data is already downloaded. Default False.
    _box_auth (bool, optional): Whether to download the files using Box file IDs and OAuth2 authentication. Default False.
    _box_token (str, optional): The OAuth2 token for Box, if already generated. Default None.

    Returns:
    bool: Indicates whether download was successful.
    """
    dataset = dataset.lower()

    # Datasets served by this module; anything else used with _box_auth
    # belongs to cptac.pancan and should go through its download function.
    datasets = [
        "brca",
        "ccrcc",
        "colon",
        "endometrial",
        "gbm",
        "hnscc",
        "lscc",
        "luad",
        "ovarian",
        "pdac",
        "ucecconf",
    ]

    if dataset in datasets and _box_auth:
        raise InvalidParameterError(f"You are trying to use the cptac.pancan.download function to download the {dataset} dataset. This is the wrong function; that dataset is not associated with the cptac.pancan module. Please instead use the regular cptac.download function.")

    # "all" recurses over every dataset; overall success requires every
    # individual download to succeed. (Loop variable renamed so it no longer
    # shadows the `dataset` parameter.)
    if dataset == "all":
        overall_result = True
        for name in datasets:
            if not download(name, redownload=redownload):
                overall_result = False
        return overall_result

    # Get our dataset path
    dataset_path = get_dataset_path(dataset)

    # Make sure the local index matches the server's, then load it
    update_index(dataset)
    index = get_index(dataset)

    # Validate the version number, including parsing if it's "latest"
    version = validate_version(version, dataset, use_context="download")

    # Construct the path to the directory for this version
    version_path = os.path.join(dataset_path, f"{dataset}_v{version}")

    # Decide which files still need downloading: everything for a redownload
    # or a fresh version directory, otherwise only missing/corrupted files.
    version_index = index.get(version)
    if os.path.isdir(version_path):
        if redownload:
            files_to_download = list(version_index.keys())
        else:
            files_to_download = []
            for data_file in version_index.keys():
                # Compare the server and local hashes, to make sure there was no data corruption
                file_path = os.path.join(version_path, data_file)
                if os.path.isfile(file_path):
                    file_index = version_index.get(data_file)
                    server_hash = file_index.get("hash")
                    local_hash = hash_file(file_path)
                    if local_hash == server_hash:
                        continue
                files_to_download.append(data_file)
            if len(files_to_download) == 0:
                return True
    else:
        os.mkdir(version_path)
        files_to_download = list(version_index.keys())

    # Download the files
    password = None
    total_files = len(files_to_download)

    if _box_auth and _box_token is None:
        _box_token = get_box_token()

    # enumerate replaces the original O(n^2) files_to_download.index() lookup
    for file_number, data_file in enumerate(files_to_download, start=1):
        file_index = version_index.get(data_file)
        server_hash = file_index.get("hash")
        file_url = file_index.get("url")
        file_path = os.path.join(version_path, data_file)
        downloaded_path = download_file(file_url, file_path, server_hash, password=password, _box_token=_box_token, file_message=f"{dataset} v{version} data files", file_number=file_number, total_files=total_files)

        # download_file returns a "wrong_password" sentinel until the user
        # supplies the correct password for a protected file.
        while downloaded_path == "wrong_password":
            if password is None:
                password = getpass.getpass(prompt=f'Password for {dataset} dataset: ') # We manually specify the prompt parameter so it shows up in Jupyter Notebooks
            else:
                password = getpass.getpass(prompt="Wrong password. Try again: ")
            print("\033[F", end='\r') # Use an ANSI escape sequence to move cursor back up to the beginning of the last line, so in the next line we can clear the password prompt
            print("\033[K", end='\r') # Use an ANSI escape sequence to print a blank line, to clear the password prompt
            downloaded_path = download_file(file_url, file_path, server_hash, password=password, _box_token=_box_token, file_message=f"{dataset} v{version} data files", file_number=file_number, total_files=total_files)

    return True
def update_index(dataset):
    """Check if the index of the given dataset is up to date with server version, and update it if needed.

    Parameters:
    dataset (str): The name of the dataset to check the index of.

    Returns:
    bool: Indicates if we were able to check the index and update if needed (i.e. we had internet)
    """
    # Get the path to our dataset
    dataset_path = get_dataset_path(dataset)

    # Define our file names we'll need
    index_urls_file = "index_urls.tsv"
    index_hash_file = "index_hash.txt"
    index_file = "index.txt"

    # Get, from the server, what the md5 hash of our index file should be
    index_urls_path = os.path.join(dataset_path, index_urls_file)
    urls_dict = parse_tsv_dict(index_urls_path)
    index_hash_url = urls_dict.get(index_hash_file)

    checking_msg = f"Checking that {dataset} index is up-to-date..."
    print(checking_msg, end='\r')
    try:
        server_index_hash = download_text(index_hash_url)
    finally:
        print(" " * len(checking_msg), end='\r') # Erase the checking message, even if there was an internet error

    index_path = os.path.join(dataset_path, index_file)

    # If the local index already matches the server hash, nothing to do.
    if os.path.isfile(index_path):
        local_index_hash = hash_file(index_path)
        if local_index_hash == server_index_hash:
            return True

    # Otherwise download a fresh index and re-verify it against the server hash.
    index_url = urls_dict.get(index_file)
    download_file(index_url, index_path, server_index_hash, file_message=f"{dataset} index")

    if os.path.isfile(index_path):
        local_index_hash = hash_file(index_path)
        if local_index_hash == server_index_hash:
            return True

    # If we get here, something apparently went wrong with the download.
    # NOTE(review): download_file already raises on a hash mismatch, so this
    # fallback should be unreachable — confirm.
    raise NoInternetError("Insufficient internet. Check your internet connection.")
def download_text(url):
    """Download text from a direct download url for a text file.

    Parameters:
    url (str): The direct download url for the text.

    Returns:
    str: The downloaded text, stripped of surrounding whitespace.
    """
    try:
        resp = requests.get(url, headers=HEADERS, allow_redirects=True)
        resp.raise_for_status()  # requests.HTTPError on an unsuccessful status code
    except requests.RequestException:  # Parent class of all requests exceptions
        raise NoInternetError("Insufficient internet. Check your internet connection.") from None
    return resp.text.strip()
def download_file(url, path, server_hash, password=None, _box_token=None, file_message=None, file_number=None, total_files=None): 
    """Download a file from a given url to the specified location.

    Parameters:
    url (str): The direct download url for the file.
    path (str): The path to the file (not just the directory) to save the file to on the local machine.
    server_hash (str): The hash for the file, to check it against. If check fails, try download one more time, then throw an exception.
    password (str, optional): If the file is password protected, the password for it. Unneeded otherwise.
    _box_token (str, optional): The OAuth2 token for Box, if we're downloading a file that needs that. Default of None ignores that option.
    file_message (str, optional): Identifing message about the file, to be printed while it's downloading. Default None will cause the full file name to be printed.
    file_number (int, optional): Which file this is in a batch of files, if you want to print a "File 1/15", "File 2/15", etc. sort of message. Must also pass total_files parameter.
    total_files (int, optional): The total number of files in the download batch, if you're printing that. Must also pass file_number parameter.

    Returns:
    str: The path the file was downloaded to, or the sentinel "wrong_password" if the supplied password was rejected.
    """
    # We provide the option of displaying a message indicating which file this is in a batch of files we're currently downloading
    batch_status = ''
    if (file_number is not None) and (total_files is not None):
        batch_status = f" ({file_number}/{total_files})"

    if file_message is None:
        file_message = path.split(os.sep)[-1]

    download_msg = f"Downloading {file_message}{batch_status}..."
    print(download_msg, end='\r')

    for _ in range(2): # Attempt the download twice before giving up (loop index was unused)
        try:
            if _box_token is not None: # We are using Box OAuth2
                download_url = f"https://api.box.com/2.0/files/{url}/content/" # url is actually file ID
                headers = dict(HEADERS)
                headers["Authorization"] = f"Bearer {_box_token}"
                response = requests.get(download_url, headers=headers)

            elif password is None: # No password or OAuth2
                response = requests.get(url, headers=HEADERS, allow_redirects=True)

            else: # The file is password protected
                with requests.Session() as session: # Use a session object to save cookies
                    # Construct the urls for our GET and POST requests
                    get_url = url
                    post_url = get_url.replace("https://byu.box.com/shared", "https://byu.app.box.com/public")

                    # Send initial GET request and parse the request token out of the response
                    get_response = session.get(get_url, headers=HEADERS)
                    soup = bs4.BeautifulSoup(get_response.text, "html.parser")
                    token_tag = soup.find(id="request_token")
                    token = token_tag.get("value")

                    # Send a POST request, with the password and token, to get the data
                    payload = {
                        'password': password,
                        'request_token': token}
                    response = session.post(post_url, headers=HEADERS, data=payload)

            response.raise_for_status() # Raises a requests.HTTPError if the response code was unsuccessful
        except requests.RequestException: # Parent class for all exceptions in the requests module
            raise NoInternetError("Insufficient internet. Check your internet connection.") from None

        local_hash = hash_bytes(response.content)
        if local_hash == server_hash: # Only replace the old file if the new one downloaded successfully.
            with open(path, 'wb') as dest:
                dest.write(response.content)
            print(" " * len(download_msg), end='\r') # Erase the downloading message
            return path
        elif response.text.strip().startswith("<!DOCTYPE html>"): # The password was wrong, so we just got a webpage
            print(" " * len(download_msg), end='\r') # Erase the downloading message
            return "wrong_password"

    # If we get to this point, both attempts failed the hash check.
    file_name = path.split(os.sep)[-1]
    raise DownloadFailedError(f"Download failed for {file_name}.")
# Set up a localhost server to receive access token
app = Flask(__name__)
@app.route('/receive')
def receive():
    """Flask endpoint hit by Box's OAuth2 browser redirect.

    Persists the temporary authorization code to CODE_FILE_PATH, using
    LOCK_PATH as a lock flag so the polling loop in get_box_token never
    reads a half-written file, then shuts the dev server down.
    """
    # Get the temporary access code
    code = request.args.get('code')
    # Create our "lock flag" file
    Path(LOCK_PATH).touch()
    # Save the code
    with open(CODE_FILE_PATH, "w") as code_file:
        code_file.write(code)
    # Remove lock flag
    os.remove(LOCK_PATH)
    # Shutdown the server. This will allow the thread it's running on to finish as well.
    # NOTE(review): the "werkzeug.server.shutdown" environ key was removed in
    # Werkzeug 2.1; on newer versions .get() returns None and this call would
    # raise TypeError -- confirm the pinned Werkzeug version.
    request.environ.get("werkzeug.server.shutdown")()
    return "Authentication successful. You can close this window."
def get_box_token():
    """Run the interactive Box OAuth2 flow and return an access token.

    Starts a throwaway local Flask server (the /receive endpoint above) on
    port 8003, opens the Box consent page in the user's browser, polls the
    filesystem until the redirect handler has written the temporary
    authorization code, then exchanges that code for an access token.
    """
    # Don't show starting message from server
    cli.show_server_banner = lambda *_: None
    # Don't show logs from server
    log = logging.getLogger('werkzeug')
    log.disabled = True
    # Set up authentication parameters
    base_url = "https://account.box.com/api/oauth2/authorize"
    client_id = "kztczhjoq3oes38yywuyfp4t9tu11it8"
    # SECURITY NOTE(review): an OAuth client secret is committed in source;
    # anyone with this code can impersonate the app. Consider rotating it
    # and loading it from configuration/environment instead.
    client_secret = "a5xNE1qj4Z4H3BSJEDVfzbxtmxID6iKY"
    login_url = f"{base_url}?client_id={client_id}&response_type=code"
    # Start the server
    server = Thread(target=app.run, kwargs={"port": "8003"})
    server.start()
    # Send the user to the "Grant access" page
    webbrowser.open(login_url)
    login_msg = "Please login to Box on the webpage that was just opened and grant access for cptac to download files through your account. If you accidentally closed the browser window, press Ctrl+C and call the download function again."
    print(login_msg)
    # Get the temporary access code from the server on the child process
    # (poll once per second; LOCK_PATH absent means the write completed).
    temp_code = None
    while temp_code is None:
        if os.path.isfile(CODE_FILE_PATH) and not os.path.isfile(LOCK_PATH):
            with open(CODE_FILE_PATH) as code_file:
                temp_code = code_file.read()
            os.remove(CODE_FILE_PATH)
        else:
            time.sleep(1)
    # Wait for the server process to finish. Note in the receive endpoint handler shuts down the server automatically after receiving the code, so we don't need to worry about trying to kill it.
    server.join()
    # Use the temporary access code to get the long term access token
    token_url = "https://api.box.com/oauth2/token";
    params = {
        'grant_type': 'authorization_code',
        'code': temp_code,
        'client_id': client_id,
        'client_secret': client_secret,
    }
    auth_resp = requests.post(token_url, data=params)
    access_token = auth_resp.json()["access_token"]
    return access_token
| [
"calebmlindgren@gmail.com"
] | calebmlindgren@gmail.com |
f7890fddbceeaca7539b8578a744574f2f12a935 | 998a6b1cc770754f10784a25c1812bb1fc2aa4c3 | /Loops.py | ed59a686233a18ae066509a8ec4151c2d45e40d0 | [] | no_license | A-Young-Coder/Python-For-Beginners | fe48fa96b97c222867d51873666a405591389b8a | eb4603536b020d5b1dd1dbf7607ec518879164cb | refs/heads/master | 2023-07-30T03:21:13.262036 | 2021-09-12T13:51:03 | 2021-09-12T13:51:03 | 405,416,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | loop_tiems = [1, 2, 3, 4]
# Print one numbered line per element of loop_tiems, counting from 0.
loop_number = 0
for _item in loop_tiems:
    print(f'This is loop number: {loop_number}')
    loop_number += 1
"the.young.coder11@gmail.com"
] | the.young.coder11@gmail.com |
583a033788c0b550cdea3e0cb188d0f3ef7da24a | bc0a99010c49b0a80e912bb7bf15d5479ee11200 | /serveml/loader.py | 7b8c98d2140d1729eb5d3785962822d1c0cd17dd | [
"Apache-2.0"
] | permissive | gfalcone/serveml | 6b017ceda45c7926ba734a186bcc69697e74d3c4 | 51937e3884f73d04482e22d82d3f771946361751 | refs/heads/master | 2023-05-22T22:28:40.778780 | 2020-04-02T10:26:58 | 2020-04-02T10:26:58 | 243,072,210 | 26 | 1 | Apache-2.0 | 2021-06-10T15:46:32 | 2020-02-25T18:35:04 | Python | UTF-8 | Python | false | false | 850 | py | import mlflow
from mlflow.pyfunc import load_model
def load_mlflow_model(path: str, tracking_uri: str = None):
    """
    Generic function that loads model from MlFlow.
    Relies on this function : https://www.mlflow.org/docs/latest/python_api/mlflow.pyfunc.html#mlflow.pyfunc.load_model
    :param path: path of the model, can be one of the following:
        - ``/Users/me/path/to/local/model``
        - ``relative/path/to/local/model``
        - ``s3://my_bucket/path/to/model``
        - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
        - ``models:/<model_name>/<model_version>``
        - ``models:/<model_name>/<stage>``
    :param tracking_uri: MlFlow Tracking URI (example: http://localhost:5000)
    """
    # Bug fix: the original tested ``mlflow.set_tracking_uri is not None``,
    # which is always true (it is a function object), so the tracking URI was
    # overwritten on every call -- including with None. Only set it when a
    # URI was actually provided.
    if tracking_uri is not None:
        mlflow.set_tracking_uri(tracking_uri)
    return load_model(path)
| [
"paolo@getaround.com"
] | paolo@getaround.com |
483e2de8f5aabad5ceb3ec89255f04baab08ecd1 | 0e973d6b0c72ce426c19ece76596dd8a83b2f955 | /tools/editor/pack_panel.py | 4982a94407412b914587be7dc9d64cc5a1158bda | [] | no_license | korialuo/ejoy2dx | 733b98172c1fe8e5535c10fc039dd8e9cc40d1de | e30e821682beb927326795d37eb0926e02bbd61f | refs/heads/master | 2021-01-11T16:05:12.920643 | 2016-12-08T11:22:39 | 2016-12-08T11:22:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,309 | py | import wx
import os
import sys
from custom_tree import CustomTreeCtrl
import pack_tree
try:
from agw import customtreectrl as CT
except ImportError: # if it's not there locally, try the wxPython lib.
import wx.lib.agw.customtreectrl as CT
class pack_panel(wx.Panel):
	"""Notebook page that shows the project's sprite packs as a tree.

	Python 2 only (uses dict.iteritems()).
	"""
	def __init__(self, parent, style):
		# 'parent' is the main editor frame; the panel itself is parented
		# to the frame's notebook so it appears as a page.
		wx.Panel.__init__(self, parent.book, style=style)
		# scroll = wx.ScrolledWindow(self, -1, style=wx.SUNKEN_BORDER)
		# scroll.SetScrollRate(20,20)
		self.main = parent
		# Create the CustomTreeCtrl, using a derived class defined below
		self.tree = CustomTreeCtrl(self, -1, rootLable="packs",
			style=wx.SUNKEN_BORDER,
			agwStyle=CT.TR_HAS_BUTTONS | CT.TR_HAS_VARIABLE_ROW_HEIGHT)
		self.tree.menu_callback = self.show_menu
		mainsizer = wx.BoxSizer(wx.VERTICAL)
		mainsizer.Add(self.tree, 4, wx.EXPAND)
		mainsizer.Layout()
		self.SetSizer(mainsizer)
		# [pack_name, sprite_key] of the node the context menu was opened
		# on; set by show_menu, consumed by new_sprite.
		self.menu_data = None
	def set_data(self, data):
		"""Rebuild the tree from pack data.

		``data`` maps pack name -> {index: sprite dict}; a sprite is shown
		under its 'export' name when present, else under its 'id'.
		"""
		self.tree.Reset()
		for k, v in data.iteritems():
			pack = self.tree.AppendItem(self.tree.root, k)
			for idx, p in v.iteritems():
				export = p.get('export')
				if export:
					root = self.tree.AppendItem(pack, export)
					self.tree.SetPyData(root, [k, export])
					pack_tree.show_sprite(self.tree, root, p, v)
				else:
					# Anonymous sprites are only shown if they have an id.
					if 'id' in p:
						root = self.tree.AppendItem(pack, "id:" + p['id'])
						self.tree.SetPyData(root, [k, p['id']])
						pack_tree.show_sprite(self.tree, root, p, v)
	def new_sprite(self, evt):
		# Context-menu handler: delegate creation to the main frame using
		# the [pack, key] captured when the menu was opened.
		if len(self.menu_data) == 2:
			self.main.NewSprite(self.menu_data[0], self.menu_data[1])
	def show_menu(self, tree, item):
		# Right-click callback from the tree: only offer "New sprite" on
		# nodes that carry a [pack, key] payload.
		self.menu_data = tree.GetPyData(item)
		if not self.menu_data or len(self.menu_data) < 2:
			self.menu_data = None
			return
		menu = wx.Menu()
		item1 = menu.Append(wx.ID_ANY, "New sprite")
		menu.AppendSeparator()
		tree.Bind(wx.EVT_MENU, self.new_sprite, item1)
		tree.PopupMenu(menu)
		menu.Destroy()
| [
"rainfiel@gmail.com"
] | rainfiel@gmail.com |
50069148941266ffbabb148d1571b7acd8eab9e3 | a515b936fffb89c6a786200e5aff47d44f419d56 | /fedcodeathon2018/settings.py | ebc003772231f6e4ccb7330ffe2bf1a872786445 | [
"MIT"
] | permissive | hannah-west/fedcodeathon2018-1 | ff7c4143411caff5594ce7305954a744b8e52ba9 | 2cac972b6eaebd7bfc47c02aade36b0f4a6869ab | refs/heads/master | 2020-04-02T11:41:03.285892 | 2018-10-22T02:20:34 | 2018-10-22T02:20:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,509 | py | """
Django settings for fedcodeathon2018 project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from an environment variable before any production deployment.
SECRET_KEY = 'xp^z0vtoqm_4-gf#smg)l9%&vl==7pmihknog-p+cfng&szq7('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'index',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'fedcodeathon2018.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'fedcodeathon2018.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# NOTE(review): database credentials are hard-coded in source; move them to
# environment variables or a secrets store before deploying.
DATABASES = {
    'default':{
        'ENGINE':'django.db.backends.mysql',
        'NAME':'kufragmentdevs',
        'USER':'root',
        'PASSWORD':'fedcodeathon',
        'HOST':'localhost',
        'PORT':''
    }
}
# Alternative (remote) database configuration kept for reference:
#DATABASES = {
#    'default':{
#        'ENGINE':'django.db.backends.mysql',
#        'NAME':'2018codathon',
#        'USER':'root',
#        'PASSWORD':'gzamtK^u4e!HV6y7',
#        'HOST':'m4h.us',
#        'PORT':'3306',
#        'SCHEMA': 'VisoVerita',
#    }
#}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
# Where django.contrib.auth sends users after a successful login.
LOGIN_REDIRECT_URL = '/index/'
| [
"elly.richardson18@gmail.com"
] | elly.richardson18@gmail.com |
8d9c0ab0f998c8f01a122e1b82d8798cd73ec36f | 54e0dddbd13841e818eb58361102bccf7742d0c1 | /codeforces/403/A.py | cd3bb607add30d1f9eea5ae495517022f773471c | [] | no_license | tjytlxwxhyzqfw/online-judge | fae0ff3a96182ec13fd5da6183f4d5ce6aef2a7a | 4fbc9194de8d4977bd90cfb6662fff81b66b1d08 | refs/heads/master | 2021-06-20T15:42:55.950240 | 2021-03-28T08:06:15 | 2021-03-28T08:06:15 | 74,032,215 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | import re
import sys
import time
def read(t=None):
	"""Read one stdin line; return it raw, or split it and map tokens to t."""
	line = raw_input()
	if t is None:
		return line
	return [t(token) for token in line.split()]
def solve():
	"""Read n and a list of 2n values; print the maximum number of values
	that are simultaneously 'open' (seen an odd number of times so far).

	Python 2 only (uses the print statement and raw_input via read()).
	"""
	n = int(read())
	a = read(int)
	n *= 2
	# f[x] is a toggle: 1 while value x is currently open, else 0.
	# Sized 2*n after doubling -- assumes values fit that range; TODO confirm
	# against the problem's input constraints.
	f = [0 for i in range(2*n)]
	ans = 0
	max_ans = 0
	for x in a:
		if f[x] == 0:
			# First (odd) occurrence: open x and track the running maximum.
			f[x] = 1
			ans += 1
			if ans > max_ans:
				max_ans = ans
		else:
			# Second (even) occurrence: close x again.
			assert f[x] == 1
			f[x] = 0
			ans -= 1
	print max_ans
if __name__ == "__main__":
	solve()
| [
"534038294@qq.com"
] | 534038294@qq.com |
940ba161fbdf17a6beae7caec9216150b6957149 | dde3552677efe21da25c94a4c4c04df7def802ac | /qa/model/order.py | 0d8efd305797514857815c2db79a672c949c44bd | [] | no_license | wanglei850429/NLP | 61b094831e8fac378902ee8e5ff3a1c72bd08b48 | 25e03c86ce1c680b4e2ca7f165e00f3c61a937a3 | refs/heads/master | 2020-06-03T01:45:29.478167 | 2019-08-06T05:13:15 | 2019-08-06T05:13:15 | 191,380,545 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,455 | py | import sys
sys.path.append('../')
from model.word2vect import load_model, vector_similarity
from utils.utils import Utils
MODEL_FILE = '../data/question.word2vec.bin'
DEBUG = False
word_vector_model = load_model(MODEL_FILE)
def order_by_word2vector(es_result, question, n=5):
    """Re-rank Elasticsearch hits by word2vec similarity to the question.

    :param es_result: search response whose 'result2' entry maps keys to hits
        carrying 'index' and 'title' fields (presumably doc id and question
        text -- TODO confirm against the ES query code).
    :param question: the user's question text.
    :param n: number of top-ranked records to return.
    :return: up to ``n`` rows of [index, title, similarity], best match first
        (empty when no model is loaded or there are no hits).
    """
    if DEBUG:
        # NOTE(review): this debug loop iterates es_result directly and reads
        # i['_source'], which matches the raw hit-list shape rather than the
        # 'result2' dict used below -- likely legacy; verify before relying on it.
        for i in es_result:
            print('---------------------------------------------------------')
            print('问题: %s' % (i['_source']['question']))
            print('答案:%s' % (i['_source']['anwser']))
    # Re-rank the results using the learned word vectors.
    order_result = []
    for key, hit in es_result['result2'].items():
        if not word_vector_model:
            break  # no model available: nothing can be scored
        similarity = vector_similarity(question, hit['title'], word_vector_model)
        order_result.append([hit['index'], hit['title'], similarity])
    if len(order_result) > 0:
        # Bug fix: the original called sorted(...) and discarded its return
        # value, so results were never actually re-ordered. Sort in place.
        order_result.sort(key=lambda x: x[2], reverse=True)
        if DEBUG:
            print('\n---------------------------------------------------------')
            print('词向量重排后的结果:')
            for i in order_result:
                print('---------------------------------------------------------')
                print('问题: %s' % (i[0]))
                print('答案:%s' % (i[1]))
                print('相似度:%f' % (i[2]))
    return order_result[:n]
"shiori5555@163.com"
] | shiori5555@163.com |
b3a64d09dd12170ebd5c17716c48878216461273 | 213a2ef70e92fa108f7087b0170eecf02d8981ad | /mainmenu.py | 5b9d771f5dcd54c7c6c3c68e101bd31bcc177113 | [] | no_license | shanjaynielle/milktea-py-master | 863770502bf0f78e8e91f8857b901f8e62491db5 | 16c0c37603b6b8bfccea9da4a8349bba61f9665c | refs/heads/master | 2023-08-27T03:33:04.618771 | 2021-11-02T10:07:55 | 2021-11-02T10:07:55 | 423,792,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,197 | py | from tkinter import *
#FUNCTIONS
def btn_clicked():
    # Placeholder handler shared by all three drink buttons; replace with
    # navigation to the corresponding order window.
    print("Button Clicked")
#FUNCTIONS
# Fixed-size 730x490 main-menu window with a background image and three
# image buttons (milk tea / fruit tea / choco).
mainmenu = Tk()
mainmenu.geometry("730x490")
mainmenu.title("Main Menu Window")
mainmenu.configure(bg = "#ffffff")
canvas = Canvas(mainmenu,bg = "#ffffff",height = 490,width = 730,
	bd = 0,highlightthickness = 0,relief = "ridge")
canvas.place(x = 0, y = 0)
#BACKGROUND
# NOTE: the PhotoImage objects below are kept in module-level names on
# purpose -- tkinter images get garbage-collected (and disappear from the
# UI) if no Python reference survives.
background_img = PhotoImage(file = f"backgroundmm.png")
background = canvas.create_image(371.0, 241.5,image=background_img)
#BUTTONS
imgmt = PhotoImage(file = f"img0mm.png")
mtbutton = Button(image = imgmt,borderwidth = 0,highlightthickness = 0,
	command = btn_clicked,relief = "flat")
mtbutton.place(x = 300, y = 152,width = 130,height = 74)
imft = PhotoImage(file = f"img1mm.png")
ftbutton = Button(image = imft,borderwidth = 0,highlightthickness = 0,
	command = btn_clicked,relief = "flat")
ftbutton.place(x = 300, y = 235,width = 130,height = 74)
imgc = PhotoImage(file = f"img2mm.png")
chocobutton = Button(image = imgc,borderwidth = 0,highlightthickness = 0,
	command = btn_clicked,relief = "flat")
chocobutton.place(x = 300, y = 318,width = 130,height = 74)
mainmenu.resizable(False, False)
mainmenu.mainloop()
| [
"shanicejaynielle@gmail.com"
] | shanicejaynielle@gmail.com |
29e81b0aeb77c2d7f97adab42a369a78d855924c | 855c94bcc9aff1e069cd2fd95c4e07d37a4d7fd8 | /gui/sparc2nwb_gui.py | adc1d0ef9fc880328bd419b0bdf340dade1bb4ab | [
"MIT"
] | permissive | DerekYJC/sparc2nwb | 5410d9b3c8b7adb8de155acc446486850639edc1 | 45dd82a1fcc5cbe7f3203bfa72b12b31d87d4ec4 | refs/heads/main | 2023-06-22T18:25:48.294865 | 2021-07-26T14:48:19 | 2021-07-26T14:48:19 | 386,140,349 | 0 | 0 | MIT | 2021-07-26T14:45:42 | 2021-07-15T02:43:36 | Python | UTF-8 | Python | false | false | 11,613 | py | import os
import itertools
import tkinter as tk
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import logging
import pickle
import pynwb
import sys
from tkinter import *
from PIL import ImageTk,Image
from tkinter.ttk import Combobox, Progressbar
from tkinter.filedialog import askopenfilename, asksaveasfilename, askdirectory
from tkinter import scrolledtext
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from skimage import io
from datetime import datetime, timedelta
from dateutil.tz import tzlocal
from pynwb.device import Device
from pynwb.ecephys import ElectrodeGroup
from pynwb import NWBFile, TimeSeries, NWBHDF5IO
import time
# Define the function for data conversion
def convert_to_nwb(data, nwb_file):
    """Populate ``nwb_file`` with devices, electrodes and per-column data.

    :param data: pandas DataFrame read from a 'responses' sheet; only
        columns whose name contains 'frame' or 'neuron' are kept.
    :param nwb_file: pynwb.NWBFile to populate (mutated in place).
    :return: the same NWBFile, for caller convenience.
    """
    electrode_groups = list()
    # One Device + ElectrodeGroup per DataFrame *row*.
    # NOTE(review): if one group per probe/column was intended, this should
    # probably iterate over data.columns instead -- confirm the data layout.
    for i in range(len(data)):
        probe_device = Device(name=str(i+1))
        probe_electrode_group = ElectrodeGroup(
            name = 'Probe'+str(i+1),
            description = '',
            device = probe_device,
            location = ''
        )
        nwb_file.add_device(probe_device)
        electrode_groups.append(probe_electrode_group)
        nwb_file.add_electrode_group(probe_electrode_group)
    # Keep only measurement columns ('frame' index column plus 'neuron' data).
    del_cols = []
    for col in data.columns:
        if ('frame' not in col) and ('neuron' not in col):
            del_cols.append(col)
    data = data.drop(del_cols, axis=1)
    # One electrode per row, with unknown (NaN) geometry/impedance.
    locations_array = ['']*len(data)
    locations = np.array(locations_array)
    groups_array = [electrode_groups[i] for i in range(len(data))]
    groups = np.asarray(groups_array)
    for i in range(len(groups)):
        nwb_file.add_electrode(
            id=i,
            x=float('NaN'),
            y=float('NaN'),
            z=float('NaN'),
            imp=float('NaN'),
            location=str(locations[i]),
            group=groups[i],
            filtering='none'
        )
    # Store each non-'frame' column as an extra electrode-table column.
    # The '1.25 kHz' description presumably records the sampling rate --
    # TODO confirm against the acquisition protocol.
    for i, col in enumerate(data.columns):
        if col != 'frame':
            data_name = col+'_data'
            data_array = np.array(data[col].values)
            nwb_file.add_electrode_column(
                name = data_name,
                description = '1.25 kHz',
                data = data_array
            )
    return (nwb_file)
def main_(standard_path, manifest_data, logger):
    """Convert every file listed in the SPARC manifest to an NWB file.

    Reads the dataset-description fields from the GUI entry widgets, then
    for each manifest row: loads the Excel data, builds subject metadata
    from samples.xlsx/subjects.xlsx two directories up, writes
    ./nwb_files/<dataset>/<name>.nwb, and advances the progress bar.

    :param standard_path: dataset 'primary' directory, with trailing '/'.
    :param manifest_data: DataFrame of manifest.xlsx ('filename', 'timestamp').
    :param logger: logging.Logger used to record each saved file.
    """
    experimenter = experimenter_ent.get()
    experiment_description = experimenter_desc_ent.get()
    related_publications = publication_ent.get()
    keywords = keywords_ent.get().split(',')
    n_files = len(manifest_data['filename'])
    # Evenly spaced progress-bar values, one step per converted file.
    prog = np.linspace(0, 100, n_files+1, dtype=int)
    progress['value'] = prog[0]
    for it, d in enumerate(manifest_data['filename']):
        file_path = standard_path+d
        # samples.xlsx / subjects.xlsx live two levels above 'primary'.
        samples_path = '/'.join(standard_path.split('/')[0:-2])+'/samples.xlsx'
        subjects_path = '/'.join(standard_path.split('/')[0:-2])+'/subjects.xlsx'
        data = pd.read_excel(file_path, sheet_name='responses')
        start_timestamp = manifest_data[manifest_data['filename'] == d]['timestamp'].values[0]
        # time_series = get_timeseries(n=len(data), start_time=start_timestamp, frequency = 1250)
        file_name = file_path.split('/')[-4] +'_'+ file_path.split('/')[-1].split('.')[0]
        if not os.path.exists('./nwb_files/'+standard_path.split('/')[-2]+'/'):
            os.makedirs('./nwb_files/'+standard_path.split('/')[-2]+'/')
        filename = './nwb_files/'+standard_path.split('/')[-2]+'/'+file_name+'.nwb'
        # Manifest filenames look like '<something>/<subject_id>/...' --
        # TODO confirm this layout holds for every dataset.
        subject_id = d.split('/')[1]
        session_start_time = manifest_data[manifest_data['filename'] == d]['timestamp'].values[0]
        # Strip the trailing 'Z' before parsing the ISO timestamp (naive, no tz).
        session_timestamp = datetime.strptime(session_start_time[:-1], '%Y-%m-%dT%H:%M:%S.%f')
        samples_data = pd.read_excel(samples_path)
        subjects_data = pd.read_excel(subjects_path)
        subject = pynwb.file.Subject(
            age = samples_data[samples_data['subject_id'] == subject_id]['age'].values[0],
            genotype = str(samples_data[samples_data['subject_id'] == subject_id]['specimen type'].values[0])+' '+str(samples_data[samples_data['subject_id'] == subject_id]['specimen anatomical location'].values[0]),
            subject_id = subject_id,
            sex = samples_data[samples_data['subject_id'] == subject_id]['sex'].values[0],
            weight = str(subjects_data[subjects_data['subject_id'] == subject_id]['Weight_kg'].values[0])+' kgs',
            species = samples_data[samples_data['subject_id'] == subject_id]['species'].values[0],
            description = samples_data[samples_data['subject_id'] == subject_id]['protocol title'].values[0]
        )
        nwb_file = NWBFile(session_description = 'Sample NWB File',
            identifier = file_name,
            session_start_time = session_timestamp,
            file_create_date = datetime.now(tzlocal()),
            institution = '',
            lab = '',
            experimenter = experimenter,
            experiment_description = experiment_description,
            related_publications = related_publications,
            keywords = keywords
        )
        nwb_file = convert_to_nwb(data, nwb_file)
        io = NWBHDF5IO(filename, 'w')
        io.write(nwb_file)
        io.close()
        logger.info('Saved '+str(filename))
        # pickle.dump(nwb_file, open(filename, 'wb'))
        # Mirror progress into the GUI: log pane, progress bar, window title.
        logger_text.insert(tk.INSERT, 'Saved '+str(filename)+'\n')
        progress['value'] = prog[it+1]
        prog_frame.update_idletasks()
        title_ = "sparc2nwb (" + str(it+1) + "/" + str(n_files) + ") converted!"
        window.title(title_)
# Define the function for the command
def get_path():
    """Prompt for a directory and echo the choice into the path text box."""
    global folder_selected  # consumed later by convert()
    folder_selected = askdirectory()
    txt_standardPath.insert(tk.END, folder_selected)
# Have a simple application
window = tk.Tk()
window.title("sparc2nwb")
window.geometry('1050x500')
# Create the main containers (Frame)
main_frame = tk.Frame(window, width=800, height=450, pady=20, padx=30)
side_frame = tk.Frame(window, width=200, height=450, pady=20)
prog_frame = tk.Frame(window, width=1000, height=50, pady=10)
# Tk variables backing the dataset-description entry widgets; read by main_().
experimenter_ent = tk.StringVar()
experimenter_desc_ent = tk.StringVar()
publication_ent = tk.StringVar()
keywords_ent = tk.StringVar()
def convert():
    """Convert button callback: set up logging and run the conversion.

    Uses the directory previously chosen via get_path() (module-level
    ``folder_selected``); raises NameError if Browse was never clicked.
    """
    log_format = '%(levelname)s %(asctime)s - %(message)s'
    logging.basicConfig(filename='conversion.logs',
                        level=logging.INFO,
                        format=log_format,
                        filemode='w')
    logger = logging.getLogger()
    #standard_path = './Pennsieve-dataset-124-version-2/files/primary/'
    standard_path = folder_selected + '/'
    manifest_data = pd.read_excel(standard_path+'manifest.xlsx')
    # If there is no nwb_folders filder, then create it.
    if not os.path.exists('./nwb_files/'):
        os.makedirs('./nwb_files/')
    main_(standard_path, manifest_data, logger)
xscrollbar = Scrollbar(main_frame, orient=HORIZONTAL)
xscrollbar.grid(row=5, columnspan=2, sticky="e", padx=5, ipadx=188)
xscrollbar_2 = Scrollbar(main_frame, orient=HORIZONTAL)
xscrollbar_2.grid(row=8, columnspan=2, sticky="e", padx=5, ipadx=188)
# Left frame
label_standardPath = tk.Label(main_frame, text="Standard Path", fg="black", font=('Arial', 10))
txt_standardPath = tk.Text(main_frame, width=70, height=1, font=('Arial', 10))
empt1 = tk.Label(main_frame, text=" ", fg="black", height=1)
btn_standardPath = tk.Button(main_frame, text="Browse", bg="gold", fg="black", font=('Arial', 10), command=get_path)
empt3 = tk.Label(main_frame, text=" ", fg="black", height=2)
label_datasetDesc = tk.Label(main_frame, text="Dataset description", fg="black", font=('Arial', 12, 'bold'))
label_experimeter = tk.Label(main_frame, text="Experimenter", fg="black", font=('Arial', 10))
experimenter_entry = tk.Entry(main_frame, textvariable=experimenter_ent, font=('Arial',10,'italic'), width=60)
label_experimeter_desc = tk.Label(main_frame, text="Experiment description", fg="black", font=('Arial', 10))
experimenter_desc_entry = tk.Entry(main_frame, textvariable=experimenter_desc_ent, font=('Arial',10,'italic'), width=60,
xscrollcommand=xscrollbar.set)
label_publication = tk.Label(main_frame, text="Related publication", fg="black", font=('Arial', 10))
publication_entry = tk.Entry(main_frame, textvariable=publication_ent, font=('Arial',10,'italic'), width=60)
label_keywords = tk.Label(main_frame, text="Keywords (comma separated)", fg="black", font=('Arial', 10))
keywords_entry = tk.Entry(main_frame, textvariable=keywords_ent, font=('Arial',10,'italic'), width=60,
xscrollcommand=xscrollbar_2.set)
label_keywords = tk.Label(main_frame, text="Keywords (comma separated)", fg="black", font=('Arial', 10))
keywords_entry = tk.Entry(main_frame, textvariable=keywords_ent, font=('Arial',10,'italic'), width=60,
xscrollcommand=xscrollbar_2.set)
label_logger = tk.Label(main_frame, text="Logging information", fg="grey", font=('Arial', 10))
logger_text = scrolledtext.ScrolledText(main_frame, wrap=tk.WORD, font=('Arial',10,'italic'), width=57,
height=7)
label_standardPath.grid(row=0, column=0, sticky="ns", padx=2)
txt_standardPath.grid(row=0, column=1, sticky="ns", padx=2)
empt1.grid(row=0, column=2, sticky="ns", padx=2)
btn_standardPath.grid(row=0, column=3, sticky="ns", padx=2)
empt3.grid(row=1, columnspan=3, sticky="ns", padx=2)
label_datasetDesc.grid(row=2, columnspan=2, sticky="w")
label_experimeter.grid(row=3, column=0, sticky="ew", padx=2, pady=5)
experimenter_entry.grid(row=3, columnspan=2, sticky="e", padx=5, pady=5)
label_experimeter_desc.grid(row=4, column=0, sticky="ew", padx=2, pady=5)
experimenter_desc_entry.grid(row=4, columnspan=2, sticky="e", padx=5, pady=5, ipady=5)
xscrollbar.config(command=experimenter_desc_entry.xview)
label_publication.grid(row=6, column=0, sticky="ew", padx=2, pady=5)
publication_entry.grid(row=6, columnspan=2, sticky="e", padx=5, pady=5)
label_keywords.grid(row=7, column=0, sticky="ew", padx=2, pady=5)
keywords_entry.grid(row=7, columnspan=2, sticky="e", padx=5, pady=5, ipady=5)
xscrollbar_2.config(command=keywords_entry.xview)
label_logger.grid(row=9, column=0, sticky="ew", padx=2, pady=5)
logger_text.grid(row=9, columnspan=2, sticky="e", padx=5, pady=5)
# Right frame
image = Image.open("image.jpg")
image = image.resize((200, 200), Image.ANTIALIAS)
image = ImageTk.PhotoImage(image)
panel = tk.Label(side_frame, image=image)
empt2 = tk.Label(side_frame, text=" ", fg="black", height=10)
btn_convert = tk.Button(side_frame, text=" Convert ", bg="lightgreen", fg="black", font=('Arial', 12), command=convert)
panel.grid(row=0, column=0)
empt2.grid(row=1, column=0)
btn_convert.grid(row=2, column=0)
# Bottom frame
label_progress = tk.Label(prog_frame, text="Conversion progress", fg="black", font=('Arial', 10))
progress = Progressbar(prog_frame, orient=tk.HORIZONTAL, length=770, mode='determinate')
label_progress.grid(row=0, column=0, sticky="ew", padx=50)
progress.grid(row=0, column=1, sticky="ew")
# Organization for frames
main_frame.grid(row=0, column=0, sticky="ns")
side_frame.grid(row=0, column=1, sticky="ns")
prog_frame.grid(row=1, columnspan = 2, sticky="ew")
tk.mainloop()
| [
"noreply@github.com"
] | DerekYJC.noreply@github.com |
366de226c7646d2431d87cfddec889845e445d13 | 8770524b3ffcc082e7e41f5a4d190ff7e74e2661 | /producerVelib/producer.py | c8f004947ae953ab6fab54f1631700f55f176f9a | [] | no_license | grvn-ht/VelibApp-Kafka-Python-React-Docker | 1cd4962e532c0b936fedc7408a32bd62bbb63323 | a9250218be35aac5fecd4d7869c1d46cc88ab9b9 | refs/heads/master | 2023-03-08T08:21:12.151352 | 2021-03-01T20:01:10 | 2021-03-01T20:01:10 | 332,783,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,014 | py | import json
import time
import urllib.request
from kafka import KafkaProducer
# Producer pointed at the three-broker cluster defined in docker-compose.
producer = KafkaProducer(bootstrap_servers=["kafka1:29092","kafka2:29093","kafka3:29094"], api_version=(0, 10, 1))
# Paris open-data endpoint: real-time Velib station availability (up to 1500 stations).
req = 'https://opendata.paris.fr/api/records/1.0/search/?dataset=velib-disponibilite-en-temps-reel&rows=1500'
# Poll the API forever (once per minute) and publish one message per station,
# keyed by station code so all updates for a station land in the same partition.
while True:
    response = None
    # Retry until the API answers, backing off 10 s between attempts.
    while response is None:
        try:
            response = urllib.request.urlopen(req)
        except Exception:
            # Bug fix: was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit and made the loop unstoppable.
            print('can t acces url api, sleep 10')
            time.sleep(10)
    api_velib = json.loads(response.read().decode())
    records = api_velib['records']
    for station in records:
        infos_station = station['fields']
        # Placeholders filled in downstream by the broken-bike detection job.
        station['fields']['broke_ebike'] = []
        station['fields']['broke_mechanical'] = []
        producer.send("velib", json.dumps(station).encode(), key=str(infos_station["stationcode"]).encode())
    # Bug fix: the original logged len(infos_station) -- the number of fields
    # of the *last* station -- instead of the number of records published.
    print("{} Produced {} station records".format(time.time(), len(records)))
    time.sleep(60)
| [
"grvn.huet@gmail.com"
] | grvn.huet@gmail.com |
b34561df0fc9fbf7967d223ac8a7b9c44c4323a2 | 174f02e66915184aa6353af732e9aa1d6989e6de | /source/lib/SConscript | bed9a68f5c45fc7af8e21b222f67b3379dc888a2 | [] | no_license | giszo/urubu | a4f1ef3bb6d78275ad0d482d7afedb4378de0a5b | 0a80e1f45fb66c273162f07718e74ff95aa4a14c | refs/heads/master | 2020-12-24T13:28:38.871285 | 2013-11-14T20:09:30 | 2013-11-14T20:09:30 | 14,167,345 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 852 | # Urubu build system
#
# Copyright (c) 2013 Zoltan Kovacs
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Import("env")
SConscript(dirs = ["crt0", "c", "support", "devman"], exports = ["env"])
| [
"giszo.k@gmail.com"
] | giszo.k@gmail.com | |
4b3237b0d4f5231a5cb009b616b46afaa72b71de | 40fe9004b894e42ba0fc0d5dcc38c074026ca7a9 | /ejemplo 5.py | e9ccc283f6dffbde290b4a93461e2e61297f1378 | [] | no_license | AlfredoGuadalupe/Lab-EDA-1-Practica-9 | 2bee96c24ec7400274cb63387246a2bccae0c95a | ed93f911e4ea55102060945b55d93fe896974d01 | refs/heads/master | 2021-05-19T11:31:38.056056 | 2020-04-04T22:38:33 | 2020-04-04T22:38:33 | 251,675,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | x=10
# Demonstration of dynamic typing: the same names are rebound to values of
# different types, and type() always reports the current value's type.
cadena = "Hola Mundo"
print(type(x))
print(type(cadena))
# Rebind both names with values of the opposite type.
x = "Hola Mundo"
cadena = 10
print(type(x))
print(type(cadena))
| [
"noreply@github.com"
] | AlfredoGuadalupe.noreply@github.com |
7036ac32fee9e65130dd13ff7295975182eedaf6 | 78bb44c98f241d16a853e318cdbe9573d307d3e1 | /google/appengine/api/appinfo_errors.py | a681837a4ab7d9b740ebe66866196e98388a8b54 | [
"Apache-2.0"
] | permissive | vicmortelmans/catholicmissale | 2a8129eef31142b43e66fbd72620c7454b3c929b | b36916181d87f4f31f5bbbb976a7e88f55296986 | refs/heads/master | 2021-01-17T11:36:49.768808 | 2020-11-17T18:14:16 | 2020-11-17T18:14:16 | 11,186,395 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,750 | py | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Errors used in the Python appinfo API, used by app developers."""
class Error(Exception):
  """Base error type for the appinfo module."""
class EmptyConfigurationFile(Error):
  """Tried to load an empty configuration file."""
class MultipleConfigurationFile(Error):
  """Tried to load a configuration file with multiple AppInfo objects."""
class MultipleProjectNames(Error):
  """Configuration file had both "application:" and "project:" fields.
  A configuration file can specify the project name using either the old-style
  "application: name" syntax or the newer "project: name" syntax, but not both.
  """
class UnknownHandlerType(Error):
  """Raised when it is not possible to determine URL mapping type."""
class UnexpectedHandlerAttribute(Error):
  """Raised when a handler type has an attribute that it does not use."""
class MissingHandlerAttribute(Error):
  """Raised when a handler is missing an attribute required by its type."""
class MissingURLMapping(Error):
  """Raised when there are no URL mappings in external appinfo."""
class TooManyURLMappings(Error):
  """Raised when there are too many URL mappings in external appinfo."""
class PositionUsedInAppYamlHandler(Error):
  """Raised when position attribute is used in handler in AppInfoExternal."""
class InvalidBuiltinFormat(Error):
  """Raised when the name of the builtin in a list item cannot be identified."""
class MultipleBuiltinsSpecified(Error):
  """Raised when more than one builtin is specified in a single list element."""
class DuplicateBuiltinsSpecified(Error):
  """Raised when a builtin is specified more than once in the same file."""
class BackendNotFound(Error):
  """Raised when a Backend is required but not specified."""
class DuplicateBackend(Error):
  """Raised when a backend is found more than once in 'backends'."""
class MissingApiConfig(Error):
  """Raised if an api_endpoint handler is configured but no api_config."""
class RuntimeDoesNotSupportLibraries(Error):
  """Raised when 'libraries' is used in a runtime that does not support it."""
class DuplicateLibrary(Error):
  """Raised when a library is found more than once in 'libraries'."""
class InvalidLibraryVersion(Error):
  """Raised when a library uses a version that isn't supported."""
class InvalidLibraryName(Error):
  """Raised when a library is specified that isn't supported."""
class ThreadsafeWithCgiHandler(Error):
  """Raised when threadsafe is enabled with a CGI handler specified."""
class MissingThreadsafe(Error):
  """Raised when the runtime needs a threadsafe declaration and it's missing."""
class InvalidHttpHeaderName(Error):
  """Raised when an invalid HTTP header name is used.
  This issue arises when a static handler uses http_headers. For example, the
  following would not be allowed:
    handlers:
    - url: /static
      static_dir: static
      http_headers:
        D@nger: Will Robinson
  """
class InvalidHttpHeaderValue(Error):
  """Raised when an invalid HTTP header value is used.
  This issue arises when a static handler uses http_headers. For example, the
  following would not be allowed:
    handlers:
    - url: /static
      static_dir: static
      http_headers:
        Some-Unicode: "\u2628"
  """
class ContentTypeSpecifiedMultipleTimes(Error):
  """Raised when mime_type and http_headers specify a mime type.
  N.B. This will be raised even when both fields specify the same content type.
  E.g. the following configuration (snippet) will be rejected:
    handlers:
    - url: /static
      static_dir: static
      mime_type: text/html
      http_headers:
        content-type: text/html
  This only applies to static handlers i.e. a handler that specifies static_dir
  or static_files.
  """
class TooManyHttpHeaders(Error):
  """Raised when a handler specified too many HTTP headers.
  The message should indicate the maximum number of headers allowed.
  """
class TooManyScalingSettingsError(Error):
  """Raised when more than one scaling settings section is present."""
class MissingRuntimeError(Error):
  """Raised when the "runtime" field is omitted for a non-vm."""
| [
"vicmortelmans@gmail.com"
] | vicmortelmans@gmail.com |
0f302751d52a6405f153aeca48f9dfd49db15020 | a9a1ffcef92fc711b8850f30370cfe37cfaf98e4 | /core/domain/recommendations_jobs.py | c766d33ea1b0409aa7646aeaadfdbe19d9fcba42 | [
"Apache-2.0"
] | permissive | leandrotoledo/oppia | bb81ebd198c594054a752e93ed4618bba2f279c3 | 55bc67251a70768b7ec9555750f236fb530497c0 | refs/heads/develop | 2020-04-14T00:04:20.109854 | 2015-09-21T07:50:09 | 2015-09-21T07:50:09 | 42,904,723 | 1 | 0 | null | 2015-09-22T01:52:28 | 2015-09-22T01:52:27 | null | UTF-8 | Python | false | false | 4,821 | py | # coding: utf-8
#
# Copyright 2015 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Jobs for recommendations."""
__author__ = 'Xinyu Wu'
import ast
from core import jobs
from core.platform import models
(exp_models, recommendations_models,) = models.Registry.import_models([
models.NAMES.exploration, models.NAMES.recommendations])
class ExplorationRecommendationsRealtimeModel(
jobs.BaseRealtimeDatastoreClassForContinuousComputations):
pass
class ExplorationRecommendationsAggregator(
jobs.BaseContinuousComputationManager):
"""A continuous-computation job that computes recommendations for each
exploration.
This job does not have a realtime component. There will be a delay in
propagating new updates to recommendations; the length of the delay
will be approximately the time it takes a batch job to run."""
@classmethod
def get_event_types_listened_to(cls):
return []
@classmethod
def _get_realtime_datastore_class(cls):
return ExplorationRecommendationsRealtimeModel
@classmethod
def _get_batch_job_manager_class(cls):
return ExplorationRecommendationsMRJobManager
@classmethod
def _handle_incoming_event(cls, active_realtime_layer, event_type, *args):
pass
class ExplorationRecommendationsMRJobManager(
jobs.BaseMapReduceJobManagerForContinuousComputations):
"""Manager for a MapReduce job that computes a list of recommended
explorations to play after completing some exploration."""
@classmethod
def _get_continuous_computation_class(cls):
return ExplorationRecommendationsAggregator
@classmethod
def entity_classes_to_map_over(cls):
return [exp_models.ExpSummaryModel]
@staticmethod
def map(item):
from core.domain import exp_services
from core.domain import recommendations_services
from core.domain import rights_manager
# Only process the exploration if it is not private
if item.status == rights_manager.EXPLORATION_STATUS_PRIVATE:
return
# Note: There is a threshold so that bad recommendations will be
# discarded even if an exploration has few similar explorations.
SIMILARITY_SCORE_THRESHOLD = 3.0
exp_summary_id = item.id
exp_summaries_dict = (
exp_services.get_non_private_exploration_summaries())
# Note: This is needed because the exp_summaries_dict is sometimes
# different from the summaries in the datastore, especially when
# new explorations are added.
if exp_summary_id not in exp_summaries_dict:
return
reference_exp_summary = exp_summaries_dict[exp_summary_id]
for compared_exp_id, compared_exp_summary in exp_summaries_dict.iteritems():
if compared_exp_id != exp_summary_id:
similarity_score = (
recommendations_services.get_item_similarity(
reference_exp_summary.category,
reference_exp_summary.language_code,
reference_exp_summary.owner_ids,
compared_exp_summary.category,
compared_exp_summary.language_code,
compared_exp_summary.exploration_model_last_updated,
compared_exp_summary.owner_ids,
compared_exp_summary.status))
if similarity_score >= SIMILARITY_SCORE_THRESHOLD:
yield (exp_summary_id, {
'similarity_score': similarity_score,
'exp_id': compared_exp_id
})
@staticmethod
def reduce(key, stringified_values):
from core.domain import recommendations_services
MAX_RECOMMENDATIONS = 10
other_exploration_similarities = sorted(
[ast.literal_eval(v) for v in stringified_values],
reverse=True,
key=lambda x: x['similarity_score'])
recommended_exploration_ids = [
item['exp_id']
for item in other_exploration_similarities[:MAX_RECOMMENDATIONS]]
recommendations_services.set_recommendations(
key, recommended_exploration_ids)
| [
"wxyxinyu@gmail.com"
] | wxyxinyu@gmail.com |
88221fcdc4d5cdbc8170fc30fecdbfb61a5a11af | 041b53214bb9b5677a71a372458653f273c8019c | /ParametricLeaf.py | edd4b3b0e212a6c11cef01fe2a2d655c2c03b5a2 | [] | no_license | ChenLingFeng22/Pcd-to-Parametric-Leaf | a0a84134452810770a2d0352d67e6539c7d1aac2 | 826d5d4164a87f7fa4b5ada518d8c549d79b6a97 | refs/heads/master | 2020-07-14T16:18:11.518326 | 2019-08-30T09:40:58 | 2019-08-30T09:40:58 | 205,347,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,900 | py | from PcdReconstruction.Bezier import Bezier
from PcdReconstruction.Midrib import Midrib
from PcdReconstruction.Silhouette import Silhouette
import numpy as np
MIDRIB_SAMPLE = 50
CROSS_SECTION_SAMPLE = 20
class ParametricLeaf:
def __init__(self):
self.A1 = np.array([0, 0.3, 0])
self.A2 = np.array([0, 0.7, 0])
self.S1 = np.array([0.1, 0.25, 0])
self.S2 = np.array([0.5, 0.4, 0])
self.S3 = np.array([0.9, 0.25, 0])
self.F = np.array([0.0, 0.0, 0.0])
self.G = np.array([0.0, 0.0, 0.0])
def computeParamLeafModel(self):
self.computeMidrib()
self.computeSilhouette()
self.computeLongitudinal()
self.computeCrossSection()
def computeMidrib(self):
self.midrib = Midrib(np.array([[0, 0, 0], self.A1, self.A2, [0, 1, 0]]))
def computeSilhouette(self):
self.silhouetteL = Silhouette(self.midrib, np.array([self.S1, self.S2, self.S3]))
self.silhouetteR = Silhouette(self.midrib,
np.array([np.multiply(self.S1, [1, -1, 1]), np.multiply(self.S2, [1, -1, 1]),
np.multiply(self.S3, [1, -1, 1])]))
def computeLongitudinal(self):
self.curveFL = Silhouette(self.midrib, np.array(
[[self.S1[0], self.S1[1] * (1 / 3), self.F[0]],
[self.S2[0], self.S2[1] * (1 / 3), self.F[1]],
[self.S3[0], self.S3[1] * (1 / 3), self.F[2]]]))
self.curveFR = Silhouette(self.midrib, np.array(
[[self.S1[0], self.S1[1] * (-1 / 3), self.F[0]],
[self.S2[0], self.S2[1] * (-1 / 3), self.F[1]],
[self.S3[0], self.S3[1] * (-1 / 3), self.F[2]]]))
self.curveGL = Silhouette(self.midrib, np.array(
[[self.S1[0], self.S1[1] * (2 / 3), self.G[0]],
[self.S2[0], self.S2[1] * (2 / 3), self.G[1]],
[self.S3[0], self.S3[1] * (2 / 3), self.G[2]]]))
self.curveGR = Silhouette(self.midrib, np.array(
[[self.S1[0], self.S1[1] * (-2 / 3), self.G[0]],
[self.S2[0], self.S2[1] * (-2 / 3), self.G[1]],
[self.S3[0], self.S3[1] * (-2 / 3), self.G[2]]]))
def getCrossSection(self, v, direction):
cv0 = self.midrib.samplePoint(v).flatten()
if direction == -1:
cv1 = self.curveFL.samplePoint(v).flatten()
cv2 = self.curveGL.samplePoint(v).flatten()
cv3 = self.silhouetteL.samplePoint(v).flatten()
elif direction == 1:
cv1 = self.curveFR.samplePoint(v).flatten()
cv2 = self.curveGR.samplePoint(v).flatten()
cv3 = self.silhouetteR.samplePoint(v).flatten()
return Bezier(np.array([cv0, cv1, cv2, cv3]))
def computeCrossSection(self):
self.vertices = []
self.vertices.append([0, 0, 0])
for i in range(1, MIDRIB_SAMPLE):
v = (1 / MIDRIB_SAMPLE) * i
self.crossSectionL = self.getCrossSection(v, -1)
gap = 1 / CROSS_SECTION_SAMPLE
for j in range(CROSS_SECTION_SAMPLE, -1, -1):
self.vertices.append(self.crossSectionL.samplePoint(gap * j).flatten().tolist())
self.crossSectionR = self.getCrossSection(v, 1)
for j in range(1, CROSS_SECTION_SAMPLE + 1):
self.vertices.append(self.crossSectionR.samplePoint(gap * j).flatten().tolist())
self.vertices.append([0, 1, 0])
self.face = []
self.normal = []
num = len(self.vertices)
for i in range(-1, MIDRIB_SAMPLE - 1):
if i == -1:
for j in range(0, 2 * CROSS_SECTION_SAMPLE):
self.face.append([0, j + 2, j + 1])
A = np.array(self.vertices[0])
B = np.array(self.vertices[j + 2])
C = np.array(self.vertices[j + 1])
self.normal.append(np.cross((B - A), (C - A)).flatten().tolist())
continue
if i == MIDRIB_SAMPLE - 2:
for j in range(0, 2 * CROSS_SECTION_SAMPLE):
self.face.append([num - 1,
i * (2 * CROSS_SECTION_SAMPLE + 1) + j + 1,
i * (2 * CROSS_SECTION_SAMPLE + 1) + j + 2])
A = np.array(self.vertices[num - 1])
B = np.array(self.vertices[i * (2 * CROSS_SECTION_SAMPLE + 1) + j + 1])
C = np.array(self.vertices[i * (2 * CROSS_SECTION_SAMPLE + 1) + j + 2])
self.normal.append(np.cross((B - A), (C - A)).flatten().tolist())
continue
for j in range(1, 2 * CROSS_SECTION_SAMPLE + 1):
self.face.append([i * (2 * CROSS_SECTION_SAMPLE + 1) + j,
i * (2 * CROSS_SECTION_SAMPLE + 1) + j + 1,
(i + 1) * (2 * CROSS_SECTION_SAMPLE + 1) + j])
A = np.array(self.vertices[(i + 1) * (2 * CROSS_SECTION_SAMPLE + 1) + j])
B = np.array(self.vertices[i * (2 * CROSS_SECTION_SAMPLE + 1) + j])
C = np.array(self.vertices[i * (2 * CROSS_SECTION_SAMPLE + 1) + j + 1])
self.normal.append(np.cross((B - A), (C - A)).flatten().tolist())
self.face.append([i * (2 * CROSS_SECTION_SAMPLE + 1) + j + 1,
(i + 1) * (2 * CROSS_SECTION_SAMPLE + 1) + j + 1,
(i + 1) * (2 * CROSS_SECTION_SAMPLE + 1) + j])
A = np.array(self.vertices[i * (2 * CROSS_SECTION_SAMPLE + 1) + j + 1])
B = np.array(self.vertices[(i + 1) * (2 * CROSS_SECTION_SAMPLE + 1) + j + 1])
C = np.array(self.vertices[(i + 1) * (2 * CROSS_SECTION_SAMPLE + 1) + j])
self.normal.append(np.cross((B - A), (C - A)).flatten().tolist())
| [
"cheng.wen.bupthu@gmail.com"
] | cheng.wen.bupthu@gmail.com |
bd7d5a9145e6a90674771087727d96fb67fd1c04 | 2ae6784fe5388da20f858ac7bb2976b6ec5b522d | /code/plottingScripts/plotMLines.py | a65f7f09b7927469a474bf5c8c69c3bcb10f8233 | [] | no_license | yanyuechuixue/PTAInference | 8e39f95f4905cf17a3a6241be0806e39ebdc2c8a | 88f399db579d3f8bac7d2ff1648444075e64dec2 | refs/heads/master | 2023-01-07T23:40:09.978266 | 2020-11-08T14:45:49 | 2020-11-08T14:45:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,416 | py | import numpy as np
import matplotlib.pyplot as plt
import confBands
import matplotlib
matplotlib.rcParams.update({'font.size': 15})
orange = '#E69F00'
blue = '#0072B2'
green = '#009E73'
red = '#D55E00'
green2 = '#004D40'
blue2 = '#1E88E5'
orange2 = '#FFC107'
red2 = '#D81B60'
colSimp=orange
colGalExt=green
colGal = 'k'
simpModRunLoc = '../../runs/simpleModel/logNormLikeMstar6to10/'
simpModData = np.genfromtxt('{}combined/mLines.dat'.format(simpModRunLoc))
simpModMs = np.genfromtxt('{}combined/ms.dat'.format(simpModRunLoc))
_,p05,p25,p50,p75,p95,_ = confBands.getPercentiles(simpModMs,simpModData)
plt.fill_between(simpModMs,p05,p95,alpha=0.3,color=colSimp)
plt.fill_between(simpModMs,p25,p75,alpha=0.3,color=colSimp)
plt.plot(simpModMs,p50,color=colSimp,ls='--')
galExtModRunLoc = '../../runs/galaxyModel_ext/'
galExtModData = np.genfromtxt('{}/m_lines.dat'.format(galExtModRunLoc))
galExtModLogMs = np.genfromtxt('{}/mc.txt'.format(galExtModRunLoc))
galExtModMs = [ 10.0**logM for logM in galExtModLogMs ]
_,p05,p25,p50,p75,p95,_ = confBands.getPercentiles(galExtModMs,galExtModData)
plt.fill_between(galExtModMs,p05,p95,alpha=0.3,color=colGalExt)
plt.fill_between(galExtModMs,p25,p75,alpha=0.3,color=colGalExt)
plt.plot(galExtModMs,p50,color=colGalExt,ls='--')
galExtModPrior = np.genfromtxt('{}/m_linesprior.dat'.format(galExtModRunLoc))
p00p5,_,_,_,_,_,p99p5 = confBands.getPercentiles(galExtModMs,galExtModPrior)
plt.plot(galExtModMs, p00p5, color='k', alpha=0.7, ls=':')
plt.plot(galExtModMs, p99p5, color='k', alpha=0.7, ls=':')
"""
galModRunLoc = '../../runs/galaxyModel/'
galModData = np.genfromtxt('{}/m_lines.dat'.format(galModRunLoc))
galModLogMs = np.genfromtxt('{}/mc.txt'.format(galModRunLoc))
galModMs = [ 10.0**logM for logM in galModLogMs ]
p05,p25,p50,p75,p95 = confBands.getPercentiles(galModMs,galModData)
plt.fill_between(galModMs,p05,p95,alpha=0.3,color=colGal)
plt.fill_between(galModMs,p25,p75,alpha=0.3,color=colGal)
plt.plot(galModMs,p50,color=colGal,ls='--')
"""
plt.ylim(1E-9,1E6)
plt.xlim(1E6,1E11)
plt.yscale('log')
plt.xscale('log')
plt.xlabel(r'$\mathcal{M}~~({\rm M_{\odot}})$')
plt.ylabel(r'${\rm d}n / {\rm d} \log_{10} \mathcal{M}/{\rm M_{\odot}}~~({\rm Mpc}^{-3})$')
plt.tight_layout()
plt.savefig('combinedAnalysisPlots/dndlogM.pdf'.format(simpModRunLoc))
plt.savefig('combinedAnalysisPlots/dndlogM.png'.format(simpModRunLoc))
plt.show()
| [
"hmiddleton8@googlemail.com"
] | hmiddleton8@googlemail.com |
3d839a1fa9233be5b27f6e913afa36303108bc53 | c0f303806580d54b2ab48b37a59f530856bf0e71 | /scripts/dns_add_zone | 012fd10fea44f5446b8a592c93408829f588965d | [] | no_license | JoranTrompet-r0667009/SystBeheer | 0be21893d1a512fa1cfa21a5db3514142275b830 | 1fd20a5a31f072f1a2dcc70148b825e25ade57f1 | refs/heads/master | 2020-04-01T20:56:46.841884 | 2018-12-13T19:31:01 | 2018-12-13T19:31:01 | 153,628,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,622 | #!/usr/bin/env python3
import sys
import os.path
import re
def print_error(message):
print(message)
sys.exit(1)
def newZone(name):
zone = name + ".joran-trompet.sb.uclllabs.be"
path = "/etc/bind/zones/" + zone
with open(path, "w+") as file:
file.write("$TTL 604800\n")
file.write("@ IN SOA " + zone + " root.joran-trompet.sb.uclllabs.be. (\n")
file.write(" 1 ;serial\n")
file.write(" 2h ;refresh\n")
file.write(" 1h ;retry\n")
file.write(" 1w ;expire\n")
file.write(" 5 ) ;neg cache TTL\n")
file.write(";\n\n")
file.write("\t".join(["@", "IN", "NS", "ns.joran-trompet.sb.uclllabs.be."]) + "\n")
file.write("\t".join(["@", "IN", "NS", "ns1.uclllabs.be."]) + "\n")
file.write("\t".join(["@", "IN", "NS", "ns2.uclllabs.be."]) + "\n")
file.write(";\n\n")
file.write("\t".join(["@", "IN", "A", "193.191.177.158"]) + "\n")
file.write("\t".join(["ns", "IN", "A", "193.191.177.158"]) + "\n")
with open("/etc/bind/zones/joran-trompet.sb.uclllabs.be", "a") as file:
file.write("\t".join([name, "IN", "NS", "ns"]) + "\n")
with open("/etc/bind/named.conf.local", "a") as file:
file.write("\nzone \"" + name + ".joran-trompet.sb.uclllabs.be\" {\n")
file.write(" type master;\n")
file.write(" file \"" + path + "\";\n")
file.write(" allow-transfer {193.191.177.254;193.191.177.254;193.191.177.4;193.191.177.221; };\n")
file.write("};\n")
if __name__ == '__main__':
args = sys.argv
args.pop(0)
if len(args) == 1:
name = args[0]
if re.match("^[A-Za-z0-9]*$", name):
newZone(name)
else:
print_error("error1")
else:
print_error("error2")
| [
"joran.trompet@uclllabs.be"
] | joran.trompet@uclllabs.be | |
bb491114b0cef321d3302da6567c88bca4d754f1 | 93dd3eaa3d53f80891250fd3cf7f8fa4e3a16a3d | /15Backgrounddetection.py | 540cc77811b20b74d4fc0d8ba12a03160708e1ea | [] | no_license | nishant-neo/ImageProcessing-Python | 3a903819ad9de0396ce830220e5bb82e927e82e0 | 604b94617416194cacd3c3050f6eca4690fe2360 | refs/heads/master | 2020-07-01T21:56:03.195610 | 2017-03-27T06:22:22 | 2017-03-27T06:22:22 | 74,253,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | import cv2
import numpy as np
#we will try to extrct the moving folks walking down the street
cap = cv2.VideoCapture('people-walking.mp4')
fgbg = cv2.createBackgroundSubtractorMOG2()
while True:
ret, frame = cap.read()
fgmask = fgbg.apply(frame)
cv2.imshow('original', frame)
cv2.imshow('fg', fgmask)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | nishant-neo.noreply@github.com |
d8220916ecbd98047f863432cb5fb2eccd456d55 | 6217c4ec98e249ad4fe5f47b5e12f40b18c98dfd | /backend/mushrooms/migrations/0005_remove_mushroom_type.py | 4e5559265cb0af75625aa0f9200c61e99f6cb1fe | [] | no_license | mbrodziak/Mushrooming | 8d32ef67624d4682e5de0a5ca1d84f1fa1112558 | 3188cbff1faa720733ce3c94e297dbdc35b9a9ee | refs/heads/master | 2023-02-04T06:23:42.577857 | 2020-12-20T17:40:18 | 2020-12-20T17:40:18 | 292,370,380 | 2 | 0 | null | 2020-12-20T17:40:19 | 2020-09-02T19:02:13 | Python | UTF-8 | Python | false | false | 330 | py | # Generated by Django 3.1.1 on 2020-09-06 09:26
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mushrooms', '0004_auto_20200905_1927'),
]
operations = [
migrations.RemoveField(
model_name='mushroom',
name='type',
),
]
| [
"Mateusz@Mateusz"
] | Mateusz@Mateusz |
2a5c2f2bf1ceebbcc5947b1f7cb54ce037ae73ac | fba17ffa4796e4dd66d94bc552a9f6da1464e55b | /demo/util.py | 876fd186689202c42ffd0234ba00d50caaf213e1 | [] | no_license | Chenxuanqi666/Crawlerists | 0fd4889875aa6d249011eec648320ebb81f90a96 | eceaf3b261e6da549e29f894b22719837bff0003 | refs/heads/main | 2023-06-20T07:40:44.480792 | 2021-07-21T11:49:26 | 2021-07-21T11:49:26 | 387,184,840 | 0 | 0 | null | 2021-07-18T13:39:46 | 2021-07-18T13:39:46 | null | UTF-8 | Python | false | false | 2,775 | py | import time
from datetime import datetime
import re
class Util(object):
month = {
'January': 1,
'February': 2,
'March': 3,
'April': 4,
'May': 5,
'June': 6,
'July': 7,
'August': 8,
'September': 9,
'October': 10,
'November': 11,
'December': 12,
'Jan': 1,
'Feb': 2,
'Mar': 3,
'Apr': 4,
'May': 5,
'Jun': 6,
'Jul': 7,
'Aug': 8,
'Sept': 9,
'Sep': 9,
'Oct': 10,
'Nov': 11,
'Dec': 12
}
@staticmethod
def format_time(t=0):
if t == 0:
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
else:
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
@staticmethod
def format_time2(data1):
data = ''
list = [i for i in re.split('/| |,|:|\n|\r|\f|\t|\v',data1) if i!='']
for i in list:
data += (i+' ')
data = data.strip()
if re.findall(r'\S+ \d+ \d+ \d+ \d+',data) != []:
num = 0
while list[num] not in Util.month.keys():
num += 1
return time.strftime("%Y-%m-%d %H:%M:%S", datetime(int(list[num+2]),Util.month[list[num]],int(list[num+1]),int(list[num+3]),int(list[num+4])).timetuple())
elif re.findall(r'\S+ \d+ \d+',data) != []:
num = 0
while list[num] not in Util.month.keys():
num += 1
return time.strftime("%Y-%m-%d %H:%M:%S", datetime(int(list[num+2]),Util.month[list[num]],int(list[num+1])).timetuple())
elif re.findall(r'\d+ hours ago',data) != []:
num = 0
while re.findall(r'\d+',list[num])==[]:
num += 1
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()-int(list[num])*3600))
elif re.findall(r'\d+ days ago',data) != []:
num = 0
while re.findall(r'\d+',list[num])==[]:
num += 1
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()-int(list[num])*86400))
elif re.findall(r'\d+ weeks ago',data) != []:
num = 0
while re.findall(r'\d+',list[num])==[]:
num += 1
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()-int(list[num])*604800))
else:
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
@staticmethod
def format_time3(data):
timeArray = time.strptime(data, "%Y-%m-%d %H:%M:%S")
timeStamp = int(time.mktime(timeArray))
return timeStamp | [
"noreply@github.com"
] | Chenxuanqi666.noreply@github.com |
d0c999d0e575dcb44f8db9f2c8ab5d1d5eeba244 | 498f68b80d2f5c20f24cc3ffd86d34a3ed494757 | /test_main_page.py | e20d983b3a2e39572afa552d018fa7b56ed61e69 | [] | no_license | NazarovIL/final_task_selenium | 4218ac762076d0252d3adbac01c3e46c5dc22d52 | 057e7ab3c8e10888485603f4923ad3e7ae7d6382 | refs/heads/master | 2022-07-10T06:54:40.126692 | 2020-05-14T10:02:46 | 2020-05-14T10:02:46 | 255,917,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 914 | py | from .pages.main_page import MainPage
from .pages.login_page import LoginPage
from .pages.basket_page import BasketPage
import pytest
link = "http://selenium1py.pythonanywhere.com/"
@pytest.mark.login_quest
class TestLoginFromMainPage():
def test_guest_can_go_to_login_page(self, browser):
page = MainPage(browser, link)
page.open()
page.should_be_login_link()
page.go_to_login_page()
login_page = LoginPage(browser, browser.current_url)
login_page.should_be_login_page()
def test_guest_cant_see_product_in_basket_opened_from_main_page(self, browser):
page = MainPage(browser, link)
page.open()
page.should_be_basket_link()
page.go_to_basket_page()
basket_page = BasketPage(browser, browser.current_url)
basket_page.should_not_be_product_information()
basket_page.should_be_an_empty_basket()
| [
"nazarovil2011@yandex.ru"
] | nazarovil2011@yandex.ru |
a15aa169c301e35f89e5ec5e9905e2b6d06c1520 | 85c96f45589b578ca8b61a71ef0408bd31e595da | /Classifiers/Classifier.py | 2fceb1cb313485badd039dbb8adfd690b2f0e104 | [] | no_license | MaciejBlaszczyk/Tuning-deep-learning-parameters-for-classification-problems | 7667e300d2aa4a6befe8a2faea4c6d31b370a155 | df8db5246bd3cc6af706dcdabac41382fe83ac62 | refs/heads/master | 2020-07-12T03:45:10.618384 | 2020-02-23T06:18:58 | 2020-02-23T06:18:58 | 204,709,791 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,074 | py | import numpy as np
from Utilities.Utilities import balanced_accuracy
from Enums.ClassifierType import ClassifierType
class Classifier:
def __init__(self, data_handler, epochs, batch_size, eval_type):
self.data_handler = data_handler
self.epochs = epochs
self.batch_size = batch_size
self.eval_type = eval_type
def calculate_bal_acc(self, y_predicted, y_test):
y_predicted[np.arange(len(y_predicted)), y_predicted.argmax(1)] = 1
inversed_y_predicted = self.data_handler.label_binarizer.inverse_transform(y_predicted)
inversed_y_test = self.data_handler.label_binarizer.inverse_transform(y_test)
return balanced_accuracy(inversed_y_test, inversed_y_predicted)
@staticmethod
def get_parameters(classifier_type, args):
if classifier_type == ClassifierType.MLP:
return {'hidden_layers': args.MLP_hidden_layers,
'neurons': args.MLP_neurons,
'activation': args.activation,
'optimizer': args.optimizer}
elif classifier_type == ClassifierType.CNN:
return {'hidden_layers': args.CNN_hidden_layers,
'filters': args.CNN_filters,
'filter_size': args.CNN_filter_size,
'activation': args.activation,
'optimizer': args.optimizer}
elif classifier_type == ClassifierType.LSTM:
return {'hidden_layers': args.LSTM_hidden_layers,
'cells': args.LSTM_cells,
'embedded_vec_len': args.LSTM_embedded_vec_len,
'activation': args.activation,
'optimizer': args.optimizer}
elif classifier_type == ClassifierType.CAPS:
return {'filters': args.CAPS_filters,
'filter_size': args.CAPS_filter_size,
'channels' : args.CAPS_channels,
'caps_dim' : args.CAPS_caps_dim,
'activation': args.activation,
'optimizer': args.optimizer} | [
"maciejblaszczyk1996@gmail.com"
] | maciejblaszczyk1996@gmail.com |
66041de006ed19ac7784b6f1fe70f62b8b665fb2 | fd6c4a7bb28ffe139c4385f58b2921ed86d9b8f5 | /youshi_api/job.py | 95b85d9c873b6a0edfd32c17f0ea15ac7328bb67 | [] | no_license | xxlv/youshi_api | 5419c29d1bf96723e718f0cd3477e5301d3b8136 | 95234497c3c977be57c5877510f449ab9176db3a | refs/heads/master | 2021-01-24T06:13:18.215870 | 2015-04-14T03:51:18 | 2015-04-14T03:51:18 | 33,657,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,035 | py | from flask import Flask
from flask.ext.httpauth import HTTPBasicAuth
from flask import make_response
from flask import jsonify
from flask import abort
from flask import request
#导入ORM
from sqlalchemy import Column, String, Integer,create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm.exc import NoResultFound
app=Flask(__name__)
auth = HTTPBasicAuth()
Base = declarative_base()
#MySQL启动
engine = create_engine('mysql+pymysql://root:@localhost:3306/youshi')
DBSession = sessionmaker(bind=engine)
#Model层
class Job(Base):
__tablename__="ys_job"
jid = Column(Integer, primary_key=True)
title=Column(String(20))
company=Column(String(20))
job=Column(String(20))
hire_date=Column(String(20))
status=Column(Integer)
money=Column(String(20))
accept_number=Column(Integer)
detail=Column(String(20))
pay_way=Column(String(20))
start_date=Column(String(20))
end_date=Column(String(20))
linkman=Column(String(20))
# class User(Base):
# __tablename__ ='ys_user'
# uid=Column(Integer,primary_key=True)
# username=Column(String(20))
# password=Column(String(20))
# reg_date=Column(String(20))
# reg_ip=Column(String(20))
# level=Column(String(20))
#========================================================================================
# /api/v1.0/job
#
#========================================================================================
#获取所有jobs
@app.route("/api/v1.0/job",methods=['GET'])
def get_all_jobs():
session = DBSession()
jobs=session.query(Job).all()
d=dict()
li=list()
session.commit()
total=len(jobs)
for job in jobs:
li.append(_job_obj_to_dict_(job))
return jsonify(dict({'total':total,'result':li}))
#获取指定id的job
@app.route("/api/v1.0/job/<int:job_id>",methods=['GET'])
def get_job(job_id):
session = DBSession()
try:
job=session.query(Job).filter(Job.jid==job_id).one()
except NoResultFound:
abort(404)
session.commit()
d=dict()
if job is not None:
return jsonify({"result":_job_obj_to_dict_(job)})
#删除指定ID的job
@app.route("/api/v1.0/job/<int:job_id>",methods=['DELETE'])
def del_job(job_id):
session = DBSession()
try:
res=session.query(Job).filter(Job.jid==job_id).delete()
except Exception:
res=None
session.commit()
if res is not None:
if int(res) >0 :
return jsonify({'result':'Opt Success'})
return jsonify({'error':'can not delete a job '})
#创建job
@app.route('/api/v1.0/job',methods=['POST'])
def cre_job():
session = DBSession()
#get job params
title=request.form.get('title','')
company=request.form.get('company','')
job=request.form.get('job','')
hire_date=request.form.get('hire_date','')
status=request.form.get('status','')
money=request.form.get('money','')
accept_number=request.form.get('accept_number','')
detail=request.form.get('detail','')
pay_way=request.form.get('pay_way','')
start_date=request.form.get('start_date','')
end_date=request.form.get('end_date','')
linkman=request.form.get('linkman','')
#get a instance
job=Job(title=title,company=company,job=job,hire_date=hire_date,\
status=status,money=money,accept_number=accept_number,\
detail=detail,pay_way=pay_way,start_date=start_date,end_date=end_date,linkman=linkman)
try:
session.add(job)
session.commit()
res="success"
except Exception as e:
# res=str(e)
res='error'
return jsonify({'result':res})
#更新job
@app.route('/api/v1.0/job/<int:job_id>',methods=['PUT'])
def upd_job(job_id):
session = DBSession()
title=request.form.get('title',None)
company=request.form.get('company',None)
job=request.form.get('job','Yeep')
hire_date=request.form.get('hire_date',None)
status=request.form.get('status',None)
money=request.form.get('money',None)
accept_number=request.form.get('accept_number',None)
detail=request.form.get('detail',None)
pay_way=request.form.get('pay_way',None)
start_date=request.form.get('start_date',None)
end_date=request.form.get('end_date',None)
linkman=request.form.get('linkman',None)
job_update=dict()
if title is not None:
job_update['title']=title
if company is not None:
job_update['company']=company
if job is not None:
job_update['job']=job
if hire_date is not None:
job_update['hire_date']=hire_date
if status is not None:
job_update['status']=status
if money is not None:
job_update['money']=money
if accept_number is not None:
job_update['accept_number']=accept_number
if detail is not None:
job_update['detail']=detail
if pay_way is not None:
job_update['pay_way']=pay_way
if start_date is not None:
job_update['start_date']=start_date
if end_date is not None:
job_update['end_date']=end_date
if linkman is not None:
job_update['linkman']=linkman
#map((lamda x : job_update[x]=request.get(x) if x is not None ),request.form)
try:
session.query(Job).filter(Job.jid==job_id).update(job_update)
session.commit()
res={"result":'update success'}
except Exception as e:
# res=str(e)
res={"error":'update error'}
return jsonify(res)
#=======================================Protected=======================================
@auth.get_password
def get_password(username):
if username == 'ok':
return 'python'
return None
@auth.error_handler
def unauthorized():
return make_response(jsonify({'error': 'Unauthorized access'}), 403)
@app.errorhandler(400)
@app.errorhandler(404)
@app.errorhandler(405)
def not_found(error):
return make_response(jsonify({'error': 'Route Not found'}), 404)
#=======================================Private=======================================
def _job_obj_to_dict_(job):
d=dict()
d["jid"]=job.jid
d["title"]=job.title
d["company"]=job.company
d["hire_date"]=job.hire_date
d["status"]=job.status
d["money"]=job.money
d["accept_number"]=job.accept_number
d["detail"]=job.detail
d["pay_way"]=job.pay_way
d["start_date"]=job.start_date
d["linkman"]=job.linkman
return d
if __name__ =='__main__':
app.run(debug=True)
| [
"1252804799@qq.com"
] | 1252804799@qq.com |
0aa80d7dfbf7afc3a6099f45c6922fd1da7c10de | 6751a17f3d8523df503dd3935c321b4cdc1916a0 | /hello.py | 42546a9abb1a92fb36ba474a49258035e2c4f369 | [] | no_license | vishwas-upadhyaya/house_prediction | 63a203501d007694703fe4df27785ace411b09d6 | 04b876f954bd46cf235bccfa27c1f6555aad31bc | refs/heads/master | 2021-05-22T14:33:18.523112 | 2020-04-04T10:10:08 | 2020-04-04T10:10:08 | 252,964,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | import winsound
frquency=32767
duration=10000000
winsound.Beep(frquency,duration)
def sos():
for i in range(0,3):
winsound.Beep(5500,100)
for i in range(0,3):
winsound.Beep(5500,400)
for i in range(0, 3):
winsound.Beep(5500, 100)
sos()
| [
"vishwasu2001@gmail.com"
] | vishwasu2001@gmail.com |
a7130f914b80f01a50ef06a27dcc13ca21f8568e | 38dc4462ed52f76b8b0b5daf1ab93dd7591d9487 | /scripts/navigation_example/nav_env.py | cbeb5345da3b07e97c742e2e07709f5aaf46779a | [] | no_license | AmyPhung/rl_experiments | d1542ee928ee2c056e92773cd4275f713d77f9d2 | 99c3eb71124aeb514c893264c1815c01bffae2f7 | refs/heads/main | 2023-04-23T06:32:29.402412 | 2021-01-02T17:11:08 | 2021-01-02T17:11:08 | 323,732,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,657 | py | """
Simple system where a robot attempts to navigate towards a goal.
Based on cartpole example
Helpful Inpsiration:
Car racing example: https://github.com/openai/gym/blob/master/gym/envs/box2d/car_racing.py
Known Bugs: prev dist is never updated
"""
import math
import gym
from gym import spaces, logger
from gym.utils import seeding
import numpy as np
class NavEnv(gym.Env):
"""
Description:
A circular robot exists in an obstacle-free environment and can turn
or drive forwards. The robot and goal states are randomized, and the
goal is to drive the robot to the goal state.
Observation:
Type: Box(5)
Num Observation Min Max
0 Robot X-Position -sim x-limit sim x-limit
1 Robot Y-Position -sim y-limit sim y-limit
2 Robot direction -pi pi
3 Goal X-Position -sim x-limit sim x-limit
4 Goal Y-Position -sim y-limit sim y-limit
Actions:
Type: Box(2)
Num Action Min Max
0 Linear velocity (m/s) -0.5 0.5
1 Steering (rad/s) -3 3
Reward:
Reward is -0.7 for every time step, +100000 for reaching the goal, and
-100000 for leaving the map, +0.3 * traversed distance to goal in the
last timestep
Starting State:
All observations are assigned a uniform random value in [-0.05..0.05]
Episode Termination:
Pole Angle is more than 12 degrees.
Cart Position is more than 2.4 (center of the cart reaches the edge of
the display).
Episode length is greater than 200.
Solved Requirements:
Considered solved when the average return is greater than or equal to
195.0 over 100 consecutive trials.
"""
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 50
}
def __init__(self):
# Environment parameters
self.robot_diameter = 0.3 # meters
self.goal_diameter = 1.0 # meters
self.max_linear_vel = 0.4 # meters/sec
self.max_angular_vel = 5 # radians/sec
# Rewards
self.goal_reward = 1000
self.exit_reward = -100
# self.time_reward = -0.7
self.distance_reward = 0.3
# Distance at which to fail the episode
self.world_x_limit = 3
self.world_y_limit = 2
self.max_steps = 500
# State update parameters
self.tau = 0.02 # seconds between state updates
self.kinematics_integrator = 'euler'
# Meters to pixels conversion for render
self.scale = 150
# Set constraints on action and observation spaces
action_lim = np.array([self.max_linear_vel,
self.max_angular_vel],
dtype=np.float32)
obs_lim = np.array([self.world_x_limit,
self.world_y_limit,
np.pi,
self.world_x_limit,
self.world_y_limit],
dtype=np.float32)
self.action_space = spaces.Box(-action_lim, action_lim, dtype=np.float32)
self.observation_space = spaces.Box(-obs_lim, obs_lim, dtype=np.float32)
self.seed()
self.viewer = None
self.state = None
self.steps_beyond_done = None
self.prev_dist = None
self.num_steps = None
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
self.num_steps += 1
# Update state ---------------------------------------------------------
err_msg = "%r (%s) invalid" % (action, type(action))
assert self.action_space.contains(action), err_msg
r_x, r_y, r_theta, g_x, g_y = self.state
lin_vel, ang_vel = action
r_theta += self.tau * ang_vel
# Keep theta within -pi to pi range
r_theta = r_theta % (2*np.pi)
if r_theta > np.pi:
r_theta -= 2*np.pi
r_x += lin_vel*np.cos(r_theta)
r_y += lin_vel*np.sin(r_theta)
self.state = (r_x, r_y, r_theta, g_x, g_y)
# Update reward --------------------------------------------------------
# Check if robot is within goal
curr_dist = np.linalg.norm([r_x-g_x, r_y-g_y])
within_goal = bool(curr_dist < (self.goal_diameter - self.robot_diameter))
# Check if we left the field
outside_limit = bool(
r_x < -self.world_x_limit
or r_x > self.world_x_limit
or r_y < -self.world_x_limit
or r_y > self.world_x_limit)
# Check if we've gone over our time limit
over_time = bool(self.num_steps > self.max_steps)
# Compute rewards
if not within_goal and not outside_limit and not over_time:
# We're not done yet - neither terminating case has been reached
done = False
dist_delta = self.prev_dist - curr_dist
reward = (dist_delta * self.distance_reward) #+ self.time_reward
elif self.steps_beyond_done is None:
# We just reached a terminating case
done = True
if outside_limit:
reward = self.exit_reward
self.steps_beyond_done = 0
elif within_goal:
reward = self.goal_reward
self.steps_beyond_done = 0
elif over_time:
reward = self.exit_reward
self.steps_beyond_done = 0
else:
# We've reached a terminating case again
done = True
if self.steps_beyond_done == 0:
logger.warn(
"You are calling 'step()' even though this "
"environment has already returned done = True. You "
"should always call 'reset()' once you receive 'done = "
"True' -- any further steps are undefined behavior."
)
self.steps_beyond_done += 1
reward = 0.0
return np.array(self.state), reward, done, {}
def reset(self):
# State: robot-x, robot-y, robot-theta, goal-x, goal-y
self.state = [self.np_random.uniform(low=-self.world_x_limit,
high=self.world_x_limit),
self.np_random.uniform(low=-self.world_y_limit,
high=self.world_y_limit),
self.np_random.uniform(low=-np.pi,
high=np.pi),
self.np_random.uniform(low=-self.world_x_limit,
high=self.world_x_limit),
self.np_random.uniform(low=-self.world_y_limit,
high=self.world_y_limit)]
self.steps_beyond_done = None
self.num_steps = 0
r_x, r_y, r_theta, g_x, g_y = self.state
self.prev_dist = np.linalg.norm([r_x-g_x, r_y-g_y])
return np.array(self.state)
def render(self, mode='human'):
screen_width = int(self.scale * 2*self.world_x_limit)
screen_height = int(self.scale * 2*self.world_y_limit)
world_width = self.world_x_limit * 2
world_height = self.world_y_limit * 2
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)
robot = rendering.make_circle(self.scale * self.robot_diameter)
self.robottrans = rendering.Transform()
robot.add_attr(self.robottrans)
robot.set_color(0.0, 0.1, 0.5)
self.viewer.add_geom(robot)
direction = rendering.make_polyline([(0, 0),
(self.scale * self.robot_diameter, 0)])
self.directiontrans = rendering.Transform()
direction.add_attr(self.directiontrans)
direction.set_color(0.8, 0.8, 0.8)
self.viewer.add_geom(direction)
goal = rendering.make_circle(self.scale * self.goal_diameter)
self.goaltrans = rendering.Transform()
goal.add_attr(self.goaltrans)
goal.set_color(0.1, 0.5, 0.1)
self.viewer.add_geom(goal)
if self.state is None:
return None
x = self.state
robotx = (x[0] + self.world_x_limit) * self.scale
roboty = (x[1] + self.world_y_limit) * self.scale
goalx = (x[3] + self.world_x_limit) * self.scale
goaly = (x[4] + self.world_y_limit) * self.scale
self.robottrans.set_translation(robotx, roboty)
self.directiontrans.set_translation(robotx, roboty)
self.directiontrans.set_rotation(x[2])
self.goaltrans.set_translation(goalx, goaly)
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
if __name__ == "__main__":
# For testing step functions
import time
nav_env = NavEnv()
nav_env.reset()
time.sleep(1)
sample_action = nav_env.action_space.sample()
while True:
nav_env.render()
state, reward, done, _ = nav_env.step(sample_action)
print(reward)
if done:
while True:
pass
| [
"amyngph@gmail.com"
] | amyngph@gmail.com |
fc274e273effbbfd8f37c4b72009be0dc1e540ac | 7445e5bf01fa41766cc04ad976dd42c8e88cb1fc | /welcome2.py | 7fe2944270e5c358a5530e261e443914ffd6207a | [
"Apache-2.0"
] | permissive | rakshit1994/powertrack-1 | 338d5e2a7c4af66726538d3dc2c5653af9e31f17 | 1460a351b09173e2e3393d53828c35797a7baa96 | refs/heads/master | 2016-09-12T13:19:29.540292 | 2016-05-12T06:56:44 | 2016-05-12T06:56:44 | 58,616,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,799 | py | # Copyright 2015 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os,json
from flask import Flask, jsonify, render_template, redirect, url_for, request
from urllib import urlopen
app = Flask(__name__)
count = 0
final=""
x=""
xx=""
y=""
def keywords(term):
keyword = term
if keyword == "":
return ""
final=""
ekeyword = keyword
nkeyword = ekeyword.split(',')
for i in range(0,len(nkeyword)):
if i == 0:
nkeyword[i] = "\\\\\"" + nkeyword[i] + "\\\\\""
else:
nkeyword[i] = " OR "+"\\\\\"" + nkeyword[i] + "\\\\\""
for i in range(0,len(nkeyword)):
final+=nkeyword[i]
final = "(" + final + ")"
return final
def context(contxt):
keyword = contxt
if keyword == "":
return ""
final=""
ekeyword = keyword
nkeyword = ekeyword.splitlines()
for i in range(0,len(nkeyword)):
if i == 0:
nkeyword[i] = "\\\\\"" + nkeyword[i] + "\\\\\""
else:
nkeyword[i] = " OR "+"\\\\\"" + nkeyword[i] + "\\\\\""
for i in range(0,len(nkeyword)):
final+=nkeyword[i]
final = "(" + final + ")"
return final
def exclude(excl):
keyword = excl
if keyword == "":
return ""
final=""
ekeyword = keyword
nkeyword = ekeyword.splitlines()
for i in range(0,len(nkeyword)):
if i == 0:
nkeyword[i] = "\\\\\"" + nkeyword[i] + "\\\\\""
else:
nkeyword[i] = " OR "+"\\\\\"" + nkeyword[i] + "\\\\\""
for i in range(0,len(nkeyword)):
final+=nkeyword[i]
final = "-(" + final + ")"
return final
@app.route('/',methods = ['GET','POST'])
def Query():
x=""
xx=""
y=""
final=""
if request.method == 'POST':
if request.form['keywords']:
term=request.form['keywords']
contxt=request.form['context']
excl=request.form['excludes']
x = str(keywords(term))
xx = str(context(contxt))
y = str(exclude(excl))
final = "{\"value\":\""+x+" "+xx+" "+y+"\"},"
return render_template('index.html',final=final)
port = os.getenv('PORT', '5000')
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(port),debug=True)
| [
"singhrakshit@gmail.com"
] | singhrakshit@gmail.com |
530fdc5d6a45f3971b47b706210ea10a43889957 | 24ea9f61a3555df660e770c4f84d34097aff152d | /killswitch.py | 50bc7b51449d550d3f0ef9fa14fb62fb07eae4a9 | [] | no_license | Scyless/killswitch | c848feb2a4b29332eb27f0848a23bb716581f96e | 4e828e1ef6b7e6aa3a0b1ec94b1bc2907c34edd4 | refs/heads/master | 2020-07-06T14:48:45.991309 | 2019-08-18T21:11:42 | 2019-08-18T21:11:42 | 203,056,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,145 | py | #'' The killswitch '''#
#'''Created on Aug 17, 2019'''#
#'''@author: scy'''#
import time
import os
import urllib.request
import psutil
from datetime import datetime
import easyufw as ufw
from playsound import playsound
#'''Start the client'''#
ovpn = "openvpn" in (p.name() for p in psutil.process_iter())
def client():
if ovpn and 'Status: active' in ufw.status():
os.system('sudo -H -u scy bash -c "/usr/bin/qbittorrent" >&/dev/null &')
else:
print('Something isn\'t right. Nuke it three times.')
nuke()
return
#'''Absolutely nuke the shit out of the client'''#
def nuke():
os.system('killall qbittorrent & sudo killall qbittorrent >&/dev/null &')
os.system('pkill -9 qbittorrent & sudo pkill -9 qbittorrent >&/dev/null &')
os.system('pgrep qbittorrent | xargs kill -9 & pgrep qbittorrent | sudo xargs kill -9 >&/dev/null &')
#'''Check our IP address'''#
def check():
ipaddr = (urllib.request.urlopen('https://ident.me').read().decode('utf8'))
ip = ('213.152.162.89')
if ip in ipaddr:
print(datetime.now().strftime('%H:%M:%S') + ' No leak.')
return
client()
#''' Shield 1'''#
while True:
time.sleep(0.5)
try:
check()
except:
nuke()
os.system('notify-send "IP address leaked, nuking."')
print(datetime.now().strftime('%H:%M:%S') + ' IP address leaked, nuking.')
playsound('./gib.mp3')
break
#'''Shield 2'''#
ovpn = "openvpn" in (p.name() for p in psutil.process_iter())
try:
if ovpn and 'Status: active' in ufw.status():
print(datetime.now().strftime('%H:%M:%S') + ' OVPN & UFW running.')
elif ovpn is 0 or'Status: inactive' in ufw.status():
nuke()
os.system('notify-send "Firewall or VPN failed, nuking."')
print(datetime.now().strftime('%H:%M:%S') + ' Firewall or VPN failed, nuking.')
playsound('./gib.mp3')
break
except (ValueError) as err:
print(datetime.now().strftime('%H:%M:%S ') + err)
playsound('./gib.mp3')
nuke() | [
"noreply@github.com"
] | Scyless.noreply@github.com |
235e928730c8d8c9fde2e8fa3eaea25bfd213334 | ba2bfad32435a7c29b2e3cd084778f9031e35b46 | /addepiclc_lcmath.py | 032f928dc1331fd8ef5cd3619dc15b963a672354 | [
"MIT"
] | permissive | evandromr/python_scitools | c13953df0fc32d0ece8cfc0001d9bc88ce19471b | afe8ad113245069af0ef2a05c69046938459ab2d | refs/heads/master | 2020-05-16T20:46:02.475071 | 2015-01-26T19:05:38 | 2015-01-26T19:05:38 | 12,600,983 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | py | #!/usr/env python
import glob
import subprocess
if __name__ == '__main__':
'''
Add lightcurves
MOS1 + MOS2 = MOSS
PN + MOSS = EPIC
'''
mos1files = glob.glob('mos1_lc_net*')
mos2files = glob.glob('mos2_lc_net*')
pnfiles = glob.glob('pn_lc_net*')
mos1files.sort()
mos2files.sort()
pnfiles.sort()
mossfiles = ['moss'+mos1[4:] for mos1 in mos1files]
epicfiles = ['epic'+mos1[4:] for mos1 in mos1files]
for mos1, mos2, moss in zip(mos1files, mos2files, mossfiles):
subprocess.call(['lcmath', mos1, mos2, moss, '1.', '1.', 'yes'])
for pn, moss, epic in zip(pnfiles, mossfiles, epicfiles):
subprocess.call(['lcmath', pn, moss, epic, '1.', '1.', 'yes'])
mos1files = glob.glob('mos1_lc_bkg*')
mos2files = glob.glob('mos2_lc_bkg*')
pnfiles = glob.glob('pn_lc_bkg*')
mos1files.sort()
mos2files.sort()
pnfiles.sort()
mossfiles = ['moss'+mos1[4:] for mos1 in mos1files]
epicfiles = ['epic'+mos1[4:] for mos1 in mos1files]
for mos1, mos2, moss in zip(mos1files, mos2files, mossfiles):
subprocess.call(['lcmath', mos1, mos2, moss, '1.', '1.', 'yes'])
for pn, moss, epic in zip(pnfiles, mossfiles, epicfiles):
subprocess.call(['lcmath', pn, moss, epic, '1.', '1.', 'yes'])
| [
"evandromartinezribeiro@gmail.com"
] | evandromartinezribeiro@gmail.com |
752d257e3a1e350ceddddf586666ae7e97341291 | 75e2c3f02f99273549405609804530c79320fc63 | /app/main/views.py | 2656bc74a7a3abdcd8d2447c702aeb92a5b160ae | [] | no_license | Sanagiig/myBlog | 8dbd27e2fe8aabd6c2bddba20863439d97254060 | 1bfcbd7a114df2b354bc8800a6fecc8e1bce568d | refs/heads/master | 2021-07-17T21:54:34.371501 | 2017-10-25T03:31:10 | 2017-10-25T03:31:10 | 108,208,889 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,249 | py | from flask import request,current_app,render_template,g,session,render_template_string
from flask_login import current_user
import json
from app.main import main
from app.models import Article,Category,Tag,\
Comment,Reply,\
UserInfo,Article_PutPorC,Comment_PutPorC,Reply_PutPorC,\
Anonymous
#首页
#返回文章
@main.route('/',methods=['GET','POST'])
def index():
page = request.args.get('page',1,type=int)
search_category = request.args.get('category','',type=str)
search_tag = request.args.get('tag','',type=str)
search_text = request.args.get('search','',type=str)
if search_category:
pagination = Category.query.filter_by(name=search_category).first()\
.get_articles()\
.paginate(page=page,per_page=current_app.config['ARTICLE_PER_PAGE'],error_out=False)
if search_tag:
pagination = Tag.query.filter_by(name=search_tag).first()\
.get_articles()\
.paginate(page=page,per_page=current_app.config['ARTICLE_PER_PAGE'],error_out=False)
if search_text:
pagination = Article.query.filter(Article.title.like('%'+ search_text + '%'))\
.paginate(page=page,per_page=current_app.config['ARTICLE_PER_PAGE'],error_out=False)
if not (search_category or search_tag or search_text):
pagination = Article.query.order_by(Article.created_time.desc()).paginate(
page,per_page=current_app.config['ARTICLE_PER_PAGE'],error_out=False)
categories = Category.query.all()
tags = Tag.query.all()
articles = pagination.items
return render_template('blog/index.html',user_info=current_user,
articles=articles,pagination=pagination,comment='',
categories=categories,tags=tags,
search_category=search_category,search_tag=search_tag,search_text=search_text)
@main.app_errorhandler(403)
def err403(e):
return render_template('blog/404.html',info=e)
@main.app_errorhandler(404)
def err404(e):
return render_template('blog/404.html',info=e,user_info='')
@main.route('/test')
def test():
data={
'code':session['check_code'],
}
return json.dumps(data) | [
"laiwenjunhhh@163.com"
] | laiwenjunhhh@163.com |
38bc5a82aded1c7c235e5a86140665f0845a4f34 | a93f2f699e666233bcd546b759e60adc36190129 | /Action/ac_CPA_Report_Sale.py | 7ca39ad18beabeac64476a29c89f20d9ccaa35d1 | [] | no_license | Javis1205/cpa_report | a85ae8fed3238f290631c879b05443812ccdeada | 6c8f1260ab00ca010b39cf612c2b2d091e2c4256 | refs/heads/master | 2021-01-20T19:39:44.766124 | 2016-08-11T03:06:53 | 2016-08-11T03:06:53 | 65,373,132 | 0 | 1 | null | 2016-08-11T03:07:43 | 2016-08-10T10:21:31 | Python | UTF-8 | Python | false | false | 6,497 | py | import sys
sys.path.append('Class/')
sys.path.append('Functions/')
sys.path.append('Config/')
import config
import convert_datetime, datetime, functions
import cls_DataFactory_DS, cls_AssemblyWorker, cls_GSS
def return_dict_data(sta_date, end_date):
sql_update_data_fixed = """ SELECT MIN(ord_id), MAX(ord_id) FROM orders_new
WHERE ord_date BETWEEN {}
AND {} """.format(sta_date, end_date)
max_min_ord_id_fixed = Data.Fetch_All(sql_update_data_fixed)
dic_return_fixed = {}
list_ord_id = []
for i in functions.my_range(max_min_ord_id_fixed[0][0], max_min_ord_id_fixed[0][1],1000):
# get estore_id
lm = i + 1000 # limit
if (lm > max_min_ord_id_fixed[0][1]):
lm = max_min_ord_id_fixed[0][1]
if(lm <= max_min_ord_id_fixed[0][1]):
sql_get = """SELECT ord_id, ord_estore_id, ord_code, ord_date FROM orders_new
WHERE ord_id BETWEEN {0}
AND {1}
AND ord_estore_id IN({2})""".format(i, lm,','.join(GSpS.Get_Value_Gs_By_Range('')))
dic = Data.Fetch_All(sql_get)
for j in range(len(dic)):
est_id = dic[j][1]
list_ord_id.append(dic[j][0]);
dic_return_fixed.update({'orders_product_{0}'.format(est_id % 20) : list_ord_id})
list_fixed = []
for k in dic_return_fixed:
# chuyển list sang string
str_ord_id = ', '.join(str(e) for e in dic_return_fixed[k])
# sql lấy ra thông tin của đơn hàng
sql_info_order_fixed = """ SELECT ord_code, ord_phone, ord_estore_id, ord_status, pro_id, pro_category, op_price, op_quantity, onc_status, orr_source_referer FROM {0}
LEFT JOIN orders_new ON op_order_id = ord_id
LEFT JOIN orders_new_checked ON ord_id = onc_order_id
LEFT JOIN orders_referer ON ord_id = orr_order_id
LEFT JOIN products_multi ON op_product_id = pro_id
WHERE op_order_id IN ({1}) """.format(k, str_ord_id)
list_fixed.append(Data.Fetch_All(sql_info_order_fixed))
return list_fixed
#
def update_gs_data(data , sh_name, current_row):
row = len(data)
col = 0
if (row >= 1):
col = len(data[0])
max_cell_label = sh.get_addr_int(row + int(current_row) ,col)
rg = 'A{}:{}'.format(current_row, max_cell_label)
GSpS.Set_Value(data, sh_name, rg)
#
def update_row_in_tab_config(row):
range_update = 'B{}'.format(config.gss_config['position_row_update'] + 1)
GSpS.Set_Value([[row]], config.gss_config['tab_config'], range_update)
#
# action
DF_DS = cls_DataFactory_DS.DataFactoryDS()
AW = cls_AssemblyWorker.AssemblyWorker(DF_DS)
AW.Perform()
Data = AW.DS
gs = AW.Keeper
GSpS = gs.gss()
Parser = AW.Parser
# get position row insert
data_range = GSpS.Get_Value(config.gss_config['col_data'], config.gss_config['tab_config'])
sh_name = data_range[config.gss_config['tab_raw_data']].value
row_update = data_range[config.gss_config['position_row_update']].value
sh = GSpS.Get_Sheet(sh_name)
start_date = data_range[config.gss_config['min_date']].value #"01/05/2016"
int_start_date, int_first_day_of_month_ago, int_today, int_first_day_of_month_before = functions.get_first_of_month(start_date, 15)
# 01/05/2016 , 01/8/2016, 16/8/2016, 16
if (int(row_update) == 2):
list_data_fixed = return_dict_data(int_start_date,(int_first_day_of_month_ago - (24 * 60 * 60)))
# list_data_fixed = [
# ['dailyphukien_1503852_thang5_6','1262268262','1034043','5','406271','4036','85000','1','10','None'],
# ['dailyphukien_1503981_20160505','1627368583','1034043','5','4611197','5781','120000','1','10','None'],
# ['Senkai_1506240_20160507','913537661','1271175','100','5671877','15389','68000','3','15','None'],
# ['Senkai_1506283_20160507','913537661','1271175','5','5761211','15389','350000','1','10','None'],
# ['dailyphukien_1506415_20160507','903936012','1034043','5','1619215','464','450000','1','10','None'],
# ['dailyphukien_1507806_20160508','989942586','1034043','5','453464','5699','50000','1','10','None']
# ]
data_gs_fixed = Parser.ProcessData(list_data_fixed,int(row_update))
update_gs_data(data_gs_fixed,sh_name,int(row_update))
row_update = int(row_update) + len(data_gs_fixed)
update_row_in_tab_config(row_update)
# nếu ngày bằng 16
if (int(int_first_day_of_month_before) != 0):
# cập nhật lại row update trên tab config = số hiện tại + số bản ghi trong tháng trc
list_data_of_month_before = return_dict_data(int_first_day_of_month_before, int_first_day_of_month_ago - (24 * 60 * 60))
# list_data_of_month_before = [
# ['giadunggiarevn_1646262_thang7','1262268262','1034043','5','406271','4036','85000','1','10','None'],
# ['giadunggiarevn_1646262_20160724','1627368583','1034043','5','4611197','5781','120000','1','10','None'],
# ['myphamtrangnhung1_1646319_20160724','913537661','1271175','100','5671877','15389','68000','3','15','cm=noibo_cpa_nguyenhuong_giadungiare_5841719_dm_313x120'],
# ['Senkai_1506283_20160507','913537661','1271175','5','5761211','15389','350000','1','10','None'],
# ['dailyphukien_1646719_20160724','903936012','1034043','5','1619215','464','450000','1','10','None'],
# ['myphamtrangnhung1_1646319_20160724','989942586','1034043','5','453464','5699','50000','1','10','cm=noibo_cpa_nguyenhuong_giadungiare_5841719_dm_313x120']
# ]
data_gs_of_month_before = Parser.ProcessData(list_data_of_month_before,int(row_update))
update_gs_data(data_gs_of_month_before,sh_name,int(row_update))
row_update = int(row_update) + len(data_gs_of_month_before)
update_row_in_tab_config(row_update)
#
list_data = return_dict_data(int_first_day_of_month_ago, int_today)
# list_data = [
# ['verygoodvn_1679495_thang_8','1262268262','1034043','5','406271','4036','85000','1','10','None'],
# ['giadunggiarevn_1646262_20160724','1627368583','1034043','5','4611197','5781','120000','1','10','None'],
# ['myphamtrangnhung1_1646319_20160724','913537661','1271175','100','5671877','15389','68000','3','15','cm=noibo_cpa_nguyenhuong_giadungiare_5841719_dm_313x120'],
# ['Senkai_1506283_20160507','913537661','1271175','5','5761211','15389','350000','1','10','None'],
# ['dailyphukien_1646719_20160724','903936012','1034043','5','1619215','464','450000','1','10','None'],
# ['dailyphukien_1646719_thang_9','903936012','1034043','5','1619215','464','450000','1','10','None'],
# ['myphamtrangnhung1_1646319_20160724','989942586','1034043','5','453464','5699','50000','1','10','cm=noibo_cpa_nguyenhuong_giadungiare_5841719_dm_313x120']
# ]
data_gs = Parser.ProcessData(list_data,int(row_update))
update_gs_data(list_data,sh_name,int(row_update)) | [
"dai12051997@gmail.com"
] | dai12051997@gmail.com |
6da0310c39bad903d29b919f240b4d114494c7a3 | 0fa774e0aadacde7b8420c5012cd9e7dcffc7cee | /gyro_3dv3.py | cc567f2ab0a6c6efd1c876f1a5520a9ee4b59d7b | [] | no_license | DreamKiller-Z/smart-recipe | 06be3215f021de077ce4dc8dc9e0c82213760230 | 38256d7d8cc0f3135b13740bf94f009d7b5fa59d | refs/heads/master | 2020-05-23T21:40:45.903409 | 2019-10-04T01:27:46 | 2019-10-04T01:27:46 | 186,957,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,661 | py | #!/usr/bin/python
import smbus
import math
import time
import pandas as pd
import numpy as np
import os
# from pynput.keyboard import Key, Listener
# import cv2
# Register
power_mgmt_1 = 0x6b
power_mgmt_2 = 0x6c
def read_byte(reg):
return bus.read_byte_data(address, reg)
def read_word(reg):
h = bus.read_byte_data(address, reg)
l = bus.read_byte_data(address, reg+1)
value = (h << 8) + l
return value
def read_word_2c(reg):
val = read_word(reg)
if (val >= 0x8000):
return -((65535 - val) + 1)
else:
return val
def dist(a,b):
return math.sqrt((a*a)+(b*b))
def get_y_rotation(x,y,z):
radians = math.atan2(x, dist(y,z))
return -math.degrees(radians)
def get_x_rotation(x,y,z):
radians = math.atan2(y, dist(x,z))
return math.degrees(radians)
def sendMessageTo(targetBluetoothMacAddress):
port = 1
sock=bluetooth.BluetoothSocket( bluetooth.RFCOMM )
sock.connect((targetBluetoothMacAddress, port))
sock.send("hello!!")
sock.close()
# Collect events until released
#gather 10 times of data
def collecting(target):
# target = 0
output = []
dataset = []
start = time.time()
for i in range(100):
print("Gyroscope")
print("--------")
gyroskop_xout = read_word_2c(0x43)
gyroskop_yout = read_word_2c(0x45)
gyroskop_zout = read_word_2c(0x47)
gy_x = np.divide(gyroskop_xout , 131)
gy_y = np.divide(gyroskop_yout , 131)
gy_z = np.divide(gyroskop_zout , 131)
print ("gyroscope_xout: ", ("%5d" % gyroskop_xout), " scale: ", gy_x)
print ("gyroscope_yout: ", ("%5d" % gyroskop_yout), " scale: ", gy_y)
print ("gyroscope_zout: ", ("%5d" % gyroskop_zout), " scale: ", gy_z)
print ("Acceleration")
print ("---------------------")
beschleunigung_xout = read_word_2c(0x3b)
beschleunigung_yout = read_word_2c(0x3d)
beschleunigung_zout = read_word_2c(0x3f)
beschleunigung_xout_skaliert = np.divide(beschleunigung_xout , 16384.0)
beschleunigung_yout_skaliert = np.divide(beschleunigung_yout , 16384.0)
beschleunigung_zout_skaliert = np.divide(beschleunigung_zout , 16384.0)
print ("accel_xout: ", ("%6d" % beschleunigung_xout), " scale: ", beschleunigung_xout_skaliert)
print ("accel_yout: ", ("%6d" % beschleunigung_yout), " scale: ", beschleunigung_yout_skaliert)
print ("accel_zout: ", ("%6d" % beschleunigung_zout), " scale: ", beschleunigung_zout_skaliert)
print ("X Rotation: " , get_x_rotation(beschleunigung_xout_skaliert, beschleunigung_yout_skaliert, beschleunigung_zout_skaliert))
print ("Y Rotation: " , get_y_rotation(beschleunigung_xout_skaliert, beschleunigung_yout_skaliert, beschleunigung_zout_skaliert))
data = [gy_x, gy_y, gy_z, beschleunigung_xout_skaliert, beschleunigung_yout_skaliert, beschleunigung_zout_skaliert, target]
dataset.append(data)
time.sleep(0.2)
print("total time: ", time.time() - start)
# output.append(dataset)
# output.append(target)
return dataset
bus = smbus.SMBus(1) # bus = smbus.SMBus(0) fuer Revision 1
address = 0x68 # via i2cdetect
# Aktivieren, um das Modul ansprechen zu koennen
bus.write_byte_data(address, power_mgmt_1, 0)
process = time.time()
try:
df_read=pd.read_csv("testdata.csv")
except FileNotFoundError:
f = open("testdata.csv", "w")
except pd.io.common.EmptyDataError:
pass
choose = True
while(choose):
print ("---------------------")
print("1. collect data \n2. save and end \n3. delete csv \n4. look at testdata.csv")
print ("---------------------")
answer = input()
print(answer)
if answer == '1':
print("target label? 0 for static")
choose_label = input()
# print("collecting data")
data = collecting(choose_label)
df_temp = pd.DataFrame(np.array(data), columns = ["g_x", "g_y", "g_z", "a_x", "a_y", "a_z","target"])
with open("testdata.csv", "a") as f:
df_temp.to_csv(f, index =False, header =False)
# print(df_temp.tail(5))
# result = processing(data)
data = np.asarray(data)
print(data)
print(data.shape)
elif answer == '2':
choose = False
print("end")
# sendMessageTo("B8:27:EB:16:64:7A")
elif answer =='3':
os.remove("testdata.csv")
print("delete csv")
choose = False
elif answer =="4":
df_read = pd.read_csv("testdata.csv")
print(df_read.tail(10))
choose =False
| [
"zyw666@uw.edu"
] | zyw666@uw.edu |
c606713dfad3e29420a9b92000d57f563d35f17f | db31945a728752ba954a025ef0dddc946b27d2b8 | /vaccineAvailabilityNotifier/commands/router.py | b44126a5bf68016f6bbd12f208a357499bc832cd | [
"MIT"
] | permissive | bhaskernitt/vaccine-availability-notifier | 0ed56db3f4efa9eeb17e93c96fbd5ee10e51564d | 20d020b2050ca76dca190dc6e767dcc5697339b8 | refs/heads/master | 2023-05-08T22:47:23.049439 | 2021-06-03T15:38:16 | 2021-06-03T15:38:16 | 365,131,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,288 | py | import click
from .sub_commands import schedule_job_cmd
def safe_entry_point():
entry_point()
# except Exception as e:
# print(Fore.RED, '------------------------------------------')
# print(Fore.RED, 'unable to process../ COMMAND NOT NOT FOUND',e)
# print(Fore.RED, '------------------------------------------')
# exit(0)
def get_cmd_help():
return "------------------------------------------------------------------------\nnotify yourself when vaccine is " \
"available " \
"\n-------------------------------------------------------------------------- "
@click.group(help=get_cmd_help())
@click.pass_context
def entry_point(ctx):
"""notify yourself when vaccine is available"""
# info cmd
@entry_point.command("info")
@click.pass_context
def info(ctx):
"""notify yourself when vaccine is available"""
# feedback command
@entry_point.command("feedback", help='provide the feedback')
@click.pass_context
@click.option("--output-format", "-of", default='table', type=str, required=False)
def feedback(ctx, output_format):
"""provide the feedback"""
entry_point.add_command(schedule_job_cmd.cmd)
entry_point.add_command(schedule_job_cmd.cmd_get_state_id)
entry_point.add_command(schedule_job_cmd.cmd_get_district_id)
| [
"bhasker.nandkishor@gmail.com"
] | bhasker.nandkishor@gmail.com |
7eb8ea70ec6bbc7a25369c496fa0b110a37dcb37 | c29f7c0ead08c93b0185daa70e129ffe016a6aae | /store/migrations/0024_auto_20171114_1432.py | c5717e7477357d97d52a7cb58a8c4fb49a7ac7b3 | [] | no_license | amandacav/media_v1 | 280ccd368f9f0e358315d08fb9001054612bc14b | bb903bd2b21088b5be96ff1bda8313bd6165e24a | refs/heads/master | 2022-12-22T12:09:28.210106 | 2021-02-16T21:20:48 | 2021-02-16T21:20:48 | 92,543,691 | 0 | 1 | null | 2022-12-07T23:59:17 | 2017-05-26T19:49:00 | HTML | UTF-8 | Python | false | false | 632 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-11-14 19:32
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('store', '0023_auto_20171114_1132'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='user_profile', to=settings.AUTH_USER_MODEL),
),
]
| [
"harrisons1@hhmi.org"
] | harrisons1@hhmi.org |
4fd102c324075eef6ff94a31890e60d8667dd49e | 11928b93ef00667411bcb1b6b32121ddf3566ee3 | /index.py | b6002a854188933a8dab6de393c6434f36797c9a | [] | no_license | Gersonrecinos02/examen-finalb | d4055469ae1c2957222221d7b54432b77222588e | 0fb5371335eab1183681b520df3dc2f75ac9c4db | refs/heads/master | 2022-10-02T18:36:56.395914 | 2020-06-06T23:28:09 | 2020-06-06T23:28:09 | 270,005,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,422 | py | from tkinter import *
root = Tk()
ancho = 400
alto = 270
root.geometry(str(ancho)+"x"+str(alto))
root.title("FINAL")
saludo = Label(text="Bienvenido",font=("Agency FB",14)).place(x=150,y=5)
lblname=Label(text="Nombre",font=("Agency FB",16)).place(x=80,y=30)
entrada=StringVar()
nombre =Entry(root,textvariable=entrada).place(x=135,y=40)
lblape=Label(text="Apellido",font=("Agency FB",16)).place(x=80,y=60)
entrada=StringVar()
apellido=Entry(root,textvariable=entrada).place(x=135,y=70)
lbldia=Label(text="Dia",font=("Agency FB",16)).place(x=80,y=90)
entrada=StringVar()
dia=Entry(root,textvariable=entrada).place(x=135,y=100)
lblmes=Label(text="Mes",font=("Agency FB",16)).place(x=80,y=120)
entrada=StringVar()
mes=Entry(root,textvariable=entrada).place(x=135,y=130)
lblaño=Label(text="Año",font=("Agency FB",16)).place(x=80,y=150)
entrada=StringVar()
año=Entry(root,textvariable=entrada).place(x=135,y=160)
btnFuncion1 = Button(root, text= "FUNCION 1",font=("Agency FB",10),width=10).place(x=90,y=180)
btnFuncion1 = Button(root, text= "FUNCION 2",font=("Agency FB",10),width=10).place(x=140,y=180)
btnFuncion1 = Button(root, text= "FUNCION 3",font=("Agency FB",10),width=10).place(x=190,y=180)
btnFuncion4 = Button(root, text= "FUNCION 4",font=("Agency FB",10),width=10).place(x=240,y=180)
btnFuncion5 = Button(root, text= "FUNCION 5",font=("Agency FB",10),width=10).place(x=290,y=180)
root.mainloop()
| [
"gersonenriquerecinoslemus@gmail.com"
] | gersonenriquerecinoslemus@gmail.com |
e7cdfda84040ee41a1ec5676b2980c6c554743da | bc5ecee964520ecfb6df6cd6df79569a4a418a68 | /Seance 1/Exo05.py | 6b0d8c4480d386c6efcbab5c9b802e9de8dbbea9 | [] | no_license | EnzoZuniga/Python | 839225c7b904fc2f0379ed734432a7d948091b19 | 1c0ffdb8f25c933b82ba2f6f52b9e8489c3c8c4e | refs/heads/master | 2020-04-01T23:13:02.205515 | 2019-03-15T15:29:34 | 2019-03-15T15:29:34 | 153,748,543 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | print('Dans cette algorithme, la première variable prendra la valeur de la troisième variable, et la deuxième de la quatrème.')
a=int(input('Entrez la valeur de votre première variable :'))
b=int(input('Entrez la valeur de votre seconde variable :'))
c=int(input('Entrez la valeur de votre troisième variable :'))
d=int(input('Entrez la valeur de votre quatrième variable :'))
a, b, c, d = c, d, a, b
print('Première variable=',a,'Deuxième variable=',b,'Troisime variable=',c,'Quatrième variable=',d)
| [
"ezuniga@sio.jjr"
] | ezuniga@sio.jjr |
83797b20d04be3d673211f36fc563230f6841f17 | c3c51ba1a2981bba05f2a82ff3b536bbcc6e72b5 | /owtf/api/handlers/plugin.py | f60803092a971f08c59d11c688da5fe50f7ec6de | [
"BSD-3-Clause"
] | permissive | alienus/owtf | 83761a501e21f768eba7a2dc272d03e6bd201be1 | b6d81fac83c324c2b8c6fe2a974c036881c1fcd0 | refs/heads/master | 2021-04-28T09:06:41.048563 | 2018-02-20T05:27:57 | 2018-02-20T05:27:57 | 122,031,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,382 | py | """
owtf.api.plugin
~~~~~~~~~~~~~~~
"""
import collections
import logging
import tornado.gen
import tornado.httpclient
import tornado.web
from owtf.api.handlers.base import APIRequestHandler
from owtf.lib import exceptions
from owtf.managers.mapping import get_all_mappings
from owtf.managers.plugin import get_types_for_plugin_group, get_all_plugin_dicts, get_all_test_groups
from owtf.managers.poutput import get_all_poutputs, update_poutput, delete_all_poutput
class PluginDataHandler(APIRequestHandler):
SUPPORTED_METHODS = ['GET']
# TODO: Creation of user plugins
def get(self, plugin_group=None, plugin_type=None, plugin_code=None):
try:
filter_data = dict(self.request.arguments)
if not plugin_group: # Check if plugin_group is present in url
self.write(get_all_plugin_dicts(self.session, filter_data))
if plugin_group and (not plugin_type) and (not plugin_code):
filter_data.update({"group": plugin_group})
self.write(get_all_plugin_dicts(self.session, filter_data))
if plugin_group and plugin_type and (not plugin_code):
if plugin_type not in get_types_for_plugin_group(self.session, plugin_group):
raise tornado.web.HTTPError(400)
filter_data.update({"type": plugin_type, "group": plugin_group})
self.write(get_all_plugin_dicts(self.session, filter_data))
if plugin_group and plugin_type and plugin_code:
if plugin_type not in get_types_for_plugin_group(self.session, plugin_group):
raise tornado.web.HTTPError(400)
filter_data.update({"type": plugin_type, "group": plugin_group, "code": plugin_code})
# This combination will be unique, so have to return a dict
results = get_all_plugin_dicts(self.session, filter_data)
if results:
self.write(results[0])
else:
raise tornado.web.HTTPError(400)
except exceptions.InvalidTargetReference as e:
logging.warn(e.parameter)
raise tornado.web.HTTPError(400)
class PluginNameOutput(APIRequestHandler):
SUPPORTED_METHODS = ['GET']
def get(self, target_id=None):
"""Retrieve scan results for a target.
:return: {code: {data: [], details: {}}, code2: {data: [], details: {}} }
This API doesn't return `output` section as part of optimization.
`data` is array of scan results according to `plugin_types`.
`details` contains info about `code`.
"""
try:
filter_data = dict(self.request.arguments)
results = get_all_poutputs(self.session, filter_data, target_id=int(target_id), inc_output=False)
# Get mappings
mappings = get_all_mappings(self.session)
# Get test groups as well, for names and info links
groups = {}
for group in get_all_test_groups(self.session):
group['mappings'] = mappings.get(group['code'], {})
groups[group['code']] = group
dict_to_return = {}
for item in results:
if (dict_to_return.has_key(item['plugin_code'])):
dict_to_return[item['plugin_code']]['data'].append(item)
else:
ini_list = []
ini_list.append(item)
dict_to_return[item["plugin_code"]] = {}
dict_to_return[item["plugin_code"]]["data"] = ini_list
dict_to_return[item["plugin_code"]]["details"] = groups[item["plugin_code"]]
dict_to_return = collections.OrderedDict(sorted(dict_to_return.items()))
if results:
self.write(dict_to_return)
else:
raise tornado.web.HTTPError(400)
except exceptions.InvalidTargetReference as e:
logging.warn(e.parameter)
raise tornado.web.HTTPError(400)
except exceptions.InvalidParameterType as e:
logging.warn(e.parameter)
raise tornado.web.HTTPError(400)
class PluginOutputHandler(APIRequestHandler):
SUPPORTED_METHODS = ['GET', 'POST', 'PUT', 'PATCH', 'DELETE']
def get(self, target_id=None, plugin_group=None, plugin_type=None, plugin_code=None):
try:
filter_data = dict(self.request.arguments)
if plugin_group and (not plugin_type):
filter_data.update({"plugin_group": plugin_group})
if plugin_type and plugin_group and (not plugin_code):
if plugin_type not in get_types_for_plugin_group(self.session, plugin_group):
raise tornado.web.HTTPError(400)
filter_data.update({"plugin_type": plugin_type, "plugin_group": plugin_group})
if plugin_type and plugin_group and plugin_code:
if plugin_type not in get_types_for_plugin_group(self.session, plugin_group):
raise tornado.web.HTTPError(400)
filter_data.update({
"plugin_type": plugin_type,
"plugin_group": plugin_group,
"plugin_code": plugin_code
})
results = get_all_poutputs(self.session, filter_data, target_id=int(target_id), inc_output=True)
if results:
self.write(results)
else:
raise tornado.web.HTTPError(400)
except exceptions.InvalidTargetReference as e:
logging.warn(e.parameter)
raise tornado.web.HTTPError(400)
except exceptions.InvalidParameterType as e:
logging.warn(e.parameter)
raise tornado.web.HTTPError(400)
def post(self, target_url):
raise tornado.web.HTTPError(405)
def put(self):
raise tornado.web.HTTPError(405)
def patch(self, target_id=None, plugin_group=None, plugin_type=None, plugin_code=None):
try:
if (not target_id) or (not plugin_group) or (not plugin_type) or (not plugin_code):
raise tornado.web.HTTPError(400)
else:
patch_data = dict(self.request.arguments)
update_poutput(self.session, plugin_group, plugin_type, plugin_code, patch_data, target_id=target_id)
except exceptions.InvalidTargetReference as e:
logging.warn(e.parameter)
raise tornado.web.HTTPError(400)
except exceptions.InvalidParameterType as e:
logging.warn(e.parameter)
raise tornado.web.HTTPError(400)
def delete(self, target_id=None, plugin_group=None, plugin_type=None, plugin_code=None):
try:
filter_data = dict(self.request.arguments)
if not plugin_group: # First check if plugin_group is present in url
delete_all_poutput(self.session, filter_data, target_id=int(target_id))
if plugin_group and (not plugin_type):
filter_data.update({"plugin_group": plugin_group})
delete_all_poutput(self.session, filter_data, target_id=int(target_id))
if plugin_type and plugin_group and (not plugin_code):
if plugin_type not in get_types_for_plugin_group(self.session, plugin_group):
raise tornado.web.HTTPError(400)
filter_data.update({"plugin_type": plugin_type, "plugin_group": plugin_group})
delete_all_poutput(self.session, filter_data, target_id=int(target_id))
if plugin_type and plugin_group and plugin_code:
if plugin_type not in get_types_for_plugin_group(self.session, plugin_group):
raise tornado.web.HTTPError(400)
filter_data.update({
"plugin_type": plugin_type,
"plugin_group": plugin_group,
"plugin_code": plugin_code
})
delete_all_poutput(self.session, filter_data, target_id=int(target_id))
except exceptions.InvalidTargetReference as e:
logging.warn(e.parameter)
raise tornado.web.HTTPError(400)
except exceptions.InvalidParameterType as e:
logging.warn(e.parameter)
raise tornado.web.HTTPError(400)
| [
"bradleywitcher@gmail.com"
] | bradleywitcher@gmail.com |
385aff300767551db85ad08bc165eda8b9f974dc | 5400469e22bd3522447ed50a466e3f0253e2fd0d | /utils/data_utils.py | 615287b88238594459e8b27c182b792d8acf7a29 | [] | no_license | guyucowboy/LungCancerDiagnosis-pytorch | 8effefda5bf2b55a52475ae80d2a316b71a7022b | 018853de09f77a3177b77edc942f92ca8057193d | refs/heads/master | 2020-09-01T11:55:25.135948 | 2018-09-23T21:47:38 | 2018-09-23T21:47:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,035 | py |
import time
import numpy as np
import random
import os
import argparse
from joblib import Parallel, delayed
import scipy.misc
import misc_utils as gp
from scipy import ndimage
def create_test_dataset(data_dir, AUG):
NSL = 5 #Number of slices (lanes) for multi-line network
files = os.listdir(data_dir)
files.sort()
f = np.load(data_dir + files[0])
"""
f['arr_0'] = data
f['arr_1'] = scores
f['arr_2'] = patient name
f['arr_3'] = mosaic
"""
data = np.swapaxes(f['arr_0'],0,3)
data = np.swapaxes(data,1,2)
data_train = data[:,:,:,0:NSL]
label_train = np.load('ISBI_train_label.npy')
scores_train = f['arr_1'][0:NSL]
data_train = np.expand_dims(data_train, axis=4)
scores_train = np.expand_dims(scores_train, axis=0)
idxp = range(0, 59, 2)
idxi = range(1, 60, 2)
label_train = label_train[idxp]
for ff in range(len(files)-1):
start_time = time.time()
filename = data_dir + files[ff+1]
f = np.load(filename)
data = np.swapaxes(f['arr_0'],0,3)
data = np.swapaxes(data,0,2)
scores = f['arr_1']
data = np.expand_dims(data, axis=4)
scores = np.expand_dims(scores, axis=0)
data_train = np.append(data_train, data[:,:,:,0:NSL], axis=4)
scores_train = np.append(scores_train, scores[:,0:NSL], axis=0)
return data_train, label_train
def create_candidate_slices(vol_dir, cand_dir, slid_dir):
files = os.listdir(cand_dir)
sz = 12 #size of slice (sz*2,sz*2,sz*2: 24 x 24 x 24)
for f in range(len(files)):
start_time = time.time()
vol = np.load(str(vol_dir + '/' + files[f]))
centroids = np.load(str(cand_dir + '/' + files[f]))
szcen = len(centroids)
if len(np.shape(centroids)) > 1:
tz, tx, ty = np.shape(vol)
I = []
candsx = centroids[:,1]
candsy = centroids[:,2]
candsz = centroids[:,0]
good = np.where(np.logical_and(np.logical_and(candsx > sz , (tx - candsx) > sz) ,
np.logical_and(np.logical_and(candsy > sz , (ty - candsy) > sz) ,
np.logical_and(candsz > sz , (tz - candsz) > sz))))
centroids = centroids[good,:]
centroids = centroids.reshape(np.shape(centroids)[1],np.shape(centroids)[2])
for k in range(len(centroids)):
im = []
for l in range(-sz,sz):
im1 = vol[int(centroids[k,0]+l),
int(centroids[k,1]-sz) : int(sz+centroids[k,1]),
int(centroids[k,2]-sz) : int(sz+centroids[k,2])]
im.extend([im1])
im = np.asarray(im)
im = np.swapaxes(im,0,2)
I.extend([im])
slides = np.asarray(I)
out_name = str(slid_dir + '/' + str(files[f][:-4]))
np.save(out_name, slides)
print('slices - Patient: ' + str(f+1) + '/' + str(len(files)) +
' (' + str(round(time.time() - start_time,2)) + 's)')
def candidate_extraction(vol_dir, cand_dir):
files = os.listdir(vol_dir)
for f in range(len(files)):
start_time = time.time()
pix_resampled = np.load(str(vol_dir + '/' + files[f]))
# lung extraction
segmented_lungs_fill = gp.segment_lung_mask(pix_resampled, True)
extracted_lungs = pix_resampled * segmented_lungs_fill
minv = np.min(extracted_lungs)
extracted_lungs = extracted_lungs - minv
extracted_lungs[extracted_lungs == -minv] = 0
# filtering
filtered_vol = ndimage.median_filter(extracted_lungs, 3)
# opening by reconstruction
marker = gp.erode(filtered_vol, [3,3,3]) # 3D grey erosion
op_reconstructed = gp.reconstruct(marker, filtered_vol) # 3D grey reconstruction
regional_max = gp.regional_maxima(op_reconstructed) # Regional maxima
# Computed centroids and centroids from annotations
centroids, nconncomp = gp.centroids_calc(regional_max) # Computed centroids
np.save(cand_dir + '/' + str(files[f][:-4]), centroids)
print('cands - Patient: ' + str(f+1) + '/' + str(len(files)) +
' (' + str(round(time.time() - start_time,2)) + 's)')
def create_patients_from_dicom(dicom_dir, vols_dir):
patients = os.listdir(dicom_dir)
patients.sort()
for i in range(len(patients)):
start_time = time.time()
subdir = os.listdir(str(dicom_dir + patients[i]))
subdir.sort()
i_patient = gp.load_scan(dicom_dir + patients[i] + '/' + subdir[1])
i_patient_pixels = gp.get_pixels_hu(i_patient)
pix_resampled, spacing = gp.resample(i_patient_pixels, i_patient, [1.26,.6929,.6929])
filename = (vols_dir + patients[i] + '_2000')
np.save(filename, pix_resampled)
print('discom2npy - Patient: ' + str(i+1) + '/' + str(len(patients)) +
' (' + str(round(time.time() - start_time,2)) + 's)')
| [
"noreply@github.com"
] | guyucowboy.noreply@github.com |
c10ae7a7917a0f0a63a1d0f344adbeb560387611 | 3358d5efaa9d6aeadfa530cc7ae5a8e1b54f161e | /triangle/test.py | d82daa098bdfc171868dd3fd771265c174a68138 | [] | no_license | datnt55/ktpm2013 | 0e56e13477824da86132d12ebf2ef2999b1039ac | a432b76a01eaca43f703bce1c6664399e9f82145 | refs/heads/master | 2021-01-22T07:32:36.839341 | 2013-10-19T14:31:10 | 2013-10-19T14:31:10 | 12,721,758 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,134 | py | import unittest
import math
import triangle
__author__ = 'datnt_55'
class TestTriangle(unittest.TestCase):
# Test mien gia tri
def testKhoangGiaTri1(self):
self.assertEqual(triangle.detect_triangle(3,5,-1),"Ngoai khoang gia tri")
def testKhoangGiaTri2(self):
self.assertEqual(triangle.detect_triangle(math.pow(2,32)+1,math.pow(2,32)+1,math.pow(2,32)+1),"Ngoai khoang gia tri")
def testKhoangGiaTri3(self):
self.assertEqual(triangle.detect_triangle(math.pow(2,35)+1,math.pow(2,36)+1,1),"Ngoai khoang gia tri")
def testKhoangGiaTri4(self):
self.assertEqual(triangle.detect_triangle(-100,math.pow(2,36)+1,1),"Ngoai khoang gia tri")
# Test kieu hop le
def testKieuXacDinh1(self):
self.assertEqual(triangle.detect_triangle(math.pow(2,33),math.pow(2,33),'vnu'),"Kieu khong hop le")
def testKieuXacDinh2(self):
self.assertEqual(triangle.detect_triangle(math.pow(2,33),'uet','vnu'),"Kieu khong hop le")
def testKieuXacDinh3(self):
self.assertEqual(triangle.detect_triangle('d','uet','vnu'),"Kieu khong hop le")
def testKieuXacDinh4(self):
self.assertEqual(triangle.detect_triangle('d',9,'vnu'),"Kieu khong hop le")
#Test tam giac vuong
def testTamGiacVuong1(self):
self.assertEqual(triangle.detect_triangle(3,4,5),"Tam giac vuong")
def testTamGiacVuong2(self):
self.assertEqual(triangle.detect_triangle(3.0,2.0,math.sqrt(13)),"Tam giac vuong")
def testTamGiacVuong3(self):
self.assertEqual(triangle.detect_triangle(math.sqrt(13),math.sqrt(7),math.sqrt(20)),"Tam giac vuong")
def testTamGiacVuong4(self):
self.assertEqual(triangle.detect_triangle(math.sqrt(13),math.sqrt(12),5.0),"Tam giac vuong")
#Test tam giac vuong can
def testTamGiacVuongCan1(self):
self.assertEqual(triangle.detect_triangle(1.0,1.0,math.sqrt(2)),"Tam giac vuong can")
def testTamGiacVuongCan2(self):
self.assertEqual(triangle.detect_triangle(2.0,math.sqrt(8),2.0),"Tam giac vuong can")
def testTamGiacVuongCan3(self):
self.assertEqual(triangle.detect_triangle(math.sqrt(5),math.sqrt(5),math.sqrt(10)),"Tam giac vuong can")
def testTamGiacVuongCan4(self):
self.assertEqual(triangle.detect_triangle(4.0,math.sqrt(8),math.sqrt(8) ),"Tam giac vuong can")
#Test Tam giac deu
def testTamGiacDeu1(self):
self.assertEqual(triangle.detect_triangle(2**-30,2**-30,2**-30),"Tam giac deu")
def testTamGiacDeu2(self):
self.assertEqual(triangle.detect_triangle(7,7,7),"Tam giac deu")
def testTamGiacDeu3(self):
self.assertEqual(triangle.detect_triangle(2**32-2,2**32-2,2**32-2),"Tam giac deu")
# Test tam giac thuong
def testTamGiacThuong1(self):
self.assertEqual(triangle.detect_triangle(2,5,6),"Tam giac thuong")
def testTamGiacThuong2(self):
self.assertEqual(triangle.detect_triangle(2**32-2, 2**3, 2**32-1),"Tam giac thuong")
def testTamGiacThuong3(self):
self.assertEqual(triangle.detect_triangle(7.3,6.1,8.9),"Tam giac thuong")
def testTamGiacThuong4(self):
self.assertEqual(triangle.detect_triangle(2,5,6),"Tam giac thuong")
#Test khong phai tam giac
def testNotTriangle1(self):
self.assertEqual(triangle.detect_triangle(8.0,3.4,2**32-2),"Khong phai tam giac")
def testNotTriangle2(self):
self.assertEqual(triangle.detect_triangle(2,3,7),"Khong phai tam giac")
def testNotTriangle3(self):
self.assertEqual(triangle.detect_triangle(0.12,2**15,2**30),"Khong phai tam giac")
def testNotTriangle4(self):
self.assertEqual(triangle.detect_triangle(math.sqrt(6),math.sqrt(100),2),"Khong phai tam giac")
#Test Tam giac can
def testTamGiacCan1(self):
self.assertEqual(triangle.detect_triangle(2,2,3),"Tam giac can")
def testTamGiacCan2(self):
self.assertEqual(triangle.detect_triangle(7.13, 7.13, 10),"Tam giac can")
def testTamGiacCan3(self):
self.assertEqual(triangle.detect_triangle(math.sqrt(7),math.sqrt(7),3),"Tam giac can")
if __name__ == '__main__':
unittest.main()
| [
"daylodes@gmail.com"
] | daylodes@gmail.com |
1dd5a34e612e0492339d21754f8ab58d9faef979 | 08ed873bba09a3874651801c4a86b36aa8f7abdc | /routing.py | e4264762140344eab484de19281defc2a23eee6f | [] | no_license | geek-proj/socketGameApp | a5a66cc6f2c49d707bb160e645e66a3753bde059 | 580d71d1c623debedb7085ab6e7a131d7f448233 | refs/heads/master | 2022-11-16T00:29:30.715088 | 2020-07-14T10:02:21 | 2020-07-14T10:02:21 | 279,547,614 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
from channels.security.websocket import AllowedHostsOriginValidator
from django.conf.urls import url
from project.consumers import ChatConsumer,EnterConsumer
application = ProtocolTypeRouter({
'websocket':AllowedHostsOriginValidator(
AuthMiddlewareStack(
URLRouter([
url(r"^room/",ChatConsumer),
url(r"^room2/",EnterConsumer)
])
)
)
}) | [
"joha-2001@mail.ru"
] | joha-2001@mail.ru |
7208be6a9eb3a77a2d42bd26b5ffea973ce3e40e | f725b5a04a179d7259b2a6d51213a78425515935 | /venv/Scripts/easy_install-3.7-script.py | 5992d027fa011e01843869793012e4d1a317457d | [] | no_license | MoSedky/Football_Prediction- | a67aeaacd38afea8b096e2f262b5476bd1190749 | 329842f7df2c85ddb53501f917673c49cb716fda | refs/heads/master | 2020-05-02T15:38:05.934992 | 2019-03-30T14:48:16 | 2019-03-30T14:48:16 | 178,047,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | #!C:\Users\sedky\PycharmProjects\Udemy_1\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==28.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==28.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==28.8.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"sedky.cs@gmail.com"
] | sedky.cs@gmail.com |
8930c718988ec9ff9e2fa05fd2fb822d8a01ce61 | b60804cc2c6379b54f9473e0b5c88054b6ab5789 | /TurtleOne.py | 03b2248ec8876bb67209eacf5cfe23ace98e6c8e | [] | no_license | kumaren14/Kumaren-MyFirstTurtle | a605ac6dab599046624979729e239ec187ff9fb6 | 2b71b962e9bd4ae3ea7de0e4df975ad618dcf43d | refs/heads/master | 2020-07-19T08:28:32.650887 | 2019-09-09T20:58:55 | 2019-09-09T20:58:55 | 206,410,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | import turtle
turtle.shape("turtle")
turtle.speed(7)
turtle.pensize(5)
turtle.color("honeydew3")
turtle.forward(50)
turtle.right(90)
turtle.forward(50)
turtle.right(90)
turtle.forward(50)
turtle.left(90)
turtle.forward(50)
turtle.right(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(50)
turtle.right(90)
turtle.forward(50)
turtle.right(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(50)
turtle.left(90)
turtle.forward(50)
turtle.left(90)
turtle.forward(50)
turtle.right(90)
turtle.forward(50)
turtle.left(90)
turtle.forward(50)
turtle.left(90)
turtle.forward(50)
turtle.exitonclick() | [
"100157451@lubbockisd.net"
] | 100157451@lubbockisd.net |
2302c5232d04a5a863ae16ffe06d5ca30271fc71 | 52c79a87b9c6375d9563f995ae4624dee564e88c | /model/bot/server.py | dca303dc6828186fbc1f5cdaa558924dee4e770b | [] | no_license | doobeh/scudbot | 54733fadfe2fe6fb20728272d9726d73776e5ab0 | 3fcc16d115ba456b0db6a00725eee64d5b22bd51 | refs/heads/master | 2021-01-10T19:03:12.440183 | 2013-03-26T15:27:49 | 2013-03-26T15:27:49 | 2,173,968 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | from database import Base, ASCII
from sqlalchemy import Column, Integer, String, Boolean, ForeignKey
class Server(Base):
__tablename__ = 'server'
network_name = Column(String(100), ForeignKey('network.name'))
address = Column(String(100), primary_key=True)
port = Column(Integer())
isSSL = Column(Boolean(), default=False)
def __init__(self,network_name,address,port=6667,isSSL=False):
self.network_name = network_name
self.port = 6667 if port == None else port
self.address = address
self.isSSL = isSSL
def __str__(self):
if(self.isSSL):
return '%s: SSL %s:%d' % (self.network_name, self.address,self.port)
else:
return '%s: TCP %s:%d' % (self.network_name, self.address,self.port)
| [
"PintSizedCat@gmail.com"
] | PintSizedCat@gmail.com |
c6f80b6fcff9d820146a6c798e6abee898629364 | 4fb5eb0a9a24fa5c112a4ebc854ee2604b04adda | /python/oanda/models/calculated_trade_state.py | 996c9bbd4d0e98b92e6550ccd12dbbf12aedeb73 | [
"MIT"
] | permissive | KoenBal/OANDA_V20_Client | ed4c182076db62ecf7a216c3e3246ae682300e94 | e67b9dbaddff6ed23e355d3ce7f9c9972799c702 | refs/heads/master | 2020-03-27T20:42:25.777471 | 2019-12-02T15:44:06 | 2019-12-02T15:44:06 | 147,088,130 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,967 | py | # coding: utf-8
"""
OANDA v20 REST API
The full OANDA v20 REST API Specification. This specification defines how to interact with v20 Accounts, Trades, Orders, Pricing and more. To authenticate use the string 'Bearer ' followed by the token which can be obtained at https://www.oanda.com/demo-account/tpa/personal_token # noqa: E501
OpenAPI spec version: 3.0.23
Contact: api@oanda.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CalculatedTradeState(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'unrealized_pl': 'str',
'margin_used': 'str'
}
attribute_map = {
'id': 'id',
'unrealized_pl': 'unrealizedPL',
'margin_used': 'marginUsed'
}
def __init__(self, id=None, unrealized_pl=None, margin_used=None): # noqa: E501
"""CalculatedTradeState - a model defined in Swagger""" # noqa: E501
self._id = None
self._unrealized_pl = None
self._margin_used = None
self.discriminator = None
if id is not None:
self.id = id
if unrealized_pl is not None:
self.unrealized_pl = unrealized_pl
if margin_used is not None:
self.margin_used = margin_used
@property
def id(self):
"""Gets the id of this CalculatedTradeState. # noqa: E501
The Trade's ID. # noqa: E501
:return: The id of this CalculatedTradeState. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this CalculatedTradeState.
The Trade's ID. # noqa: E501
:param id: The id of this CalculatedTradeState. # noqa: E501
:type: str
"""
self._id = id
@property
def unrealized_pl(self):
"""Gets the unrealized_pl of this CalculatedTradeState. # noqa: E501
The Trade's unrealized profit/loss. # noqa: E501
:return: The unrealized_pl of this CalculatedTradeState. # noqa: E501
:rtype: str
"""
return self._unrealized_pl
@unrealized_pl.setter
def unrealized_pl(self, unrealized_pl):
"""Sets the unrealized_pl of this CalculatedTradeState.
The Trade's unrealized profit/loss. # noqa: E501
:param unrealized_pl: The unrealized_pl of this CalculatedTradeState. # noqa: E501
:type: str
"""
self._unrealized_pl = unrealized_pl
@property
def margin_used(self):
"""Gets the margin_used of this CalculatedTradeState. # noqa: E501
Margin currently used by the Trade. # noqa: E501
:return: The margin_used of this CalculatedTradeState. # noqa: E501
:rtype: str
"""
return self._margin_used
@margin_used.setter
def margin_used(self, margin_used):
"""Sets the margin_used of this CalculatedTradeState.
Margin currently used by the Trade. # noqa: E501
:param margin_used: The margin_used of this CalculatedTradeState. # noqa: E501
:type: str
"""
self._margin_used = margin_used
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CalculatedTradeState):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"koen.bal@gmail.com"
] | koen.bal@gmail.com |
018ee3bc438ee98a5db180f60d32d8fc7b89183b | 3f9ce8ee23132e3c240fa86b4467888efaea00dc | /media/base.py | 366c209575111ddc6398c50b8d62c45247976401 | [] | no_license | ShawnYi5/logic_service | 700263a027c7639dc4a396a6fd118499160c5db5 | 868329cbdb94718316db6a8b633f623183dc55b5 | refs/heads/master | 2021-06-17T13:33:45.314931 | 2019-08-22T02:59:59 | 2019-08-22T02:59:59 | 203,698,877 | 0 | 0 | null | 2021-04-30T20:53:13 | 2019-08-22T02:25:03 | Python | UTF-8 | Python | false | false | 33,770 | py | import threading
import xlogging
import os
import datetime
import json
import copy
import uuid
import shutil
import sys
import collections
import time
import errno
try:
from . import tape_librarian
from .models import MediaTaskRecord
except Exception:
import tape_librarian
from models import MediaTaskRecord
# Return-code table shared by the media API: (code, meaning) pairs.
status = [
    (0, 'successful'),
    (-1, 'error'),  # an error occurred; see the accompanying error message
    (-2, 'need waite'),  # not enough media_rw_obj objects; all previously issued media_rw_obj must be released first
    (-3, 'busy'),  # the device is busy
]
class MBaseException(Exception):
    """Base exception for media-layer errors.

    Attributes:
        msg:   short user-facing message.
        debug: detailed debug description.
        code:  numeric error code (see the module-level ``status`` table /
               line numbers used as codes elsewhere in this module).
    """

    def __init__(self, msg, debug, code):
        # Forward msg to Exception so ``args``, pickling and the default
        # repr behave normally (the original skipped super().__init__).
        super(MBaseException, self).__init__(msg)
        self.msg = msg
        self.debug = debug
        self.code = code

    def __str__(self):
        return '{}:{} {} {}'.format(self.__class__.__name__, self.msg, self.debug, self.code)
_logger = xlogging.getLogger('tape_r')
class MediaTarget(object):
def __init__(self, media_uuid, media_data):
self.media_uuid = copy.copy(media_uuid) # 要跟子类共用。
self.media_data = copy.copy(media_data) # 要跟子类共用。
self.align_size = 64 * 1024
self.__lock = threading.RLock() # 不能重写。
self.writetask = False
self.__task_info = None # 不能重写。
self.__taskext = dict() #
self.__handles = list() # 不能重写。
self.__file_uuids_list = list() # 不能重写。
self.__uuid_used_check = dict()
self.__task_size = 0 # 不能重写。
self.__filecount = 0
self.__successful = False
self.__task_recordInfo = None
self.__readcache = None
self.__readcache_len = 0
self.__write_error = 0
def clean_task(self):
self.writetask = False
self.__task_info = None # 不能重写。
self.__taskext = dict() #
self.__handles = list() # 不能重写。
self.__file_uuids_list = list() # 不能重写。
self.__uuid_used_check = dict()
self.__task_size = 0 # 不能重写。
self.__filecount = 0
self.__successful = False
self.__task_recordInfo = None
self.__readcache = None
self.__readcache_len = 0
self.__write_error = 0
def media_start_task(self, task_info):
pass
def media_finish_task(self, is_successful):
pass
def media_get_write_handle(self, file_uuid):
pass
def media_get_read_handle(self, file_name_uuid):
pass
def media_write(self, fd, data):
pass
def media_read(self, fd, size):
pass
def media_close(self, fd):
pass
def start_task(self, task_info):
"""
:param task_info: 包含此次任务需要写入的大小 {'size':1111, 'task_uuid':'', 'task_date':''}
:return:
"""
with self.__lock:
if self.__task_info is None:
_logger.info('LocalMediaTarget start task, task info:{}'.format(task_info))
self.clean_task()
if True:
# try:
self.media_start_task(task_info)
self.__task_info = task_info
self.writetask = True
self.new_task_record()
# except Exception as e:
# raise MBaseException('任务开始失败', 'start task fail, {}'.format(e), 301)
# else:
return 0, ''
else:
return -1, task_info # 正在任务
def get_write_handle(self, file_uuid):
with self.__lock:
if self.__uuid_used_check.get(file_uuid, None) is not None:
raise MBaseException('文件重复添加', 'file has in handles', 300)
if True:
# try:
fd, size = self.media_get_write_handle(file_uuid)
if -1 == fd:
return fd, size
self.__write_error = 0
self.__uuid_used_check[file_uuid] = fd
fdIndex = self.__filecount
self.__filecount = self.__filecount + 1
self.__handles.append(fd)
self.__file_uuids_list.append(file_uuid)
if self.__handles[fdIndex] != fd:
raise MBaseException('internal error 1', 'internal error 1', 302)
if self.__file_uuids_list[fdIndex] != file_uuid:
raise MBaseException('internal error 2', 'internal error 2', 303)
# except Exception as e:
# raise MBaseException('获取写句柄失败', 'get write fail, {}'.format(e), 304)
return fdIndex, size
def get_read_handle(self, file_info):
with self.__lock:
if self.__task_info is None:
_logger.info('LocalMediaTarget get_read_handle:{}'.format(file_info))
self.__task_info = file_info
self.writetask = False
return self.media_get_read_handle(file_info), ''
else:
return -1, self.__task_info
def write(self, fdIndex, data):
if fdIndex < 0 or fdIndex >= len(self.__handles):
_logger.info('media_write error fdIndex:{},size:{}'.format(fdIndex, len(data)))
self.__write_error = -1
return -1, 'error fd'
self.__task_size = self.__task_size + len(data)
_write_err, info = self.media_write(self.__handles[fdIndex], data)
if _write_err != 0:
self.__write_error = -1
return _write_err, info
def read(self, fd, size):
# _logger.info('MediaTarget read(fd:{},size:{})'.format(fd, size))
if size <= self.__readcache_len:
# in cache
retbs = self.__readcache[0:size]
self.__readcache = self.__readcache[size:self.__readcache_len]
self.__readcache_len -= size
_logger.info('MediaTarget read(fd:{},size:{}) return {}'.format(fd, size, len(retbs)))
return 0, retbs
need_size = size - self.__readcache_len
readsize = (need_size + self.align_size - 1) // self.align_size * self.align_size
ret, newbs = self.media_read(fd, readsize)
if 0 != ret:
return ret, newbs
if self.__readcache_len != 0:
# 之前有数据。合并使用。
retbs = self.__readcache + newbs[:need_size]
self.__readcache_len = readsize - need_size
else:
# 全新的数据。
retbs = newbs[:size]
self.__readcache_len = readsize - size
if self.__readcache_len != 0:
# 还有数据
self.__readcache = newbs[readsize - self.__readcache_len:]
# 因为磁带库只能块对其读。
# _logger.info('MediaTarget read(fd:{},size:{}) return {}'.format(fd, size, len(retbs)))
return 0, retbs
def close(self, fdIndex):
_logger.info('start close fdIndex:{}'.format(fdIndex))
if self.writetask:
if fdIndex < 0:
return
file_uuid = self.__file_uuids_list[fdIndex]
close_info = self.media_close(self.__handles[fdIndex])
if self.__write_error == 0:
# 只有成功,才会记录。
_logger.info('new fdIndex:{}'.format(fdIndex))
close_info['fdIndex'] = fdIndex
self.__taskext[file_uuid] = close_info
# self.update_current_task_ext()
# 多增加一个文件,是没有必要写数据库记录的。只有在最后写了成功标记后,才有必要update
else:
_logger.info('skip fdIndex:{}'.format(fdIndex))
else:
try:
self.media_close(fdIndex)
finally:
self.__task_info = None
self.clean_task()
_logger.info('end close fdIndex:{}'.format(fdIndex))
def finish_task(self, is_successful):
if not self.writetask:
return
with self.__lock:
self.__successful = is_successful
try:
self.update_current_task_ext()
except Exception as e:
_logger.error("update_current_task_ext error:{}".format(e))
self.__task_recordInfo = None
try:
self.media_finish_task(is_successful)
except Exception as e:
_logger.error("media_finish_task error:{}".format(e))
_task_ext = self.__taskext
self.clean_task()
return _task_ext
def new_task_record(self):
self.__task_recordInfo = MediaTaskRecord.objects.create(
production_date=self.__task_info['task_date'], # 产生日期,统一由上层传入。
media_uuid=self.media_uuid, # 媒体库的uuid
task_ext_inf=json.dumps(self.__taskext), # task扩展信息
occupy_size=0,
task_uuid=self.__task_info['task_uuid'])
return
def update_current_task_ext(self):
if not self.writetask or self.__task_recordInfo == None:
return
self.__task_recordInfo.task_ext_inf = json.dumps(self.__taskext)
self.__task_recordInfo.file_count = self.__filecount
self.__task_recordInfo.occupy_size = self.__task_size
self.__task_recordInfo.successful = self.__successful
self.__task_recordInfo.save()
return
def get_last_success_task(self):
try:
return MediaTaskRecord.objects.filter(
MediaTaskRecord.media_uuid == self.media_uuid,
MediaTaskRecord.successful == True
).order_by(MediaTaskRecord.id)[-1]
except IndexError as e:
return None
def get_first_valid_task(self):
try:
return MediaTaskRecord.objects.filter(
MediaTaskRecord.media_uuid == self.media_uuid,
MediaTaskRecord.successful == True,
MediaTaskRecord.overwritedata == False
).order_by(MediaTaskRecord.id)[0]
except IndexError as e:
return None
    def update_all_task_life_cycle(self, crt_date):
        # Stub. Original intent (translated): walk all tasks and flag the
        # ones whose retention has lapsed for deletion; crt_date is "now".
        return
class Tape_Group_Class(object):
    """Ordered collection of tapes, keyed by integer slot id (ascending)."""

    def __init__(self, tapes_list):
        # Keys arrive as strings ('1', '11', ...); sort them numerically so
        # iteration order matches the physical slot order.
        pairs = sorted((int(slot), tag) for slot, tag in tapes_list.items())
        self.__tapesOrdered = collections.OrderedDict(pairs)

    def get_tape_count(self):
        return len(self.__tapesOrdered)

    def get_next_volume(self, current_volumeID):
        """Return the (slot, tag) pair following ``current_volumeID``.

        Wraps around to the first tape when the current one is last (or its
        id is unknown); returns (-1, None) when the group is empty.
        """
        if not self.__tapesOrdered:
            return -1, None
        take_next = False
        for slot, tag in self.__tapesOrdered.items():
            if take_next:
                return copy.copy(slot), copy.copy(tag)
            if slot == current_volumeID:
                take_next = True
        # Wrap around (or current id not present): hand back the first tape.
        for slot, tag in self.__tapesOrdered.items():
            return copy.copy(slot), copy.copy(tag)
class TapeMediaHardwareAPI(object):
    """Wrapper around the physical tape drive plus its medium changer
    (robot arm) for one media definition.

    Device nodes are resolved lazily from the drive serial number carried in
    ``media_data`` (see init_hardware_device); the remaining methods provide
    the load/seek/retry helpers used by TapeMediaTarget.
    """

    def __init__(self, media_data):
        # media_data: dict with at least 'driver' (drive SN) and 'link'
        # (changer topology) — supplied by the media configuration.
        self.media_data = media_data
        # Device-related state, filled in by init_hardware_device().
        self.__tape_devname = None      # tape drive device name; [0] is the openable path
        self.__tape_devobj = None       # tape_librarian.tape_dev_mgr
        self.__mc_devname = None        # medium-changer device name
        self.__mc_devobj = None         # tape_librarian.Medium_Changer_devmgr
        self.__mc_drvID = -1            # this drive's index inside the changer
        self.__hardware_status = False  # guards against double initialisation

    def init_hardware_device(self):
        """Resolve the tape-drive and medium-changer device names (once).

        Raises MBaseException when either device cannot be located.
        """
        if self.__hardware_status:
            return
        self.__hardware_status = True
        mc_tape = tape_librarian.mc_and_tape_mgr()
        _tape_drv_sn = self.media_data['driver']
        self.__tape_devname = mc_tape.get_tape_devicename(_tape_drv_sn)
        if self.__tape_devname == None:
            raise MBaseException("不能找到带库驱动器{}".format(_tape_drv_sn), "can not found tape drive {}".format(_tape_drv_sn),
                                 sys._getframe().f_lineno)
        _mc_link = self.media_data['link']
        self.__mc_devname, self.__mc_drvID = mc_tape.get_mc_devicename(_tape_drv_sn, _mc_link)
        if self.__mc_devname == None:
            raise MBaseException("不能找到机械臂对应的驱动器{}".format(_tape_drv_sn),
                                 "can not found Medium Changer device{}".format(_tape_drv_sn),
                                 sys._getframe().f_lineno)
        self.__tape_devobj = tape_librarian.tape_dev_mgr(self.__tape_devname)
        self.__tape_devobj.update_tape_status()
        self.__mc_devobj = tape_librarian.Medium_Changer_devmgr(self.__mc_devname)
        self.__mc_devobj.update_Medium_Changer()

    def get_tapename(self):
        # First element of the device-name value is the path passed to os.open.
        return self.__tape_devname[0]

    def get_mcname(self):
        return self.__mc_devname

    def try_check_drive_status(self):
        """Poll the drive for up to ~30 seconds until it reports both ready
        and online.  Best effort: status errors are swallowed and retried.
        """
        for _ in range(30):
            try:
                self.__tape_devobj.update_tape_status()
                if self.__tape_devobj.Ready == tape_librarian.const_yes and self.__tape_devobj.online:
                    return
                else:
                    _str = "tapeDev{} ready:{} online:{}".format(
                        self.__tape_devname, self.__tape_devobj.Ready, self.__tape_devobj.online)
                    _logger.info(_str)
            except:
                # NOTE(review): bare except keeps polling on any status
                # failure, but also hides KeyboardInterrupt.
                pass
            time.sleep(1)

    def try_rewind_and_set_block_size(self, _align_size):
        """Try (twice) to program the drive block size, rewinding between
        failed attempts.

        _align_size: block size in bytes used by subsequent reads/writes.
        """
        for _ in range(2):
            try:
                self.__tape_devobj.update_tape_status()
                if self.__tape_devobj.Ready == tape_librarian.const_yes and self.__tape_devobj.online:
                    self.__tape_devobj.set_blksize(_align_size)
                    return
                else:
                    _str = "tapeDev{} ready:{} online:{}".format(
                        self.__tape_devname, self.__tape_devobj.Ready, self.__tape_devobj.online)
                    _logger.info(_str)
                    # Drive not ready: still attempt to set the size.
                    self.__tape_devobj.set_blksize(_align_size)
            except Exception as e:
                # NOTE(review): 'str' shadows the builtin here (local only).
                str = "try_rewind_and_set_block_size error:{}".format(e)
                _logger.error(str)
                try:
                    self.__tape_devobj.tape_rewind()
                except Exception as e:
                    str = "tape_rewind error:{}".format(e)
                    _logger.error(str)
            time.sleep(1)

    def load_Volume(self, _crt_VolumeTag, _align_size):
        """Ask the changer to mount the volume with barcode _crt_VolumeTag
        into our drive, then bring the drive into a usable state."""
        _logger.info("load_Volume({})".format(_crt_VolumeTag))
        _load_status, _load_new_volume = self.__mc_devobj.load_Volume(_crt_VolumeTag, self.__mc_drvID)
        _logger.info("load_Volume return {}".format(_load_new_volume))
        # Known pitfalls (translated from the original Chinese notes):
        # 1. The tape may be incompatible with this drive; any operation can hang.
        # 2. Until rewound, the tape is unusable and operations fail outright.
        # 3. The tape may be busy and need a short wait.
        # Handling:
        # 1. If not ready, wait up to ~30 seconds.
        # 2. Set the block size; on failure rewind and retry.
        self.try_check_drive_status()
        self.try_rewind_and_set_block_size(_align_size)
        if _load_new_volume:
            # A freshly mounted volume must be rewound before use.
            _logger.info("new Volume rewind")
            try:
                self.__tape_devobj.tape_rewind()
                self.try_rewind_and_set_block_size(_align_size)
            except Exception as e:
                _logger.error("tape({}) rewind error:{}".format(self.__tape_devname, e))
        return

    def seek(self, _crt_FileNO):
        """Position the tape at file number _crt_FileNO; returns the file
        number actually reached."""
        return self.__tape_devobj.seek(_crt_FileNO)
class TapeSpaceMgr(object):
    """FIFO list of free (volume, file-slot) positions on the tape group."""

    def __init__(self):
        self.__all = list()

    def append_free_space(self, _vol_ID, _vol_Tag, _file_ID):
        """Queue one writable position (values are copied defensively)."""
        self.__all.append({
            r'volumeid': copy.copy(_vol_ID),
            r'volumetag': copy.copy(_vol_Tag),
            r'volfileid': copy.copy(_file_ID),
        })

    def use_new_volume(self):
        """Retire the current volume and fall through to the next one."""
        _logger.info("use_new_volume({})")
        self.__all.pop(0)

    def malloc_free_space(self):
        """Return a copy of the next free position and advance its file slot,
        or None when no space remains."""
        if not self.__all:
            return None
        head = self.__all[0]
        allocated = copy.copy(head)
        head['volfileid'] = head['volfileid'] + 1
        return allocated

    def get_free_bytes(self):
        """Crude capacity estimate: 0 when no volumes remain, otherwise a
        practically infinite 1 PiB."""
        if not self.__all:
            return 0
        return 1024 * 1024 * 1024 * 1024 * 1024
# http://172.16.1.11/AIO/project_main/issues/4087
class TapeMediaTarget(MediaTarget):
    """MediaTarget implementation that stores task files on a rotating group
    of tapes.

    A file is addressed by (volume id, volume barcode tag, file number on
    that volume).  Free positions come from TapeSpaceMgr; the physical drive
    and robot arm are driven through TapeMediaHardwareAPI.
    """

    def __init__(self, media_uuid, media_data):
        super(TapeMediaTarget, self).__init__(media_uuid, media_data)
        # At ~1 GB per file this id space covers ~999 PB (original note).
        self.const_max_id = 999999999
        self.const_first_fileNO_per_vol = 0
        # Current tape position and open raw file descriptor.
        self.__crt_VolumeID = -1
        self.__crt_VolumeTag = None
        self.__crt_FileNO = -1
        self.__task_info = None
        self.__fd = 0
        self.__tape_space_mgr = TapeSpaceMgr()
        self.__tape_drive_locker = None
        # Halved on EINVAL down to a 64 KiB floor (some drives reject 2 MiB writes).
        self.__max_size_per_write = 2 * 1024 * 1024
        self.__tapeGroupMgrObj = Tape_Group_Class(self.media_data['tapas'])
        self.__hwDev = TapeMediaHardwareAPI(self.media_data)

    def __clear_tape_task(self):
        """Reset all per-task state and release the drive lock, if held."""
        self.__crt_VolumeID = -1
        self.__crt_VolumeTag = None
        self.__crt_FileNO = -1
        self.__task_info = None
        self.__fd = 0
        self.__tape_space_mgr = TapeSpaceMgr()
        if self.__tape_drive_locker != None:
            # Dropping the last reference releases the lock (original note:
            # "release it this way — should be fine").
            _temp_locker = self.__tape_drive_locker
            self.__tape_drive_locker = None
            _temp_locker = None

    def get_last_success_valid_file(self):
        """Return (volumeid, volumetag, volfileid) of the newest file written
        by the last successful task, or (-1, None, -1) when there is none."""
        _lastTask = self.get_last_success_task()
        if None == _lastTask:
            return -1, None, -1
        _logger.info("get_last_success_task _task_ext_inf({})".format(_lastTask.task_ext_inf))
        _task_ext_inf = json.loads(_lastTask.task_ext_inf)
        _maxFileID = -1
        _lastFileExt = None
        for _k, _v in _task_ext_inf.items():
            # fdIndex orders the files within a task; keep the highest.
            if _v['fdIndex'] > _maxFileID:
                _maxFileID = _v['fdIndex']
                _lastFileExt = _v
        return _lastFileExt['volumeid'], _lastFileExt['volumetag'], _lastFileExt['volfileid']

    def get_fist_valid_file(self):
        """Return (volumeid, volumetag, volfileid) of the oldest still-valid
        file, or (-1, None, -1).  (Method-name typo kept for compatibility.)"""
        _firstTask = self.get_first_valid_task()
        if None == _firstTask:
            return -1, None, -1
        _logger.info("get_fist_valid_file _task_ext_inf({})".format(_firstTask.task_ext_inf))
        _task_ext_inf = json.loads(_firstTask.task_ext_inf)
        _minFileID = self.const_max_id
        _firstFileExt = None
        for _k, _v in _task_ext_inf.items():
            if _v['fdIndex'] < _minFileID:
                _minFileID = _v['fdIndex']
                _firstFileExt = _v
        return _firstFileExt['volumeid'], _firstFileExt['volumetag'], _firstFileExt['volfileid']

    def init_task_space(self):
        """Build the free-space list: everything after the last successful
        file, up to (but excluding) the volume holding the oldest valid file."""
        # First usable slot = position right after the last successful file.
        _last_VolumeID, _last_VolumeTag, _last_FileNO = self.get_last_success_valid_file()
        if None == _last_VolumeTag:
            # Nothing written yet: start from the first tape in the group.
            _start_free_VolumeID, _start_free_VolumeTag = self.__tapeGroupMgrObj.get_next_volume(self.const_max_id)
            if _start_free_VolumeTag is None:
                raise MBaseException("无磁带可用!", "no tape", sys._getframe().f_lineno)
            _start_free_fileNO = self.const_first_fileNO_per_vol
        else:
            _start_free_VolumeID = _last_VolumeID
            _start_free_VolumeTag = _last_VolumeTag
            _start_free_fileNO = _last_FileNO + 1
        # The oldest still-valid file bounds how far we may advance.
        _end_VolumeID, _end_VolumeTag, _end_FileNO = self.get_fist_valid_file()
        if _end_VolumeID == -1 or _end_VolumeTag == None:
            # No valid data at all: treat the start volume as the boundary.
            _end_VolumeID = _start_free_VolumeID
        # Queue the start position, then every following empty volume.
        self.__tape_space_mgr.append_free_space(_start_free_VolumeID, _start_free_VolumeTag, _start_free_fileNO)
        _current__VolumeID = _start_free_VolumeID
        while True:
            _nextVolID, _next_VolTag = self.__tapeGroupMgrObj.get_next_volume(_current__VolumeID)
            if _nextVolID == _end_VolumeID:
                # Never reuse a volume that still holds valid task data.
                break
            self.__tape_space_mgr.append_free_space(_nextVolID, _next_VolTag, self.const_first_fileNO_per_vol)
            _current__VolumeID = _nextVolID
        return

    def media_start_task(self, task_info):
        """Begin a write task: lock the drive, resolve devices, build the
        free-space map and verify there is (nominally) enough room."""
        _logger.info("media_start_task({})".format(task_info))
        self.__clear_tape_task()
        self.__tape_drive_locker = tape_librarian.get_tape_drive_lock()
        self.__hwDev.init_hardware_device()
        if self.__tapeGroupMgrObj.get_tape_count() <= 1:
            # The rotation scheme needs at least two tapes.
            raise MBaseException("必须有2盘磁带及以上", "tape count{}".format(self.__tapeGroupMgrObj.get_tape_count()),
                                 sys._getframe().f_lineno)
        self.init_task_space()
        self.__task_info = copy.copy(task_info)
        __free = self.__tape_space_mgr.get_free_bytes()
        __need_size = task_info['size']
        if __free < __need_size:
            raise MBaseException("磁带无可用空间!", "no free spaces!{} < {} ".format(__free, __need_size),
                                 sys._getframe().f_lineno)
        return

    def __open_tape(self):
        """Mount the current volume, seek to the current file number and open
        the drive device; sets self.__fd on success."""
        try:
            self.__hwDev.load_Volume(self.__crt_VolumeTag, self.align_size)
        except:
            # The tape cannot be used in this drive.
            msg = "设备{}加载磁带失败({})".format(self.__hwDev.get_mcname(), self.__crt_VolumeTag)
            # NOTE: the exception object is constructed but not raised (kept
            # from the original); a plain Exception is raised below.
            MBaseException(msg, "load volume failed", sys._getframe().f_lineno)
            _logger.error(msg)
            raise Exception(msg)
        crt_fileNO = self.__hwDev.seek(self.__crt_FileNO)
        if self.__crt_FileNO != crt_fileNO:
            msg = "{}移动磁带失败(old: {} != current:{})".format(self.__hwDev.get_mcname(), self.__crt_FileNO, crt_fileNO)
            MBaseException(msg, 'seek tape failed', sys._getframe().f_lineno)
            # Bug fix: this line used the undefined name ``log`` and would
            # have raised NameError instead of logging the seek mismatch.
            _logger.error(msg)
            # Deliberately not raising here: whether os.open below succeeds
            # is the real pass/fail criterion (translated original note).
        try:
            if self.writetask:
                mode = os.O_WRONLY | os.O_APPEND | os.O_SYNC
            else:
                mode = os.O_RDONLY
            self.__fd = os.open(self.__hwDev.get_tapename(), mode, 0o666)
        except Exception as e:
            msg = "os.open{} error:{}".format(self.__hwDev.get_tapename(), e)
            MBaseException("打开磁带机驱动器({})设备失败".format(self.__hwDev.get_tapename()), msg, sys._getframe().f_lineno)
            _logger.error(msg)
            raise Exception(msg)
        _logger.info("os.open{} success fd:{}".format(self.__hwDev.get_tapename(), self.__fd))

    def media_get_write_handle(self, file_uuid):
        """Allocate the next free tape position and open it for writing.

        Returns (fd, max_bytes) or (-1, 0) when the group is full.  Volumes
        that fail to open are skipped for the rest of the task.
        """
        _logger.info("media_get_write_handle({})".format(file_uuid))
        while True:
            free_space = self.__tape_space_mgr.malloc_free_space()
            if None == free_space:
                # No space left anywhere in the group.
                return -1, 0
            self.__crt_VolumeID = free_space['volumeid']
            self.__crt_VolumeTag = free_space['volumetag']
            self.__crt_FileNO = free_space['volfileid']
            try:
                self.__open_tape()
            except Exception as e:
                # This volume is unusable — move on to the next one.
                msg = "__open_tape:{} error:{}".format(self.__hwDev.get_tapename(), e)
                _logger.error(msg)
                self.__tape_space_mgr.use_new_volume()
                continue
            return self.__fd, 4 * 1024 * 1024 * 1024  # 4 GiB cap per file

    def media_get_read_handle(self, file_info):
        """Open the position described by file_info (keys volumeid /
        volumetag / volfileid) for reading and return the raw fd."""
        _logger.info("media_get_read_handle({})".format(file_info))
        self.__tape_drive_locker = tape_librarian.get_tape_drive_lock()
        self.__hwDev.init_hardware_device()
        self.__crt_VolumeID = file_info['volumeid']
        self.__crt_VolumeTag = file_info['volumetag']
        self.__crt_FileNO = file_info['volfileid']
        self.__open_tape()
        return self.__fd

    def media_write(self, fd, data):
        """Write ``data`` to the open tape file in chunks.

        Returns (0, 'OK') on success, (errno.ENOSPC, 'ENOSPC') when the
        volume fills up (the caller should grab a new write handle), or a
        negative errno with a message on hard failures.
        """
        if fd != self.__fd:
            msg = "error fd: {} != self:{}".format(fd, self.__fd)
            _logger.error(msg)
            return -1, msg
        _blk_start = 0
        _blk_end = 0
        _len = len(data)
        while _len != 0:
            _io_size = min(self.__max_size_per_write, _len)
            _blk_end = _blk_start + _io_size
            try:
                _wrdsize = os.write(self.__fd, data[_blk_start:_blk_end])
                if _wrdsize > 0:
                    # Partial writes are fine; advance and continue.
                    _len -= _wrdsize
                    _blk_start = _blk_start + _wrdsize
                    continue
                else:
                    _logger.error('os.write(fd:{}) error size: _wrdsize:{} != _io_size:{}'.format(
                        self.__fd, _wrdsize, _io_size))
                    self.__tape_space_mgr.use_new_volume()
                    return errno.ENOSPC, r'ENOSPC'
            except IOError as e:
                msg = 'os.write(fd:{}) error({}) size: _io_size:{}'.format(self.__fd, e.errno, _io_size)
                _logger.error(msg)
                if e.errno == errno.ENOSPC:
                    # End of tape: retire this volume.
                    self.__tape_space_mgr.use_new_volume()
                    return errno.ENOSPC, r'ENOSPC'
                if e.errno == errno.EINVAL:
                    # Some drives only accept smaller writes; probe downwards.
                    if self.__max_size_per_write <= 65536:
                        # Never go below 64 KiB.
                        return e.errno, msg
                    # Bug fix: use floor division — true division ('/= 2')
                    # turned the chunk size into a float, which then crashed
                    # as a slice bound on the retried write.
                    self.__max_size_per_write //= 2
                    continue
                if e.errno == errno.EINTR or e.errno == errno.EAGAIN:
                    # Transient: retry the same chunk.
                    continue
                return -e.errno, msg
        return 0, 'OK'

    def media_read(self, fd, size):
        """Read exactly ``size`` bytes from the open tape file.

        Returns (0, bytes) on success, (-1, 'no more') on a short read, or a
        negative errno with a message.  Note: reads always use the stored
        self.__fd; the ``fd`` argument is kept for interface symmetry.
        """
        while True:
            try:
                one_buf = os.read(self.__fd, size)
                if size != len(one_buf):
                    return -1, 'no more'
            except IOError as e:
                msg = 'os.read(fd:{}) error({}) size:{}'.format(fd, e, size)
                _logger.error(msg)
                if e.errno == errno.EINTR or e.errno == errno.EAGAIN:
                    continue
                return -e.errno, msg
            return 0, one_buf

    def media_close(self, fd):
        """Close the current tape file and report where it was stored.

        For read tasks the drive lock is released immediately; write tasks
        keep it until media_finish_task runs.
        """
        _logger.info("os.close({})".format(self.__fd))
        os.close(self.__fd)
        vid = self.__crt_VolumeID
        vtag = self.__crt_VolumeTag
        vfileid = self.__crt_FileNO
        self.__fd = 0
        if not self.writetask:
            # Read task: release the drive lock as soon as this file is done.
            _temp_locker = self.__tape_drive_locker
            self.__tape_drive_locker = None
            _temp_locker = None
        return {'volumeid': vid, 'volumetag': vtag, 'volfileid': vfileid}

    def media_finish_task(self, is_successful):
        """Tear down per-task state (and release the drive lock)."""
        self.__clear_tape_task()
        return
class LocalMediaTarget(MediaTarget):
    """MediaTarget backed by a directory on the local filesystem: each task
    gets its own random sub-directory and each file becomes one file in it."""

    def __init__(self, media_uuid, data):
        super(LocalMediaTarget, self).__init__(media_uuid, data)
        self._out_path = self.media_data['path']  # configured root directory
        self._out_dir = None   # per-task directory, created in media_start_task
        self.path = None       # full path of the file currently being written

    def media_start_task(self, task_info):
        # A fresh random directory per task keeps tasks isolated.
        self._out_dir = os.path.join(self._out_path, uuid.uuid4().hex)
        os.makedirs(self._out_dir)

    def media_finish_task(self, is_successful):
        # A failed task leaves nothing behind on disk.
        if not is_successful:
            shutil.rmtree(self._out_dir, ignore_errors=True)

    def media_get_write_handle(self, file_uuid):
        self.path = os.path.join(self._out_dir, file_uuid)
        handle = open(self.path, 'wb')
        # 1 GiB soft limit per file, mirroring the tape target's contract.
        return handle, 1 * 1024 * 1024 * 1024

    def media_get_read_handle(self, file_info):
        return open(file_info['path'], 'rb')

    def media_write(self, fd, data):
        fd.write(data)
        return 0, ''

    def media_read(self, fd, size):
        return 0, fd.read(size)

    def media_close(self, fd):
        fd.close()
        return {'path': self.path}
class MediaTargetManager(object):
    """
    Registry of all MediaTarget instances (add / get / dispatch), plus a
    background thread that flags expired task records as overwritable.
    """

    def __init__(self):
        self._medias = dict()
        self._medias_lock = threading.RLock()
        t = threading.Thread(target=self.check_expired, args=(), name='MediaTargetManagerThread')
        # Modern spelling of the deprecated t.setDaemon(True): don't keep the
        # process alive for the sweeper.
        t.daemon = True
        t.start()

    def check_expired(self):
        """Background loop: once a minute, mark successful records older than
        each media's ``max_days`` retention as overwritable."""
        while True:
            today = datetime.datetime.now().date()
            try:
                # Bug fix: snapshot under the lock — add() mutates the dict
                # concurrently, and iterating a live view while it changes
                # size raises RuntimeError.
                with self._medias_lock:
                    media_snapshot = list(self._medias.items())
                for media_uuid, media_target in media_snapshot:
                    dead_line = today - datetime.timedelta(days=media_target.media_data['max_days'])
                    MediaTaskRecord.objects.filter(
                        MediaTaskRecord.media_uuid == media_uuid,
                        MediaTaskRecord.successful == True,
                        MediaTaskRecord.overwritedata == False,
                        MediaTaskRecord.production_date < dead_line
                    ).update({'overwritedata': True})
            except Exception as e:
                _logger.error('MediaTargetManager check_expired error:{}'.format(e), exc_info=True)
            time.sleep(60)

    def add(self, info):
        """Register a new media target described by ``info`` (media_uuid,
        media_type, info).  Returns 0 on success or duplicate uuid, -1 for an
        unknown media_type."""
        media_uuid = info['media_uuid']
        media_type = info['media_type']
        with self._medias_lock:
            if media_uuid in self._medias:
                _logger.error('MediaTargetManager media:{} is already in'.format(media_uuid))
                return 0
            else:
                if media_type == 'tape':
                    self._medias[media_uuid] = TapeMediaTarget(media_uuid, info['info'])
                    return 0
                elif media_type == 'local':
                    self._medias[media_uuid] = LocalMediaTarget(media_uuid, info['info'])
                    return 0
                else:
                    return -1

    def delete(self):
        pass

    def get(self, media_uuid):
        """Return the MediaTarget for ``media_uuid``, or None."""
        with self._medias_lock:
            return self._medias.get(media_uuid)

    def put(self):
        pass

    def enum_mc_hw_info(self, info):
        # Pass-through to the tape librarian's hardware enumeration.
        return tape_librarian.enum_mc_hw_info(info)

    def operations(self, json_params):
        """RPC-style dispatcher: json_params is
        '{"action": <method name>, "info": <argument>}'."""
        params = json.loads(json_params)
        rev = getattr(self, params['action'])(params['info'])
        return json.dumps({'rev': rev})
if __name__ == "__main__":
    # Smoke test against real hardware: register a tape media, write a few
    # files, then read them back in fdIndex order.
    mtmgr = MediaTargetManager()
    __media_data = dict()
    __media_data['name'] = 'tape_media_name'
    __media_data['link'] = {
        "DriveList": [{"DriveSN": "31333130323534303531", "MCSN": "30304c3255373856393532385f4c4c30", "MCBoxID": 0}]}
    __media_data['driver'] = '31333130323534303531'
    __media_data['cycle'] = 'cycle'
    __media_data['max_days'] = 33
    __media_data['cycle_type'] = 'cycle_type'
    # Three tapes: slot ids are string keys, values are barcode tags.
    __media_data['tapas'] = {'1': 'DH1397L4', '2': "DH1398L4", '11': "DH1399L4"}
    __inf = dict()
    __inf['media_uuid'] = 'test_media_uuid'
    __inf['media_type'] = 'tape'
    __inf['info'] = __media_data
    mtmgr.add(__inf)
    tapeDev = mtmgr.get('test_media_uuid')
    # Start a write task.
    taskinfo = dict()
    taskinfo['size'] = 123456789
    taskinfo['task_uuid'] = uuid.uuid4().hex
    taskinfo['task_date'] = datetime.datetime.now()  # ('%Y_%m_%dT%H_%M_%S')
    tapeDev.start_task(taskinfo)
    # Three small files of one 64 KiB block each.
    for i in range(3):
        __mfd, wrsize = tapeDev.get_write_handle(uuid.uuid4().hex)
        bs = bytearray(65536)
        ret, err = tapeDev.write(__mfd, bs)
        tapeDev.close(__mfd)
    # One larger file written in 64 KiB chunks.
    __mfd, wrsize = tapeDev.get_write_handle(uuid.uuid4().hex)
    s = 0
    wrsize = 1000000
    while s < wrsize:
        bs = bytearray(65536)
        ret, err = tapeDev.write(__mfd, bs)
        if ret != 0:
            break
        s = s + 65536
    tapeDev.close(__mfd)
    success_task_extinfo = tapeDev.finish_task(True)
    # Number of files produced = max fdIndex + 1.
    _fdIndex = 0
    _maxfdIndex = -1
    for _k, _v in success_task_extinfo.items():
        _fdIndex = _v['fdIndex']
        if _fdIndex > _maxfdIndex:
            _maxfdIndex = _fdIndex
    _maxfdIndex = _maxfdIndex + 1
    # Read every file back, in fdIndex order.
    for _i in range(_maxfdIndex):
        for _k, _v in success_task_extinfo.items():
            _fdIndex = _v['fdIndex']
            if _fdIndex == _i:
                _fileext = _v
        fd = tapeDev.get_read_handle(_fileext)
        ret, bs = tapeDev.read(fd, 9)
        if ret != 0:
            print("read error")
        else:
            print("read size:{}", len(bs))
        ret, bs = tapeDev.read(fd, 1111)
        if ret != 0:
            print("read error")
        else:
            print("read size:{}", len(bs))
        ret, bs = tapeDev.read(fd, 2222)
        if ret != 0:
            print("read error")
        else:
            print("read size:{}", len(bs))
        tapeDev.close(fd)
| [
"yi.shihong@aliyun.com"
] | yi.shihong@aliyun.com |
a3ee49591f6deed0087d42e9b632c8a6a0a2ecc7 | 7a81a6cb525b00716bfe8594d250d46e01a09d5b | /Acomodar Fotos/changeName-FullyDiferent.py | f3edaf29b4c99a2fc3f2dc89b1fd4a2c89502ce2 | [] | no_license | LichaDC/GitProgramming | 84de8543190269bbcbde08b538b51ceba85ccb1f | e99c30bcabee8350406d708943f717ab74739ebd | refs/heads/master | 2020-03-28T07:54:03.684310 | 2018-11-15T20:23:41 | 2018-11-15T20:23:41 | 147,595,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,919 | py | from PIL import Image
import os, time
# Map 'MM' month numbers to Spanish folder labels.
# (Not referenced elsewhere in this file — kept for compatibility.)
months = {
    '01': '01 Enero',
    '02': '02 Febrero',
    '03': '03 Marzo',
    '04': '04 Abril',
    '05': '05 Mayo',
    '06': '06 Junio',
    '07': '07 Julio',
    '08': '08 Agosto',
    '09': '09 Septiembre',
    '10': '10 Octubre',
    '11': '11 Noviembre',
    '12': '12 Diciembre'
}

# Filename template, evaluated with eval() inside changeName():
# expects d = [year, month, day, ...], i = sequence number, ext = extension;
# yields e.g. 'IMG-2020 06 01 - 0001.JPG'.
schemeP = "'IMG-{} {} {} - {}{}'.format(d[0], d[1], d[2], f'{i:04}', ext)"
def monthToNum(shortMonth):
    """Translate an English three-letter month abbreviation ('Jan'..'Dec')
    into its zero-padded month number ('01'..'12').

    Raises KeyError for any other value, as before.
    """
    abbreviations = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                     'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
    table = {name: '{:02d}'.format(number)
             for number, name in enumerate(abbreviations, start=1)}
    return table[shortMonth]
def date(path):
    """Return the capture timestamp of an image/video as a list of six
    strings: [year, month, day, hour, minute, second].

    Tries the EXIF DateTimeOriginal tag (0x9003 == 36867) first; for files
    without EXIF (videos, stripped images) falls back to the filesystem
    modification time.
    """
    try:
        img = Image.open(path)
        # EXIF value looks like 'YYYY:MM:DD HH:MM:SS'.
        info = img._getexif()[36867]
        info = info.replace(':', ' ')
        info = info.split()
    except:
        # Fallback: mtime via time.ctime, e.g. 'Mon Jun  1 10:20:30 2020'.
        # (the local 'date' below shadows this function's name; local only)
        date = int(os.path.getmtime(path))
        info = time.ctime(date)
        info = info.replace(':', ' ')
        info = info.split()
        # Reshape ctime fields into [year, month, day, hour, min, sec]:
        info.remove(info[0])     # drop the weekday
        info.insert(0, info[5])  # move the year to the front
        info.pop()               # drop the trailing duplicate year
        info[1] = monthToNum(info[1])  # month abbreviation -> 'MM'
    return info
def newer(path1, path2):  # is path1 newer than path2 ?
    """Return True when path1's capture time-of-day is strictly later than
    path2's; False on a tie or when path1 is earlier.

    Only hour/minute/second are compared — within changeName() the two files
    already share the same year/month/day name prefix.  The fields are
    zero-padded strings, so lexicographic comparison matches numeric order.
    """
    stamp1 = date(path1)
    stamp2 = date(path2)
    # Indices 3..5 of a date() result are [hour, minute, second].
    return stamp1[3:6] > stamp2[3:6]
def changeName(pathFile, ext):
    """Rename pathFile to the canonical 'IMG-Y M D - NNNN.EXT' name built
    from its capture date (see schemeP), giving earlier-shot files lower
    sequence numbers.

    When the canonical name is taken by a *newer* file, that file is moved
    aside to a temporary '...-j' name and the module-level ``unordered``
    flag is set, so the main loop performs another full pass.
    """
    path, file = os.path.split(pathFile)
    d = date(pathFile)
    i = 1
    while True:
        # eval(schemeP) expands to 'IMG-{Y} {M} {D} - {i:04}{ext}'.
        correctName = os.path.join(path, eval(schemeP))
        if not os.path.isfile(correctName): # Check if doesn't exists a file with the correct name.
            print('[ → ] - I renamed', file, ' by ', eval(schemeP))
            os.replace(pathFile, correctName)
            break
        elif correctName == pathFile:
            # Already correctly named; nothing to do.
            pass
            break
        else:
            if newer(correctName, pathFile): # Check if it's older than the correct named file. (correctName is newer)
                j = 1
                while True:
                    if not os.path.isfile(os.path.join(path, '{}-{}{}'.format(eval(schemeP)[:-4], j, ext))): # Check if the temporal name doesn't exist
                        print('[ → ] - I renamed', eval(schemeP), ' by {}-{}{}'.format(eval(schemeP)[:-4], j, ext))
                        os.replace(correctName, os.path.join(path, '{}-{}{}'.format(eval(schemeP)[:-4], j, ext))) # Give temporal name to the correct named file
                        # Another pass is needed to place the displaced file.
                        global unordered
                        unordered = True
                        break
                    j += 1
                print('[ → ] - I renamed', file, ' by ', eval(schemeP))
                os.replace(pathFile, correctName)
                break
            else:
                # Existing holder is older: try the next sequence number.
                i += 1
# --- MAIN --- #
directory = os.getcwd()
unordered = True
# Keep rescanning the directory until one full pass completes without
# changeName() having to shuffle an already-renamed file aside (it reports
# that by setting the module-level `unordered` flag).
while unordered:
    unordered = False
    for dir in os.scandir(directory):
        dir = dir.path  # NOTE(review): 'dir' shadows the builtin (local only)
        if dir[-4:] == '.JPG' or dir[-4:] == '.jpg' or dir[-4:] == '.MPG' or dir[-4:] == '.mpg':
            # Normalise the extension used for the new name.
            if dir[-4:] == '.JPG' or dir[-4:] == '.jpg':
                ext = '.JPG'
            else:
                ext = '.MPG'
            if not unordered:
                changeName(dir, ext)
            else:
                # A file was displaced: restart the scan from the top.
                break

print()
print("| ----- - -----[ Every photo and video has the correct name ]----- - ----- |")
| [
"lisandrodc06@gmail.com"
] | lisandrodc06@gmail.com |
0abd01b4de8d1bcc1e337fe1c49bb5dbe7f3e2de | 81dbd272716480e7e8f21a9d2dbcbaf2c1c42398 | /src/app.py | 5c649fd90fd00602f45a9026a499a705951c4ed2 | [] | no_license | MatthieuMayer/palo_itest | c97c2decb8d53068fbd4f879f66c3b4a547aad04 | 7f9223562339b64440b79dd635b65cc00694c512 | refs/heads/master | 2023-04-05T05:59:11.275808 | 2021-04-07T19:55:53 | 2021-04-07T19:55:53 | 354,943,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,542 | py | # -*- coding: utf-8 -*-
# ------------------------------------------ Module Imports & Definitions -------------------------------------------- #
# Standard package(s) import ----------------------------------------------------------------------
import flask
from flask import send_file
# Project packages(s) import ----------------------------------------------------------------------
from src.top10_categories import *
from src.paper_keywords import *
# ------------------------------------------- App and Routes Definition ---------------------------------------------- #
# initialise flask app
app = flask.Flask(__name__)


# Route: the ten most frequent arXiv categories.
@app.route("/top10_categories", methods=["GET"])
def top10_cat():
    """Compute the top-10 arXiv categories for 2020 and return them as a
    display string."""
    analyzer = Top10('./../data/arxiv.db')  # SQLite dump of arXiv metadata
    # Pull the columns the analysis needs, derive a year per version, rank.
    analyzer.query_columns('ARXIV', ['categories', 'versions'])
    analyzer.extract_version_year()
    analyzer.top10_categories(2020)
    return analyzer.display
def _analyze_paper(paper_id):
    """Download the given arXiv paper, render its wordcloud (written to
    'wordcloud.png'), extract its main keywords, then delete the PDF.

    Returns the populated Keywords instance.  Extracted to remove the
    copy-pasted pipeline the two routes below previously duplicated.
    """
    paper_kw = Keywords(str(paper_id), 20)
    paper_kw.dl_paper()
    paper_kw.wordcloud()
    paper_kw.find_keywords()
    paper_kw.remove_pdf()
    return paper_kw


@app.route("/keywords_txt/<paper_id>", methods=["GET"])
def kw_txt(paper_id):
    """Return the main keywords of the paper as text.

    (The old docstring, copy-pasted from another project, wrongly described
    this as a model prediction endpoint.)
    """
    return _analyze_paper(paper_id).display


@app.route("/keywords_im/<paper_id>", methods=["GET"])
def kw_im(paper_id):
    """Return the paper's keywords as a wordcloud PNG image."""
    _analyze_paper(paper_id)
    return send_file('wordcloud.png', mimetype='image/png')
# Launch the development server (blocking call). Port 80 usually requires
# elevated privileges.
app.run(host='localhost', port=80)
| [
"matthieu.mayer@sogeti.com"
] | matthieu.mayer@sogeti.com |
9f95168b1eedd2a04d670f60fb12bc9e7240a22c | 3d5e69cd8b889372e41bb1b69d119be9b886a409 | /0034_FindFirstAndLastPositionOfElementInSortedArray/FindFirstAndLastPositionOfElementInSortedArray.py | c1cd7083753d007cd35f170592d4b7009139e7a4 | [] | no_license | trilamsr/LeetCode | f55babf58efa1292c92f65c5d338b95782ac949a | 8b962e2affad32dafc9ab07fd88e596da213a00b | refs/heads/master | 2020-09-06T06:08:25.785595 | 2020-06-11T22:04:00 | 2020-06-11T22:04:00 | 220,347,389 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,138 | py | class Solution:
def searchRange(self, nums: List[int], target: int) -> List[int]:
GTE = lambda x: target <= nums[x]
LTE = lambda x: target >= nums[x]
lo = lower_bound(nums, target, GTE)
hi = upper_bound(nums, target, LTE)
return [lo, hi] if lo <= hi else [-1,-1]
def lower_bound (nums, target, prop):
lo, hi = 0, len(nums)-1
while lo <= hi:
mid = (lo+hi)//2
if prop(mid):
hi = mid-1
else:
lo = mid+1
return lo
def upper_bound(nums, target, prop):
lo, hi = 0, len(nums)-1
while lo <= hi:
mid = (lo+hi)//2
if prop(mid):
lo = mid +1
else:
hi = mid-1
return hi
def lower_bound(nums, target):
    """Index of the first element >= target in sorted nums (len(nums) when
    every element is smaller)."""
    left, right = 0, len(nums) - 1
    while left <= right:
        middle = (left + right) // 2
        if nums[middle] >= target:
            right = middle - 1
        else:
            left = middle + 1
    return left


class Solution:
    def searchRange(self, nums: List[int], target: int) -> List[int]:
        """First/last index of target via lower_bound of target and of
        target + 1; [-1, -1] when absent."""
        first = lower_bound(nums, target)
        last = lower_bound(nums, target + 1) - 1
        return [first, last] if first <= last else [-1, -1]
'''
small optimization:
[5,7,7,8,8,10]; find 8
so we found lower_bound and got 3
[5,7,7,8,8,10]
^
on our next binary search: we can start from [3] instead of [0]
[5,7,7,8,8,10]
^---^
start from here
# but this is still O(log(N))
# example: [8,8,8,8,8,8,8,8,8,8,8,8,9]
^(start here on the 2nd iteration), but still small optimization
def lower_bound(nums, lo, target):
hi = len(nums) - 1
while lo <= hi:
mid = (lo + hi) // 2
if nums[mid] >= target:
hi = mid - 1
else:
lo = mid + 1
return lo
class Solution:
def searchRange(self, nums: List[int], target: int) -> List[int]:
lo = lower_bound(nums, 0, target)
hi = lower_bound(nums, lo, target + 1) - 1
return [lo, hi] if lo <= hi else [-1, -1]
''' | [
"tree@Tree.local"
] | tree@Tree.local |
cb9a875d7dbfc09ee81ee33082a576bec3973a8d | 18ccc4cd6f2937498fd5f487862b1451a1c8cf29 | /Main/entity.py | e78c2f1792e41571cb38fc6a05272751ee60cfd3 | [] | no_license | pei91/FirstRoguelikeGame | 3b4190bf57cbc60c9150b9eb5b888f341681d53f | dc7464d7cb414d79fd3efec3f5393c1f647be059 | refs/heads/master | 2022-12-15T15:04:02.638043 | 2020-09-13T03:44:57 | 2020-09-13T03:44:57 | 295,074,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,554 | py | """
A generic object to represent players, enemies, items, etc.
"""
import tcod as libtcod
import math
from render_functions import RenderOrder
class Entity:
    """
    A generic object to represent players, enemies, items, etc.
    """
    def __init__(self, x, y, char, color, name, blocks=False,
                 render_order=RenderOrder.CORPSE, fighter=None, ai=None,
                 item=None, inventory=None):
        # Map position, on-screen glyph and draw colour.
        self.x = x
        self.y = y
        self.char = char
        self.color = color
        self.name = name
        self.blocks = blocks            # True when this entity blocks movement
        self.render_order = render_order
        # Optional components; each gets a back-reference so a component can
        # reach the entity it is attached to.
        self.fighter = fighter
        self.ai = ai
        self.item = item
        self.inventory = inventory

        if self.fighter:
            self.fighter.owner = self

        if self.ai:
            self.ai.owner = self

        if self.item:
            self.item.owner = self

        if self.inventory:
            self.inventory.owner = self

    def move(self, dx, dy):
        # Move the entity by a given amount
        self.x += dx
        self.y += dy

    def move_towarts(self, target_x, target_y, game_map, entities):
        """Take one straight-line step towards (target_x, target_y), unless
        the destination tile is a wall or holds a blocking entity.

        NOTE(review): divides by zero when the entity already stands on the
        target — presumably callers only invoke it at distance >= 1; confirm.
        """
        dx = target_x - self.x
        dy = target_y - self.y
        distance = math.sqrt(dx ** 2 + dy ** 2)

        # Normalise to a single-tile step in roughly the right direction.
        dx = int(round(dx / distance))
        dy = int(round(dy / distance))

        if not (game_map.is_blocked(self.x + dx, self.y + dy) or
                get_blocking_entities_at_location(entities, self.x + dx, self.y + dy)):
            self.move(dx, dy)

    def move_astar(self, target, entities, game_map):
        """Step towards ``target`` along an A* path that avoids walls and
        other blocking entities; falls back to move_towarts when no short
        path exists."""
        # Create a FOV map that has the dimensions of the map
        fov = libtcod.map_new(game_map.width, game_map.height)

        # Scan the current map each turn and set all the walls as unwalkable
        for y1 in range(game_map.height):
            for x1 in range(game_map.width):
                libtcod.map_set_properties(fov, x1, y1, not game_map.tiles[x1][y1].block_sight,
                                           not game_map.tiles[x1][y1].blocked)

        # Scan all the objects to see if there are objects that must be navigated around
        # Check the object isn't self or the target (so the start and the end points are free)
        # The AI class handles the situation if self is next to the target so it will not use this A* functions anyway
        for entity in entities:
            if entity.blocks and entity != self and entity != target:
                # Set the tile as a wall so it must be navigated around
                libtcod.map_set_properties(fov, entity.x, entity.y, True, False)

        # Allocate a A* path
        # The 1.41 is the normal diagonal cost of moving, could be 0.0 if diagonal prohibited
        my_path = libtcod.path_new_using_map(fov, 1.41)

        # Compute the path between self's coord and target's
        libtcod.path_compute(my_path, self.x, self.y, target.x, target.y)

        # Check if the path exists, and in this case, also the path is shorter than 25 tiles
        # The path size matters if you want the monster to use alternative longer paths
        # (for example through other rooms) if for example the player is in a corridor
        # It makes sense to keep path size relatively low to keep the monsters from running around
        # the map if there's an alternative path really far away
        if not libtcod.path_is_empty(my_path) and libtcod.path_size(my_path) < 25:
            # Find the next coord int the computed full path
            x, y = libtcod.path_walk(my_path, True)
            if x or y:
                # Set self's coord to the next path tile
                self.x, self.y = x, y
        else:
            # Keep the old move function as a backup so if no paths
            # (for example another monster blocks a corridor)
            # it will still try to move towards the player (closer to the corridor opening)
            self.move_towarts(target.x, target.y, game_map, entities)

        # delete to free memory
        libtcod.path_delete(my_path)

    def distance_to(self, other):
        """Euclidean distance from this entity to ``other``."""
        dx = other.x - self.x
        dy = other.y - self.y
        return math.sqrt(dx ** 2 + dy **2)
# Loops through the entities, if one is blocking and at (x,y), return it
def get_blocking_entities_at_location(entities, destination_x, destination_y):
    """Return the first blocking entity occupying the given tile, or None
    when the tile is free."""
    return next(
        (candidate for candidate in entities
         if candidate.blocks
         and candidate.x == destination_x
         and candidate.y == destination_y),
        None,
    )
"peichuan@buffalo.edu"
] | peichuan@buffalo.edu |
e2050ae7ec3732efd1bcb13c76ce0d0125fae948 | 8a47a76b839bf9169a39a5563d8fbac09f5b8038 | /graph_properties.py | 0bc7e0a3542ca67feedb04a191bbc67401fc7b85 | [] | no_license | snehass135/Python-Programs | 7f00b858a593706d4abfe7ca57140588d7786ed5 | 044800f8e4a7d313cd90fa16945877d795dda7b3 | refs/heads/master | 2022-12-29T19:14:29.312456 | 2020-10-15T18:11:51 | 2020-10-15T18:11:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,686 | py | import networkx as n
import matplotlib.pyplot as plt
import sys
### Functions ###
def add_nodes(graph):
    """Prompt for space-separated node names on stdin and add them to graph.

    Returns the (mutated) graph for chaining.
    """
    print("Enter the name of nodes to add to graph: ")
    names = [str(token) for token in input().split()]
    graph.add_nodes_from(names)
    return graph
def add_edges(graph):
    """Prompt for two space-separated node names on stdin and connect them
    with an edge.

    Returns the (mutated) graph for chaining.
    """
    print("Enter the name of nodes to connect in graph: ")
    endpoints = tuple(str(token) for token in input().split())
    graph.add_edge(*endpoints)
    return graph
def info(graph):
    """Function that take the graph whose information is to be displayed
    Information includes -
    number of nodes - Total number of nodes
    number of edges - Total number of edges
    Average degree - Degree of nodes (Indegree+ Outdegree)
    radius - Minimum eccentricity
    diameter - Maximum eccentricity
    eccentricity - Eccentricity of a node v is the maximum distance from v to
                   all other nodes in graph.
    center - Set of nodes with eccentricity equal to radius.
    periphery - Set of nodes with eccentricity equal to the diameter.
    Density
    """
    try:
        print("-"*10,"Node Attributes","-"*10)
        print(n.info(graph))
        print("-"*10,"Distance Measures","-"*10)
        print("radius: %d" % n.radius(graph))
        print("diameter: %d" % n.diameter(graph))
        print("eccentricity: %s" % n.eccentricity(graph))
        print("center: %s" % n.center(graph))
        print("periphery: %s" % n.periphery(graph))
        print("density: %s" % n.density(graph))
    except Exception as ex:
        # Distance measures (radius/diameter/...) raise for empty or
        # disconnected graphs; whatever printed before the failure stays.
        print(ex)
    return graph
def save():
try:
filename = input("Enter filename to save graph as:")
plt.savefig(f"{0}.png".format(filename))
print("File Saved Successfully")
except Exception as e:
print("Something went wrong: ",e)
return None
#### MAIN ###
G = n.Graph()
print("Welcome to world of Graphs\n")
while(True):
print("What you want to perform with graphs:\n1.Add node \
\n2.Add edge\n3.Display Information\n4.Save Graph")
query = input().lower()
if (query == "1" or query=="Add node"):
add_nodes(G)
n.draw(G,with_labels=True)
plt.show()
elif (query == "2" or query=="Add edge"):
add_edges(G)
n.draw(G,with_labels=True)
plt.show()
elif (query == "3" or query=="Display Information"):
info(G)
elif (query =="4" or query=="save"):
save()
else:
sys.exit()
| [
"noreply@github.com"
] | snehass135.noreply@github.com |
5b1ab5ecdc9c7d23974f8baeee363624f3d5b617 | dcf65c5811010a38c45c595f1582c94b6a78dd3c | /sites/social/google/__init__.py | 16db10c1090d64b4a39947117690e2ffb086c777 | [] | no_license | jerryxing98/Tully | 7d4d9ee177902cbda0601c7f02715d29a417c2f6 | 78bb082ce16fd3b814d4d590a875bfbcdfdfe5fb | refs/heads/master | 2021-06-25T01:42:20.388899 | 2014-04-11T14:20:33 | 2014-04-11T14:20:33 | 11,405,119 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,101 | py | #!/usr/bin/env python
#coding=utf-8
import urllib2, json, urllib, time
from django.conf import settings
def get_access_token(code):
auth_url = settings.GOOGLE_ACCESS_TOKEN_ENDPOINT
body = urllib.urlencode({
'code': code,
'client_id': settings.GOOGLE_API['client_id'],
'client_secret': settings.GOOGLE_API['client_secret'],
'redirect_uri': settings.GOOGLE_API['redirect_urls'],
'grant_type': 'authorization_code'
})
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
req = urllib2.Request(auth_url, body, headers)
resp = urllib2.urlopen(req)
data = json.loads(resp.read())
return data['access_token']
def get_user_info(access_token):
if access_token:
userinfo_url = settings.GOOGLE_USERINFO_ENDPOINT
query_string = urllib.urlencode({'access_token': access_token})
resp = urllib2.urlopen("%s?%s" % (userinfo_url, query_string))
data = json.loads(resp.read())
return data
def get_blog_user(user_data):
if user_data:
blog_user = {}
blog_user['username'] = user_data['name']
blog_user['email'] = user_data['email']
blog_user['avatar'] = user_data['picture']
return blog_user
def get_short_url(url):
data = json.dumps({
'longUrl': url,
})
headers = {"Content-Type": "application/json"}
req = urllib2.Request(settings.GOOGLE_URL_SHORTENER_ENDPOINT, data, headers)
resp = urllib2.urlopen(req)
return json.loads(resp.read()).get('id', None)
class GooglePlusClient(object):
def __init__(self, **kwargs):
if 'access_token' in kwargs:
self.access_token = kwargs['access_token']
if 'refresh_token' in kwargs:
self.refresh_token = kwargs['refresh_token']
if 'expires' in kwargs:
self.expires = kwargs['expires']
elif 'expires_in' in kwargs:
self.expires = time.time() + kwargs['expires_in']
def _refresh_token(self):
auth_url = settings.GOOGLE_ACCESS_TOKEN_ENDPOINT
body = urllib.urlencode({
'client_id': settings.GOOGLE_API['client_id'],
'client_secret': settings.GOOGLE_API['client_secret'],
'refresh_token': self.refresh_token,
'grant_type': 'refresh_token'
})
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
req = urllib2.Request(auth_url, body, headers)
resp = urllib2.urlopen(req)
data = json.loads(resp.read())
self.access_token = data['access_token']
self.expires = time.time() + data['expires_in']
def _get_access_token(self):
if not hasattr(self, 'access_token'):
self._refresh_token()
return self.access_token | [
"jerryxing98@gmail.com"
] | jerryxing98@gmail.com |
31598f939a5d375801ffa4549f2ce013baae15b0 | aaca39e751d9045ac6f94b9982f5d296d4a51724 | /709._To_Lower_Case/python/709. To Lower Case.py | d31bf167cc548e30317de16fb8a3ae9d7229cd6f | [] | no_license | panxl6/leetcode | 5299586148f68fa22b28b7c147aa8620de3664ef | 0dbfa41a96872a4c4042696a51bea0909909b4a6 | refs/heads/master | 2021-07-16T05:55:40.820163 | 2021-05-05T08:15:50 | 2021-05-05T08:15:50 | 54,037,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,205 | py | # author: 潘兴龙
# date: 2019-02-11 08:25
"""
1) 题型剖析:典型的操作实现题
2) 解法:遇到大写的字母就转换
3) 知识点讲解: 矩阵的操作还有很多,旋转等。
4) follow up:
5) 时间复杂度分析:矩阵遍历,时间复杂度为o(n*m)
6) 备注:
这一类的题目经常出现在手写bug free的代码面试题中。
a. 遗漏了非字母的判断;
b. 记不清是加32,还是减32;
7) 评测结果分析:
"""
class Solution:
def toLowerCase(self, str: str) -> str:
diff = ord('a') - ord('A')
result = []
for char in str:
if not self.isAlpha(char):
result.append(char)
continue
if char > 'Z':
result.append(char)
else:
result.append(chr(ord(char) + diff))
return ''.join(result)
def isAlpha(self, char) -> bool:
if char >= 'a' and char <= 'z':
return True
if char >= 'A' and char <= 'Z':
return True
return False
| [
"sysupanxl6@gmail.com"
] | sysupanxl6@gmail.com |
60bd54bd38d11ab70479c900f2af0537fe11b75e | ba0d2a8c3532d93c4f0dabb60892db350c745433 | /app/app/settings.py | e5d578e72e263c120baf7c4346a30a9f45020d2a | [
"MIT"
] | permissive | jcazallasc/burriking-citibox | 94c0574397d05f98ed8fdb51dfdf4af25114b8e5 | 30f0a4520bedb6b9ba613e8cf279b37f0cc60704 | refs/heads/main | 2023-01-23T17:47:18.094910 | 2020-11-17T08:54:04 | 2020-11-17T08:54:04 | 312,210,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,419 | py | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.2.13.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%pyb@e8i+1b62un8+-q3!va5=+=3ku1xy(o0l=v%-p60_b6c6n'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'api',
'orders',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
REST_FRAMEWORK = {
'DEFAULT_PARSER_CLASSES': [
'rest_framework.parsers.JSONParser',
]
}
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
DEFAULT_CURRENCY_CODE = 'EUR'
DEFAULT_LOCALE = 'es_ES'
| [
"javier.cazallas@Javiers-MacBook-Air.local"
] | javier.cazallas@Javiers-MacBook-Air.local |
bab9256dcbc3b248409060312975f24a2aa03668 | 69f4410c1f435988239c323ea98fd125d5caa003 | /信息内容安全/scrapy/tutorial/tutorial/pipelines.py | 8d93de3a2cbeed995f0f20113e9378e8bc7d6c07 | [
"MIT"
] | permissive | zhcy2018/homeworks | ab832807013d6b8cb9a58534623c7ccffd24c2e0 | 3aaa85e9fcbd999cdb0b05eac76906383437832e | refs/heads/main | 2023-05-07T13:45:56.032530 | 2021-06-02T11:12:35 | 2021-06-02T11:12:35 | 352,532,772 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import json
import time
import random
import redis
class TutorialPipeline:
pool = redis.ConnectionPool(host='127.0.0.1', port=6379, decode_responses=True)
r = redis.Redis(connection_pool=pool)
def process_item(self, item, spider):
self.r.lpush('book_review1',json.dumps(item['book_info']))
return item
| [
"980514750@qq.com"
] | 980514750@qq.com |
dbd232948ff0f4296d567c45e201cb6f89018bda | 5f394d0109932ebb3ea66680ff329102ab90b65c | /Status_module.py | 37c27ad91e9f11c7dd4d914dfa845e4110137344 | [] | no_license | python-project-study/Command-handler_modules | d65fd17f620c38f3ab610f829169dab9815a935d | b3713abb2fc75f7fc9a615c96b907cc91615a90c | refs/heads/master | 2020-05-30T16:53:35.699308 | 2019-06-03T04:00:56 | 2019-06-03T04:00:56 | 189,858,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,388 | py | ### --- Status module
##### class: status command #####
class Status :
def __init__(self) :
self.my_status = "my status"
def run_status(self, none) :
print("=============== Status ===================")
print(" RS232 : Baud Rate=%dbps, Data=%dbit, Parity=None, Stop=1bit"
%(self.serial_b_rate, self.serial_bytesize))
print(" Serial Port = ", self.serial_port)
print("------------------------------------------")
print(" Network Setting(Telnet Server) Status")
print(" Host IP Address = ", self.ip_address)
print(" TCP Port = ", self.tcp_port)
print("------------------------------------------")
if self.in_num != 0 or self.out_num != 0 :
print("\n----------- Info - Video switch file - \"Input\" ----------------")
for x in range(self.in_num) :
self.input_list.append(self.input_data[x])
print("Input",self.input_list[x]["sysid"], ":", self.input_list[x])
print("\n----------- Info - Video switch file - \"Output\" ---------------")
for x in range(self.out_num) :
self.output_list.append(self.output_data[x])
print("Output",self.output_list[x]["sysid"], ":", self.output_list[x])
print("-------------------------------------------------------------------")
| [
"jisookk33@gmail.com"
] | jisookk33@gmail.com |
5760acd1de2dc2c41b016403e7abfea505df1759 | c4b516432c7f39d0c2ff5ba778f9ce099666ef07 | /scripts/radar_pack.py | 888ab8c8b8d22a8409dad318e193b70dba34d4c8 | [
"MIT"
] | permissive | IncubatorShokuhou/cpol_processing | 6e6a129637f7405f27f43154e8678ed0b7f6b299 | 4a27118678bbdcb5475ec136b0ec0425fcb48876 | refs/heads/master | 2020-09-30T14:01:11.242417 | 2019-10-08T07:42:16 | 2019-10-08T07:42:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,236 | py | """
Raw radar PPIs processing. Quality control, filtering, attenuation correction,
dealiasing, unfolding, hydrometeors calculation, rainfall rate estimation.
Tested on CPOL.
@title: cpol_processing
@author: Valentin Louf <valentin.louf@monash.edu>
@institution: Monash University
@date: 13/03/2019
@version: 2
.. autosummary::
:toctree: generated/
chunks
main
"""
# Python Standard Library
import os
import sys
import glob
import argparse
import datetime
import traceback
import crayons
from concurrent.futures import TimeoutError
from pebble import ProcessPool, ProcessExpired
def chunks(l, n):
"""
Yield successive n-sized chunks from l.
From http://stackoverflow.com/a/312464
"""
for i in range(0, len(l), n):
yield l[i:i + n]
def main(inargs):
"""
It calls the production line and manages it. Buffer function that is used
to catch any problem with the processing line without screwing the whole
multiprocessing stuff.
Parameters:
===========
infile: str
Name of the input radar file.
outpath: str
Path for saving output data.
"""
import warnings
import traceback
infile, outpath, sound_dir, use_unravel = inargs
with warnings.catch_warnings():
warnings.simplefilter('ignore')
import cpol_processing
try:
cpol_processing.process_and_save(infile, outpath, sound_dir=sound_dir, use_unravel=use_unravel)
except Exception:
traceback.print_exc()
return None
return None
def welcome_message():
"""
Display a welcome message with the input information.
"""
print("#" * 79)
print("")
print(" " * 25 + crayons.red("Raw radar PPIs production line.\n", bold=True))
print("\t- Input data directory path is: " + crayons.yellow(INPATH))
print("\t- Output data directory path is: " + crayons.yellow(OUTPATH))
print("\t- Radiosounding directory path is: " + crayons.yellow(SOUND_DIR))
print(f"\t- The process will occur between {crayons.yellow(START_DATE)} and {crayons.yellow(END_DATE)}.")
if USE_UNRAVEL:
print("\t- " + crayons.yellow("UNRAVEL") + " will be used as dealiasing algorithm.")
else:
print("\t- " + crayons.yellow("REGION-BASED") + " will be used as dealiasing algorithm.")
print("\n" + "#" * 79 + "\n")
if __name__ == '__main__':
"""
Global variables definition.
"""
# Main global variables (Path directories).
INPATH = "/g/data/hj10/cpol_level_1a/v2019/ppi/"
OUTPATH = '/g/data/hj10/cpol_level_1b/v2019_new/'
SOUND_DIR = "/g/data2/rr5/CPOL_radar/DARWIN_radiosonde"
# Parse arguments
parser_description = """Raw radar PPIs processing. It provides Quality
control, filtering, attenuation correction, dealiasing, unfolding, hydrometeors
calculation, and rainfall rate estimation."""
parser = argparse.ArgumentParser(description=parser_description)
parser.add_argument(
'-s',
'--start-date',
dest='start_date',
default=None,
type=str,
help='Starting date.',
required=True)
parser.add_argument(
'-e',
'--end-date',
dest='end_date',
default=None,
type=str,
help='Ending date.',
required=True)
parser.add_argument('--unravel', dest='unravel', action='store_true')
parser.add_argument('--no-unravel', dest='unravel', action='store_false')
parser.set_defaults(unravel=True)
args = parser.parse_args()
START_DATE = args.start_date
END_DATE = args.end_date
USE_UNRAVEL = args.unravel
# Display infos
welcome_message()
# Check date
try:
start = datetime.datetime.strptime(START_DATE, "%Y%m%d")
end = datetime.datetime.strptime(END_DATE, "%Y%m%d")
if start > end:
parser.error('End date older than start date.')
date_range = [start + datetime.timedelta(days=x) for x in range(0, (end - start).days + 1, )]
except ValueError:
parser.error('Invalid dates.')
sys.exit()
for day in date_range:
input_dir = os.path.join(INPATH, str(day.year), day.strftime("%Y%m%d"), "*.*")
flist = sorted(glob.glob(input_dir))
if len(flist) == 0:
print('No file found for {}.'.format(day.strftime("%Y-%b-%d")))
continue
print(f'{len(flist)} files found for ' + day.strftime("%Y-%b-%d"))
for flist_chunk in chunks(flist, 32):
arglist = [(f, OUTPATH, SOUND_DIR, USE_UNRAVEL) for f in flist_chunk]
with ProcessPool() as pool:
future = pool.map(main, arglist, timeout=1200)
iterator = future.result()
while True:
try:
result = next(iterator)
except StopIteration:
break
except TimeoutError as error:
print("function took longer than %d seconds" % error.args[1])
except ProcessExpired as error:
print("%s. Exit code: %d" % (error, error.exitcode))
except Exception:
traceback.print_exc()
| [
"v.louf@bom.gov.au"
] | v.louf@bom.gov.au |
2e09a4c9e3a815569c8bedee11ea75a1e0e77ebd | cb5b2234bbd56aee74e39b2b9b84c104ab076434 | /gate_entry_app/admin.py | b50e9a507f43cbcbc0a4107ed5dde0ecc9910d16 | [] | no_license | YernarKambar/gate-entry-system | 8bbc74bcbd82b50512f38b046f35283ddedf515f | 49aad328e3bada684e7a92cb746b1f729863badb | refs/heads/master | 2021-09-24T23:07:34.787512 | 2020-05-10T09:30:48 | 2020-05-10T09:30:48 | 249,390,443 | 0 | 1 | null | 2021-09-22T18:48:26 | 2020-03-23T09:43:23 | CSS | UTF-8 | Python | false | false | 196 | py | from django.contrib import admin
from .models import Person, Role, Gate, AttendanceHistory, Device
# password superuser - qwe
admin.site.register([Person, Role, Gate, AttendanceHistory, Device])
| [
"ernar.k.20@gmail.com"
] | ernar.k.20@gmail.com |
5a0a1efd03c48ad2748c47249854bf9b093606d7 | f615f1d5d027959b365da22419b07dcbec2094d4 | /architectblog/blog/models.py | 6f738416f7d6bf5734dac496491ffa087e6e420b | [] | no_license | mbryantms/architectblog | 823386aacc54706b233c7c4a8a6d48166b4c60c4 | 555b9325fd00cd189f94226b7315ab4dbe774539 | refs/heads/master | 2023-07-16T18:29:36.239842 | 2021-08-27T20:06:58 | 2021-08-27T20:06:58 | 377,636,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,850 | py | from django.db import models
from django.utils.timezone import now
from django.contrib.postgres.search import SearchVectorField
from django.contrib.postgres.indexes import GinIndex
from django.conf import settings
from django.utils.text import Truncator
from django.utils.html import strip_tags, escape
from tinymce.models import HTMLField
from django.urls import reverse
from django.utils.safestring import mark_safe
from django.utils import timezone
from collections import Counter
import re
tag_re = re.compile("^[a-z0-9]+$")
class Tag(models.Model):
tag = models.SlugField(unique=True)
def __str__(self):
return self.tag
def get_absolute_url(self):
return reverse("blog:tag_detail", args=[self.tag])
def get_link(self, reltag=False):
return mark_safe(
'<a href="%s"%s>%s</a>'
% (self.get_absolute_url(), (reltag and ' rel="tag"' or ""), self)
)
def get_reltag(self):
return self.get_link(reltag=True)
def entry_count(self):
return self.entry_set.count()
def link_count(self):
return self.blogmark_set.count()
def quote_count(self):
return self.quotation_set.count()
def total_count(self):
return self.entry_count() + self.link_count() + self.quote_count()
def all_types_queryset(self):
entries = (
self.entry_set.all()
.annotate(type=models.Value("entry", output_field=models.CharField()))
.values("pk", "created", "type")
)
blogmarks = (
self.blogmark_set.all()
.annotate(type=models.Value("blogmark", output_field=models.CharField()))
.values("pk", "created", "type")
)
quotations = (
self.quotation_set.all()
.annotate(type=models.Value("quotation", output_field=models.CharField()))
.values("pk", "created", "type")
)
return entries.union(blogmarks, quotations).order_by("-created")
def get_related_tags(self, limit=10):
"""Get all items tagged with this, look at /their/ tags, order by count"""
if not hasattr(self, "_related_tags"):
counts = Counter()
for klass, collection in (
(Entry, "entry_set"),
(Blogmark, "blogmark_set"),
(Quotation, "quotation_set"),
):
qs = klass.objects.filter(
pk__in=getattr(self, collection).all()
).values_list("tags__tag", flat=True)
counts.update(t for t in qs if t != self.tag)
tag_names = [p[0] for p in counts.most_common(limit)]
tags_by_name = {t.tag: t for t in Tag.objects.filter(tag__in=tag_names)}
# Need a list in the correct order
self._related_tags = [tags_by_name[name] for name in tag_names]
return self._related_tags
class BaseModel(models.Model):
created_time = models.DateTimeField(
verbose_name="Creation time", default=timezone.now
)
tags = models.ManyToManyField(Tag, blank=True)
slug = models.SlugField(max_length=64)
latitude = models.FloatField(blank=True, null=True)
longitude = models.FloatField(blank=True, null=True)
metadata = models.JSONField(blank=True, default=dict)
search_document = SearchVectorField(null=True)
@property
def type(self):
return self._meta.model_name
def tag_summary(self):
return " ".join(t.tag for t in self.tags.all())
class Meta:
abstract = True
ordering = ("-created_time",)
indexes = [GinIndex(fields=["search_document"])]
class Series(models.Model):
title = models.CharField(max_length=300)
slug = models.SlugField()
description = models.TextField(blank=True)
class Meta:
verbose_name_plural = "series"
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("series-detail", args=[self.slug])
def get_entries_in_order(self):
return self.entries.order_by("created_time")
class Entry(BaseModel):
STATUS_CHOICES = (
("d", "draft"),
("p", "published"),
)
title = models.CharField("title", max_length=300, unique=True)
content = HTMLField()
pub_time = models.DateTimeField(
"publication time", blank=False, null=False, default=now
)
status = models.CharField(
"status", max_length=1, choices=STATUS_CHOICES, default="p"
)
views = models.PositiveIntegerField("views", default=0)
author = models.ForeignKey(
settings.AUTH_USER_MODEL,
verbose_name="author",
blank=False,
null=False,
on_delete=models.CASCADE,
)
series = models.ForeignKey(
Series, related_name="entries", blank=True, null=True, on_delete=models.SET_NULL
)
def __str__(self):
return (
self.title
if self.title
else Truncator(strip_tags(self.content)).words(15, truncate=" …")
)
def get_absolute_url(self):
return reverse(
"blog:entry_detail",
kwargs={
"slug": self.slug,
"year": self.pub_time.year,
"month": self.pub_time.strftime("%m"),
"day": self.pub_time.day,
},
)
def index_components(self):
return {
"A": self.title,
"C": strip_tags(self.content),
"B": " ".join(self.tags.values_list("tag", flat=True)),
}
class Meta:
verbose_name_plural = "entries"
class Blogmark(BaseModel):
link_url = models.URLField(max_length=1000)
link_title = models.CharField(max_length=255)
via_url = models.URLField(blank=True, null=True)
via_title = models.CharField(max_length=255, blank=True, null=True)
commentary = models.TextField()
is_blogmark = True
def index_components(self):
return {
"A": self.link_title,
"B": " ".join(self.tags.values_list("tag", flat=True)),
"C": self.commentary
+ " "
+ self.link_domain()
+ " "
+ (self.via_title or ""),
}
def __str__(self):
return self.link_title
def link_domain(self):
return self.link_url.split("/")[2]
def word_count(self):
count = len(self.commentary.split())
if count == 1:
return "1 word"
else:
return "%d words" % count
def get_absolute_url(self):
return reverse(
"blog:link_detail",
kwargs={
"slug": self.slug,
"year": self.created_time.year,
"month": self.created_time.strftime("%m"),
"day": self.created_time.day,
},
)
class Quotation(BaseModel):
quotation = models.TextField()
source = models.CharField(max_length=255)
source_url = models.URLField(blank=True, null=True)
is_quotation = True
def index_components(self):
return {
"A": self.quotation,
"B": " ".join(self.tags.values_list("tag", flat=True)),
"C": self.source,
}
def __str__(self):
return self.quotation
def get_absolute_url(self):
return reverse(
"blog:quote_detail",
kwargs={
"slug": self.slug,
"year": self.created_time.year,
"month": self.created_time.strftime("%m"),
"day": self.created_time.day,
},
)
def load_mixed_objects(dicts):
"""
Takes a list of dictionaries, each of which must at least have a 'type'
and a 'pk' key. Returns a list of ORM objects of those various types.
Each returned ORM object has a .original_dict attribute populated.
"""
to_fetch = {}
for d in dicts:
to_fetch.setdefault(d["type"], set()).add(d["pk"])
fetched = {}
for key, model in (
("blogmark", Blogmark),
("entry", Entry),
("quotation", Quotation),
):
ids = to_fetch.get(key) or []
objects = model.objects.prefetch_related("tags").filter(pk__in=ids)
for obj in objects:
fetched[(key, obj.pk)] = obj
# Build list in same order as dicts argument
to_return = []
for d in dicts:
item = fetched.get((d["type"], d["pk"])) or None
if item:
item.original_dict = d
to_return.append(item)
return to_return
| [
"mbryantms@gmail.com"
] | mbryantms@gmail.com |
8e3e9d7bb7b3b9099411eac787873e7da6d3c9e9 | e6de5b03b30e50e004aa272b0fc462789a6860fe | /solves/problem5.py | 581c9106d61bb8cd56b2beb507d92e8db51e6456 | [] | no_license | aoqfonseca/project-euler | 71a100ae9055d8306e0ef68bec1235e47f6b71bc | 6078aaa7fe9d054451071af01df5c1d6ad244bab | refs/heads/master | 2020-05-19T14:44:36.480572 | 2013-01-29T21:20:58 | 2013-01-29T21:20:58 | 2,035,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | number = 20
while True:
is_him = True
for div in range(1,21):
is_him = is_him and (number%div) == 0
if is_him:
break
number+=20
print(number)
| [
"aoqfonseca@gmail.com"
] | aoqfonseca@gmail.com |
ee59ea8489cc423e5053c5e1f04f3b92b54998cf | 15e4e57f1e49ec38fe6c6e86f8309b0105093960 | /python/runFakeRateExtrapolation.py | 6220e321b8681c80102aeda3773d4360f19c1fe6 | [] | no_license | govoni/FlatNtStudy | eb621eb12d3a35bd58cd7f2de2a1c371d81b9eb3 | e58cbfed0c9a5e11822a254a554ecb62a7457ebc | refs/heads/master | 2020-12-24T16:59:17.649955 | 2015-05-13T10:31:05 | 2015-05-13T10:31:05 | 27,048,572 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 18,639 | py | #! /usr/bin/env pythony
## example python python/runLumiExtrapolation.py --datacardDIR output/DataCards_WW_SS_Inclusive/Card1D/lumiExtrapolation/ --inputVariable ptjj --outputDIR computeAsymptotic --makeAsymptotic --injectSignal 1 --nToys 100 --rMin 0.85 --rMax 1.15
import os
import glob
import math
from array import array
import sys
import time
import subprocess
import ROOT
from optparse import OptionParser
from subprocess import Popen
from collections import defaultdict
############################################
# Job steering #
############################################
parser = OptionParser()
parser.add_option('-b', action='store_true', dest='noX', default=False, help='no X11 windows')
##### other basci options for all the methods
parser.add_option('--datacardDIR', action="store", type="string", dest="datacardDIR", default="", help="direcotry where to find datacard")
parser.add_option('--inputVariable', action="store", type="string", dest="inputVariable", default="", help="name of the input variable to be used")
parser.add_option('--fakeType', action="store", type="string", dest="fakeType", default="", help="name of fake process to be rescaled in the card")
parser.add_option('--outputDIR', action="store", type="string", dest="outputDIR", default="", help="output directory")
parser.add_option('--batchMode', action='store_true', dest='batchMode', default=False, help='to run jobs on condor fnal')
parser.add_option('--queque', action="store", type="string", dest="queque", default="")
parser.add_option('--noGenerateCards', action='store_true', dest='noGenerateCards', default=False, help='not generate again the cards')
parser.add_option('--runCombinationOnly', action='store_true', dest='runCombinationOnly', default=False, help='just run comb cards')
parser.add_option('--runWWWZCombination', action='store_true', dest='runWWWZCombination', default=False, help='WW/WZ cards combination')
parser.add_option('--makeAsymptotic', action="store_true", dest="makeAsymptotic", default=0)
parser.add_option('--makeProfileLikelihood', action="store_true", dest="makeProfileLikelihood", default=0)
parser.add_option('--makeMaxLikelihoodFit', action="store_true", dest="makeMaxLikelihoodFit", default=0)
parser.add_option('--injectSignal', action="store", type=float, dest="injectSignal", default=0., help='inject a singal in the toy generation')
parser.add_option('--nToys', action="store", type="int", dest="nToys", default=0, help="number of toys to generate")
parser.add_option('--bruteForce', action="store", type="int", dest="bruteForce", default=0, help="use brute force for profile likelihood")
parser.add_option('--rMin', action="store", type=float, dest="rMin", default=0)
parser.add_option('--rMax', action="store", type=float, dest="rMax", default=10)
fakeRateScaleFactor = [0.1,0.2,0.5,0.7,0.8,0.9,1.1,1.2,1.5,2,2.5,3.0,4.0,5.0,6.5,8.0,10.0];
(options, args) = parser.parse_args()
##########################################
###### Submit batch job for combine ######
##########################################
def submitBatchJobCombine(command, fn, fileNames):
currentDir = os.getcwd();
# create a dummy bash/csh
outScript = open(fn+".sh","w");
outScript.write('#!/bin/bash \n');
outScript.write('cd '+currentDir+'\n');
outScript.write('eval `scram runtime -sh`'+'\n');
outScript.write('cd - \n');
if fileNames.find("COMB") == -1 :
outScript.write('cp '+currentDir+"/"+fileNames+'* ./ \n');
else :
outScript.write('cp '+currentDir+"/"+fileNames+'* ./ \n');
nametemp = fileNames.replace("COMB","UUpp");
outScript.write('cp '+currentDir+"/"+nametemp+'* ./ \n');
nametemp = fileNames.replace("COMB","UUmm");
outScript.write('cp '+currentDir+"/"+nametemp+'* ./ \n');
nametemp = fileNames.replace("COMB","EEpp");
outScript.write('cp '+currentDir+"/"+nametemp+'* ./ \n');
nametemp = fileNames.replace("COMB","EEmm");
outScript.write('cp '+currentDir+"/"+nametemp+'* ./ \n');
nametemp = fileNames.replace("COMB","EUpp");
outScript.write('cp '+currentDir+"/"+nametemp+'* ./ \n');
nametemp = fileNames.replace("COMB","EUmm");
outScript.write('cp '+currentDir+"/"+nametemp+'* ./ \n');
nametemp = fileNames.replace("COMB","UEpp");
outScript.write('cp '+currentDir+"/"+nametemp+'* ./ \n');
nametemp = fileNames.replace("COMB","UEmm");
outScript.write('cp '+currentDir+"/"+nametemp+'* ./ \n');
nametemp = fileNames.replace("COMB","UUU");
outScript.write('cp '+currentDir+"/"+nametemp+'* ./ \n');
nametemp = fileNames.replace("COMB","EEE");
outScript.write('cp '+currentDir+"/"+nametemp+'* ./ \n');
nametemp = fileNames.replace("COMB","UUE");
outScript.write('cp '+currentDir+"/"+nametemp+'* ./ \n');
nametemp = fileNames.replace("COMB","EEU");
outScript.write('cp '+currentDir+"/"+nametemp+'* ./ \n');
outScript.write(command+'\n');
outScript.write("cp higgsCombine*"+fileNames+"* "+currentDir+"/"+options.outputDIR+'\n');
outScript.write("cp mlfit*"+fileNames+"* "+currentDir+"/"+options.outputDIR+'\n');
outScript.write("rm rootstats* "+'\n');
outScript.close();
os.system("chmod 777 "+currentDir+"/"+fn+".sh");
if options.queque!="" :
os.system("bsub -q "+options.queque+" -o "+currentDir+"/subJob"+fileNames+".log -e "+currentDir+"/subJob"+fileNames+".err "+fn+".sh");
else:
os.system("bsub -q 1nh -o "+currentDir+"/subJob"+fileNames+".log -e "+currentDir+"/subJob"+fileNames+".err "+fn+".sh");
##################################
########### Main Code ############
##################################
if __name__ == '__main__':
print "###### start extrapolate analysis ########";
if options.datacardDIR == "":
sys.exit("provide a datacard directory");
os.chdir(options.datacardDIR);
if not options.noGenerateCards :
for scalefactor in fakeRateScaleFactor :
os.system("rm *_%d.txt"%(scalefactor*10))
os.system("rm *_%d.root"%(scalefactor*10))
## make the card list
for var in options.inputVariable.split(",") :
os.system("ls | grep txt | grep -v COMB | grep _UUpp.txt | grep "+options.inputVariable+" >> list.txt");
os.system("ls | grep txt | grep -v COMB | grep _UUmm.txt | grep "+options.inputVariable+" >> list.txt");
os.system("ls | grep txt | grep -v COMB | grep _EEpp.txt | grep "+options.inputVariable+" >> list.txt");
os.system("ls | grep txt | grep -v COMB | grep _EEmm.txt | grep "+options.inputVariable+" >> list.txt");
os.system("ls | grep txt | grep -v COMB | grep _EUpp.txt | grep "+options.inputVariable+" >> list.txt");
os.system("ls | grep txt | grep -v COMB | grep _EUmm.txt | grep "+options.inputVariable+" >> list.txt");
os.system("ls | grep txt | grep -v COMB | grep _UEpp.txt | grep "+options.inputVariable+" >> list.txt");
os.system("ls | grep txt | grep -v COMB | grep _UEmm.txt | grep "+options.inputVariable+" >> list.txt");
os.system("ls | grep txt | grep -v COMB | grep _UUU.txt | grep "+options.inputVariable+" >> list.txt");
os.system("ls | grep txt | grep -v COMB | grep _EEE.txt | grep "+options.inputVariable+" >> list.txt");
os.system("ls | grep txt | grep -v COMB | grep _EEU.txt | grep "+options.inputVariable+" >> list.txt");
os.system("ls | grep txt | grep -v COMB | grep _UUE.txt | grep "+options.inputVariable+" >> list.txt");
datacardFile = open("list.txt","r");
datacardList = [];
for iline in datacardFile :
if iline.split(" ")[0]!="" and iline.split(" ")[0]!="#" and iline.split(" ")[0]!=" " and iline.split(" ")[0]!="\n" and iline.split(" ")[0]!="\t":
datacardList.append(iline.split(" ")[0].replace("\n",""));
## total number of datacards
os.system("rm list.txt");
createdCards = [];
fakeColumn = 0 ;
## fake rate on datacard list
if not options.noGenerateCards :
for datacard in datacardList :
observed = 0;
## loop on lumi values
for scalefactor in fakeRateScaleFactor :
inputfile = open('%s'%(datacard),'r');
## create a new root file
inputrootfile = ROOT.TFile(datacard.replace(".txt",".root"),"READ");
outname = datacard ;
outname = datacard.replace(".txt","_%d.txt"%(scalefactor*10));
print "create the new datacard ",outname;
fileNew = open('%s'%(outname), 'w');
createdCards.append(outname);
for ifile in inputfile :
if ifile.find(datacard.replace(".txt",".root"))!=-1 :
line = ifile.replace(datacard.replace(".txt",".root"),datacard.replace(".txt","_%d.root"%(scalefactor*10)));
fileNew.write(line);
continue ;
if ifile.split(" ")[0] != "rate" and ifile.split(" ")[0] != "process" :
fileNew.write(ifile);
continue;
if ifile.split(" ")[0] == "process":
fileNew.write(ifile);
icol = 0;
for columns in ifile.split() :
if columns != options.fakeType :
icol = icol+1;
continue;
else :
fakeColumn = icol;
if ifile.split(" ")[0] == "rate" :
lineToWrite = "rate ";
icol = 0;
for columns in ifile.split() :
if columns == "rate" :
icol = icol+1;
continue ;
elif icol != fakeColumn :
lineToWrite += " %f "%(float(columns));
icol = icol+1;
else :
lineToWrite += " %f "%(float(columns)*scalefactor);
fileNew.write(lineToWrite+"\n");
continue ;
fileNew.close();
## copy root file
outrootname = outname.replace(".txt",".root");
outrootfile = ROOT.TFile("%s"%(outrootname),"RECREATE");
for key in inputrootfile.GetListOfKeys() :
if key.GetClassName().find("TH1") == -1 and key.GetClassName().find("TH2") == -1 :
continue ;
outrootfile.cd();
histo = inputrootfile.Get(key.GetName()).Clone("temp");
if ROOT.TString(key.GetName()).Contains(options.fakeType):
histo.Scale(scalefactor);
histo.Write(key.GetName());
outrootfile.Write();
outrootfile.Close();
else:
for datacard in datacardList :
for scalefactor in fakeRateScaleFactor :
outname = datacard ;
outname = datacard.replace(".txt","_%d.txt"%(scalefactor*10));
createdCards.append(outname);
## merge the two datacard set
if options.outputDIR == "" :
sys.exit("cannot run combine --> outDir to be provided");
else :
os.system("mkdir -p "+options.outputDIR);
## combine the cards
combinedCards = [];
for scalefactor in fakeRateScaleFactor :
for datacard in createdCards :
if datacard.find("_%d"%(scalefactor*10)) != -1 :
if datacard.find("_UEpp") != -1 :
combinedCards.append(datacard.replace("_UEpp","_COMB"));
if datacard.find("_UEmm") != -1 :
combinedCards.append(datacard.replace("_UEmm","_COMB"));
if datacard.find("_EUpp") != -1 :
combinedCards.append(datacard.replace("_EUpp","_COMB"));
if datacard.find("_EUmm") != -1 :
combinedCards.append(datacard.replace("_EUmm","_COMB"));
if datacard.find("_EEpp") != -1 :
combinedCards.append(datacard.replace("_EEpp","_COMB"));
if datacard.find("_EEmm") != -1 :
combinedCards.append(datacard.replace("_EEmm","_COMB"));
if datacard.find("_UUpp") != -1 :
combinedCards.append(datacard.replace("_UUpp","_COMB"));
if datacard.find("_UUmm") != -1 :
combinedCards.append(datacard.replace("_UUmm","_COMB"));
if datacard.find("_UUU") != -1 :
combinedCards.append(datacard.replace("_UUU","_COMB"));
if datacard.find("_EEE") != -1 :
combinedCards.append(datacard.replace("_EEE","_COMB"));
if datacard.find("_UUE") != -1 :
combinedCards.append(datacard.replace("_UUE","_COMB"));
if datacard.find("_EEU") != -1 :
combinedCards.append(datacard.replace("_EEU","_COMB"));
break ;
if not options.noGenerateCards :
for card in combinedCards :
if options.runWWWZCombination :
print "combineCards.py "+card.replace("_COMB","_EEpp")+" "+card.replace("_COMB","_UUpp")+" "+card.replace("_COMB","_EUpp")+" "+card.replace("_COMB","_UEpp")+" "+card.replace("_COMB","_EEmm")+" "+card.replace("_COMB","_UUmm")+" "+card.replace("_COMB","_EUmm")+" "+card.replace("_COMB","_UEmm")+" "+card.replace("_COMB","_UUU")+" "+card.replace("_COMB","_EEE")+" "+card.replace("_COMB","_UUE")+" "+card.replace("_COMB","_EEU")+" > "+card;
os.system("combineCards.py "+card.replace("_COMB","_EEpp")+" "+card.replace("_COMB","_UUpp")+" "+card.replace("_COMB","_EUpp")+" "+card.replace("_COMB","_UEpp")+" "+card.replace("_COMB","_EEmm")+" "+card.replace("_COMB","_UUmm")+" "+card.replace("_COMB","_EUmm")+" "+card.replace("_COMB","_UEmm")+" "+card.replace("_COMB","_UUU")+" "+card.replace("_COMB","_EEE")+" "+card.replace("_COMB","_UUE")+" "+card.replace("_COMB","_EEU")+" > "+card);
else :
print "combineCards.py "+card.replace("_COMB","_EEpp")+" "+card.replace("_COMB","_UUpp")+" "+card.replace("_COMB","_EUpp")+" "+card.replace("_COMB","_UEpp")+" "+card.replace("_COMB","_EEmm")+" "+card.replace("_COMB","_UUmm")+" "+card.replace("_COMB","_EUmm")+" "+card.replace("_COMB","_UEmm")+" > "+card;
os.system("combineCards.py "+card.replace("_COMB","_EEpp")+" "+card.replace("_COMB","_UUpp")+" "+card.replace("_COMB","_EUpp")+" "+card.replace("_COMB","_UEpp")+" "+card.replace("_COMB","_EEmm")+" "+card.replace("_COMB","_UUmm")+" "+card.replace("_COMB","_EUmm")+" "+card.replace("_COMB","_UEmm")+" > "+card);
totalCards = [];
if not options.runCombinationOnly :
totalCards = createdCards + combinedCards
else :
totalCards = combinedCards
for card in totalCards :
outname = card.replace(".txt","");
if options.makeAsymptotic :
runCmmd = "combine -M Asymptotic --minimizerAlgo Minuit2 --minosAlgo stepping -n %s -m 100 -d %s -s -1 --expectSignal=%d -t %d --toysNoSystematics"%(outname,card,options.injectSignal,options.nToys);
print runCmmd ;
if options.batchMode:
fn = "combineScript_Asymptotic_%s"%(outname);
submitBatchJobCombine(runCmmd,fn,outname);
else :
os.system(runCmmd);
os.system("mv higgsCombine* "+options.outputDIR);
os.system("rm roostat*");
continue ;
if options.makeProfileLikelihood :
if options.bruteForce == 0 :
runCmmd = "combine -M ProfileLikelihood --signif -n %s -m 100 -d %s -t %d --expectSignal=%d -s -1 --toysNoSystematics"%(outname,card,options.nToys,options.injectSignal);
else:
runCmmd = "combine -M ProfileLikelihood --signif -n %s -m 100 -d %s -t %d --expectSignal=%d -s -1 --toysNoSystematics --bruteForce"%(outname,card,options.nToys,options.injectSignal);
print "runCmmd ",runCmmd;
if options.batchMode:
fn = "combineScript_ProfileLikelihood_exp_%s_iToy_%d"%(outname,options.nToys);
submitBatchJobCombine(runCmmd,fn,outname);
else:
os.system(runCmmd);
os.system("mv higgsCombine* "+options.outputDIR);
os.system("rm roostat* ");
continue ;
if options.makeMaxLikelihoodFit :
runCmmd = "combine -M MaxLikelihoodFit --minimizerAlgo Minuit2 --minimizerStrategy 1 --rMin %f --rMax %f --saveNormalizations --saveWithUncertainties -n %s -m 100 -d %s --do95=1 --robustFit=1 -s -1 -t %d --expectSignal %d --toysNoSystematics --skipBOnlyFit"%(options.rMin,options.rMax,outname,card,options.nToys,options.injectSignal);
print runCmmd ;
if options.batchMode:
fn = "combineScript_MaxLikelihoodFit_%s_nToys_%d"%(outname,options.nToys);
submitBatchJobCombine(runCmmd,fn,outname);
else:
os.system(runCmmd);
os.system("mv higgsCombine* "+options.outputDIR);
os.system("mv mlfit* "+options.outputDIR);
os.system("rm roostat* ");
continue ;
| [
"raffaele.gerosa@cern.ch"
] | raffaele.gerosa@cern.ch |
5716310b1d277a7b06a28ebd7482656970c66386 | 868d88fa52f458f1e155b174c19991f2a036a549 | /old/do4.py | ba3f25d1aba6b2107c4cfa4e5e088009b87e047a | [] | no_license | amuriy/MISC | 269db95744281f10fa4f5b3bfcc0b19f9d9829a6 | 27ee9eff88cb6076cd5f63d414851a23d910faa6 | refs/heads/master | 2020-12-04T13:31:58.159228 | 2020-07-14T04:59:58 | 2020-07-14T04:59:58 | 67,042,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,362 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
## pip install psycopg2
## pip install geocoder
## pip install wikimapia_api
## pip install bs4
## pip install wikipedia
## pip install sparqlwrapper
## pip install wptools
import psycopg2
import geocoder
import sys
import time
import re
import requests
# from bs4 import BeautifulSoup
import wikipedia
from SPARQLWrapper import SPARQLWrapper, JSON
# from dadata import DaDataClient
## functions
def get_qnumber(wikiarticle, wikisite):
resp = requests.get('https://www.wikidata.org/w/api.php', {
'action': 'wbgetentities',
'titles': wikiarticle,
'sites': wikisite,
'props': '',
'format': 'json'
}).json()
return list(resp['entities'])[0]
def place_wikidata_info(name, lon, lat):
uname = unicode(name, "utf-8")
wikipedia.set_lang("ru")
search = wikipedia.geosearch(lon,lat)
sparql = SPARQLWrapper("https://query.wikidata.org/sparql", agent='Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36')
if uname in [x for x in search]:
qnumber = get_qnumber(wikiarticle="%s" % name, wikisite="ruwiki")
query = """
SELECT DISTINCT ?item ?itemLabel ?population ?okato_id ?oktmo_id ?locality WHERE {
FILTER(?item IN(wd:%s))
OPTIONAL { ?item wdt:P1082 ?population. }
OPTIONAL { ?item wdt:P721 ?okato_id. }
OPTIONAL { ?item wdt:P764 ?oktmo_id. }
OPTIONAL { ?item wdt:P131 ?locality. }
SERVICE wikibase:label { bd:serviceParam wikibase:language "ru". }
}
"""
query2 = query % qnumber
sparql.setQuery(query2)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
locality = results[u'results'][u'bindings'][0][u'locality'][u'value']
item = results[u'results'][u'bindings'][0][u'item'][u'value']
okato_id = results[u'results'][u'bindings'][0][u'okato_id'][u'value']
oktmo_id = results[u'results'][u'bindings'][0][u'oktmo_id'][u'value']
population = results[u'results'][u'bindings'][0][u'population'][u'value']
d = dict()
d['locality'] = '%s' % locality
d['item'] = '%s' % item
d['okato_id'] = '%s' % okato_id
d['oktmo_id'] = '%s' % oktmo_id
d['population'] = '%s' % population
return d
# res = place_wikidata_info('Балабаново',55.182545653891,36.6617494478658)
# print res
def places_list_db(host, db, user, pwd):
try:
conn = psycopg2.connect(host = host, database = db, user = user, password = pwd)
except psycopg2.OperationalError as e:
print('Unable to connect!\n{0}').format(e)
sys.exit(1)
else:
cursor = conn.cursor()
# cursor.execute("select name, st_astext(st_centroid(st_transform(way, 4326))) from planet_osm_polygon where place in ('city','town','village','hamlet','isolated_dwelling') order by name limit 10;")
cursor.execute("select name, st_astext(st_centroid(st_transform(way, 4326))) from planet_osm_polygon where place in ('city','town') order by name limit 5;")
res = cursor.fetchall()
cursor.close()
conn.close()
return res
def place_geocoder_info(lon, lat):
time.sleep(2)
g = geocoder.yandex([lon,lat], kind = 'locality', method='reverse', lang = 'ru-RU')
addr = g.address.encode('utf8')
## добавить проверку на полноту адреса; если нет района или муниц. образования, запускаем обратный геокодинг Nominatim
if region in addr:
return g.address.encode('utf8')
def place_dadata_info(address):
# client = DaDataClient(key = '%s' % key, secret = '%s' % secret)
client.address = '%s' % address
client.address.request()
okato = client.result.okato.encode('utf8')
oktmo = client.result.oktmo.encode('utf8')
print('Код ОКАТО ---> %s' % okato)
print('Код ОКТМО ---> %s' % oktmo)
## TODO: print FIAS codes !!! #
def place_wikimapia_info(lon, lat):
# API.config.key = '%s' % wikimapia_key
# API.config.language = 'ru'
time.sleep(5)
place = API.places.nearest(lat, lon, category = 949, data_blocks='location')
if place[0] is None:
time.sleep(5)
place = API.places.nearest(lat, lon, category = 88, data_blocks='location')
if place and place[0] is not None:
purl = [p['url'] for p in place][0]
url = ''.join(purl).encode('utf8')
print('URL на Викимапии ---> %s' % url)
response = requests.get(url)
if (response is not None) and (response.status_code == 200):
soup = BeautifulSoup(response.text, "html.parser")
for i in soup.find_all('meta'):
res = re.search('Население(.*)перепись', str(i).replace('\n',' '))
if res:
wminfo = res.group(0)
if res is None:
res = re.search('население(.*)перепись', str(i).replace('\n',' '))
wminfo = res.group(0)
print wminfo
if wminfo is not None:
print('Описание на Викимапии ---> %s' % wminfo)
pop_list = [int(s) for s in wminfo.split() if s.isdigit()]
if len(pop_list) > 1:
if pop_list[1] == 2010:
pop = pop_list[0]
else:
pop = pop_list[1]
elif len(pop_list) == 1:
pop = pop_list[0]
print('Население ---> %s' % pop)
places = places_list_db('localhost', 'osm_ru_klu', 'user', '')
for line in places:
name = line[0]
latlon = line[1].replace('POINT(','').replace(')','')
lat = latlon.split(' ')[0]
lon = latlon.split(' ')[1]
lonlat = '%s,%s' % (lon, lat)
print name
print lonlat
place_geocoder_info(lon,lat)
# region = 'Калужская'
# for line in res:
# name = line[0]
# latlon = line[1].replace('POINT(','').replace(')','')
# lat = latlon.split(' ')[0]
# lon = latlon.split(' ')[1]
# lonlat = '%s,%s' % (lon, lat)
# time.sleep(2)
# g = geocoder.yandex([lon,lat], kind = 'locality', method='reverse', lang = 'ru-RU')
# addr = g.address.encode('utf8')
# if region in addr:
# print name
# print lonlat
# print g.address.encode('utf8')
# time.sleep(5)
# place = API.places.nearest(lat, lon, category = 949, data_blocks='location')
# if place[0] is None:
# time.sleep(5)
# place = API.places.nearest(lat, lon, category = 88, data_blocks='location')
# if place and place[0] is not None:
# purl = [p['url'] for p in place][0]
# url = ''.join(purl).encode('utf8')
# print('URL на Викимапии ---> %s' % url)
# response = requests.get(url)
# if (response is not None) and (response.status_code == 200):
# soup = BeautifulSoup(response.text, "html.parser")
# for i in soup.find_all('meta'):
# res = re.search('Население(.*)перепись', str(i).replace('\n',' '))
# if res:
# wminfo = res.group(0)
# if res is None:
# res = re.search('население(.*)перепись', str(i).replace('\n',' '))
# wminfo = res.group(0)
# print wminfo
# if wminfo is not None:
# print('Описание на Викимапии ---> %s' % wminfo)
# pop_list = [int(s) for s in wminfo.split() if s.isdigit()]
# if len(pop_list) > 1:
# if pop_list[1] == 2010:
# pop = pop_list[0]
# else:
# pop = pop_list[1]
# elif len(pop_list) == 1:
# pop = pop_list[0]
# print('Население ---> %s' % pop)
# print ''
# s = str(soup.findAll('meta')[1]).replace("'",'"').replace('\n',' ')
# if 'население' or 'Население' or 'перепись' in s:
# start = '<meta content="'
# end = '"description"/>'
# try:
# wminfo = re.search('%s(.*)%s' % (start, end), s).group(1)
# except AttributeError:
# wminfo = re.search('%s(.*)%s' % (start, end), s)
# if wminfo is not None:
# print('Описание на Викимапии ---> %s' % wminfo)
# pop_list = [int(s) for s in wminfo.split() if s.isdigit()]
# if len(pop_list) > 1:
# if pop_list[1] == 2010:
# pop = pop_list[0]
# else:
# pop = pop_list[1]
# elif len(pop_list) == 1:
# pop = pop_list[0]
# print('Население ---> %s' % pop)
# print ''
# else:
# for i in soup.find_all('meta'):
# res = re.search('Население(.*)перепись', str(i))
# if res is None:
# res = re.search('население(.*)перепись', str(i))
# if res:
# wminfo = res.group(0)
# if wminfo is not None:
# print('Описание на Викимапии ---> %s' % wminfo)
# pop_list = [int(s) for s in wminfo.split() if s.isdigit()]
# if len(pop_list) > 1:
# if pop_list[1] == 2010:
# pop = pop_list[0]
# else:
# pop = pop_list[1]
# elif len(pop_list) == 1:
# pop = pop_list[0]
# print('Население ---> %s' % pop)
# print ''
# def get_wikimapia_info
# ### DO IT
# with open('/home/amuriy/Downloads/00___TODO___00/osm_np_kondr100km/22.csv', 'r') as f:
# with open ('/home/amuriy/Downloads/00___TODO___00/osm_np_kondr100km/Kal_obl_results.txt','w') as fout:
# lines = f.read().splitlines()
# region = 'Калужская область'
# for line in lines:
# rline = ('%s' + ', %s') % (line, region)
# print rline
# time.sleep(2)
# g = geocoder.yandex(rline.rstrip(), maxRows=20, lang = 'ru-RU')
# for res in g:
# if (res.country is not None) and (res.country.encode('utf8') == 'Россия') and ('река' or 'улица' not in res.address.encode('utf8')):
# if line in res.address.encode('utf8'):
# address = res.address.encode('utf8')
# print('Адрес ---> %s' % address)
# lon = res.latlng[0].encode('utf8')
# lat = res.latlng[1].encode('utf8')
# print('Координаты ---> %s, %s') % (lon, lat)
# time.sleep(3)
# url_search = 'http://api.wikimapia.org/?function=search&key=' + wikimapia_key + '&q=' + line + '&lon=' + lon + '&lat=' + lat + '&disable=location,polygon&language=ru'
# print url_search
# response = requests.get(url_search)
# soup = BeautifulSoup(response.text, "html.parser")
# for place in soup.findAll('place'):
# s = str(place)
# start = '<name>'
# end = '</name>'
# fname = re.search('%s(.*)%s' % (start, end), s).group(1)
# if fname == line:
# start = 'id="'
# end = '">'
# fid = re.search('%s(.*)%s' % (start, end), s).group(1)
# print fid
# time.sleep(3)
# url_info = 'http://api.wikimapia.org/?function=place.getbyid&key=' + wikimapia_key + '&id=' + fid + '&data_blocks=main'
# print url_info
# response = requests.get(url_info)
# soup = BeautifulSoup(response.text, "html.parser")
# result = str(soup.findAll('description')[0])
# if result is not None:
# pop_list = [int(s) for s in result.split() if s.isdigit()]
# if len(pop_list) > 1:
# if pop_list[1] == 2010:
# pop = pop_list[0]
# else:
# pop = pop_list[1]
# elif len(pop_list) == 1:
# pop = pop_list[0]
# print pop
# # outline = ('%s, %s, %s, %s\n') % (line, lat, lon, pop)
# # print outline
# # fout.write(outline)
# # print ''
# # sys.exit(1)
| [
"amuriy@gmail.com"
] | amuriy@gmail.com |
70ed19c70da4256e4f1960b216e0f63a9984ec46 | 95890713ce369691a1782219c94899c138e856b5 | /ceilometer_ecs/__init__.py | 5074fb852d0ffe550bef36306761bf4d1cc2e0f6 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | QD888/ceilometer-ecs | f17b44d31bcf6fa6dfe7ce6f3cb203a233ae5ebc | c2d997a1943da2cac3d556d7a5cb74af0e8dd58f | refs/heads/master | 2022-02-06T02:45:53.197717 | 2016-07-06T13:46:47 | 2016-07-06T13:46:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | import pbr.version
__version__ = pbr.version.VersionInfo(
'ceilometer_ecs').version_string()
| [
"Jiale.Huo@emc.com"
] | Jiale.Huo@emc.com |
451819e65d89942721e8114483e6aa288dcd0879 | 215a5d6d655eeab07345849ed13b968b9fe520c2 | /Etdclass.py | 8c77fc0945bdbab11ed87dea5894aff1ce3fd8e9 | [] | no_license | sirakfilho/ETD-01 | fae3650017140b14aecb5494bf33e31b50bc05cd | ef552f96e04b35d6eb63bcfe6b86c6390ed7064b | refs/heads/master | 2020-09-05T12:56:36.068902 | 2019-11-06T23:49:28 | 2019-11-06T23:49:28 | 220,111,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | class Etdclass:
def __init__(self):
# variaveis do Academico
self.bancoAcademico = []
self.nomeAcademico = ""
self.tituloAcademico = ""
self.descricaoAcademico = ""
self.numParticipantesAcademico = ""
self.chaveAcademico = ""
# variaveis de Reuniao
self.agendarReuniao = []
self.registrarReuniao = ""
self.arquivodeReuniao = ""
| [
"noreply@github.com"
] | sirakfilho.noreply@github.com |
d0e979ec26dd550985879e92a80114873a539b89 | 7cdd5cc2fc0a11250b922daa53d7fcede895e643 | /sources/character.py | 91b7c8c754253ccb645d1da4839cf7c8824e4912 | [] | no_license | pauvarela3/acadaemia | 708ce3a14a60bd9cae7bc0a803b6857c0cf96f37 | efeec5d9eb066c9a6da1178cf96ad548fd36b3d2 | refs/heads/master | 2021-05-15T21:22:15.527922 | 2017-10-26T22:25:53 | 2017-10-26T22:25:53 | 106,485,607 | 0 | 0 | null | 2017-10-11T00:25:05 | 2017-10-11T00:25:05 | null | UTF-8 | Python | false | false | 1,690 | py | """This is a file defines the character class
"""
from objects import gameObject
class character(gameObject):
def __init__(self, name, gender):
super(character, self).__init__()
self.name = name
self.gender = gender
self._status.update({'HP':100, 'AP':100, 'happyness': 100, 'strength': 0,
'endurance':0 , 'charisma':0, 'intelligence': 0,
'luck':0, 'stealth':0, 'consciousness':100})
self.items = []
self.location = None
self.x_pos = 0.0
self.y_pos = 0.0
self.GPA = 0.0
self.items_onhand = []
self.skills = []
self.money = 0
self.age = 0
def move(self, direction, units):
pass
def go_to(self, location):
pass
def pick_up(self, obj):
pass
def throw(self, obj, to_obj):
pass
def check(self, obj):
pass
def transfer_item(self, obj, to_obj):
pass
def use_item(self, obj, on_obj=None):
for k, v in obj.effect.items():
if on_obj is None:
self._status[k] = self._status[k] + v
else:
on_obj._status[k] = on_obj._status[k] + v
def learn_skills(self, obj):
pass
def make_obj(self, obj):
pass
def buy(self, obj, from_obj):
cost = obj._status['value']
if cost > self.money:
print("Hey, go and make more money")
else:
self.money -= cost
self.items += [obj,]
def sell(self, obj, to):
pass
def trade(self, offer, request, target):
pass
def steal(self, obj, from_obj):
pass
| [
"paulinavarela@paulinas-MacBook-Pro-6.local"
] | paulinavarela@paulinas-MacBook-Pro-6.local |
9faee59c0f45d8684657cc02616de807f9e50c64 | aba5a125f84e6b78bc44a88b21eed68517195555 | /modules/api.py | 4be7044fb26e4474c08b1189288ed7bd7b967d90 | [
"MIT"
] | permissive | veffhz/namestat_v2 | 8a612980d8dfbfd93b5f7434bdd9388adb30f0f6 | ebe8451d8cc2fb9a2b8309b4bfb157f86251d6d6 | refs/heads/master | 2020-03-23T13:28:34.733104 | 2018-07-25T14:22:18 | 2018-07-25T14:22:18 | 141,620,195 | 0 | 0 | MIT | 2018-07-25T14:22:19 | 2018-07-19T19:03:15 | Python | UTF-8 | Python | false | false | 977 | py | import ast
from modules import helpers
from modules.analyze_handlers import get_functions_in_tree
from modules.analyze_handlers import get_trees_in_specified_path
def get_top_functions_names_in_path(path, top_size, language):
trees = get_trees_in_specified_path(path, language)
functions_names = [name for name in helpers.flatten_list([get_functions_in_tree(tree) for tree in trees])
if not helpers.is_function_built_in(name)]
return helpers.most_common(functions_names, top_size)
def get_nodes_names_in_tree(tree):
return [node.id for node in ast.walk(tree) if isinstance(node, ast.Name)]
def get_all_names_in_path(path, language):
trees = get_trees_in_specified_path(path, language)
names = [name for name in helpers.flatten_list([get_nodes_names_in_tree(tree) for tree in trees])
if not helpers.is_function_built_in(name)]
return helpers.flatten_list([helpers.split_case_name(name) for name in names])
| [
"veffhz@gmail.com"
] | veffhz@gmail.com |
717a782303e4a87687213821bb835c381c2f657d | a5b01641e369270875bf832d357361d45d7782d9 | /Yule-Walker/test.py | c2f9b0a0a21b2eebcae454af073b9a8a81c54b27 | [] | no_license | k0305916/MachineLearning | 1687ecb6d4da20024d149189dc84e802b19af54a | 9522fffa59089e3771afea1317f6ba58946b27b7 | refs/heads/master | 2022-02-04T21:17:25.189866 | 2022-01-08T07:25:03 | 2022-01-08T07:25:03 | 127,285,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | from scipy.signal import lfilter
from spectrum import *
from numpy.random import randn
import matplotlib.pyplot as plt
A =[1, -2.7607, 3.8106, -2.6535, 0.9238]
noise = randn(1, 1024)
y = lfilter([1], A, noise)
#filter a white noise input to create AR(4) process
count = 20
[ar, var, reflec] = aryule(y[0], count)
# ar should contains values similar to A
# yhat = np.concatenate(y[0], ar)
# plt.plot(y[0])
# plt.plot(yhat)
plt.plot(noise[0])
plt.plot(y[0])
# id = np.arange(0, count, 1)
# plt.plot(ar)
# plt.scatter(id, ar)
plt.show()
print("over") | [
"fan.li01@sap.com"
] | fan.li01@sap.com |
ed7e287dd6291709d1f6f3d0b5744fe3ebb5e52b | d6dd01342a88fb3167237fbfb536108f119944f0 | /general_analysis.py | d7325dd9ff7ccdcd655225c5981bdc94ebd9b800 | [] | no_license | slam36/unsupervised-learning | b664672aba4168a2820316b2f37aec6be81fec52 | 513e9a855a9534f5d9897d0713276ee4d3f8bfea | refs/heads/master | 2020-09-03T02:27:06.929309 | 2019-11-08T23:26:44 | 2019-11-08T23:26:44 | 219,361,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,231 | py | import csv
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
import seaborn as sn
import pandas as pd
from sklearn.model_selection import learning_curve
from sklearn.metrics import *
import time
def show_auc(y_true, y_score):
fpr, tpr, _ = roc_curve(y_true, y_score)
roc_auc = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic Plot')
plt.legend(loc="lower right")
plt.show()
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=5,
n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
#This method I got from BD4H which I am also taking this semester
def classification_metrics(Y_pred, Y_true):
#NOTE: It is important to provide the output in the same order
accuracy = accuracy_score(Y_true, Y_pred)
auc = roc_auc_score(Y_true, Y_pred)
precision = precision_score(Y_true, Y_pred)
recall = recall_score(Y_true, Y_pred)
f1score = f1_score(Y_pred, Y_true)
return accuracy, auc, precision, recall, f1score
#This method I got from BD4H which I am also taking this semester
def display_metrics(classifierName,Y_pred,Y_true):
print("______________________________________________")
acc, auc_, precision, recall, f1score = classification_metrics(Y_pred,Y_true)
print(("Accuracy: "+str(acc)))
print(("AUC: "+str(auc_)))
print(("Precision: "+str(precision)))
print(("Recall: "+str(recall)))
print(("F1-score: "+str(f1score)))
print("______________________________________________")
print("")
def general_analysis(classifier, classifier_name, X_train, y_train, X_test, y_test, y_pred):
print("performing analysis for " + classifier_name)
classifier.fit(X_train, y_train)
y_predict = classifier.predict(X_test)
y_predict = y_predict.astype(int)
y_predict = np.array(y_predict)
print(classifier)
show_auc(y_test, y_pred)
array = confusion_matrix(y_test, y_pred)
df_cm = pd.DataFrame(array, index = [i for i in ["Not Spam", "Spam"]], columns = [i for i in ["Not Spam", "Spam"]])
ax = sn.heatmap(df_cm, annot=True, cmap=sn.cm.rocket_r)
ax.set_title(str(classifier_name) + " " + "Confusion Matrix", fontsize=20)
plt.xlabel("Predicted", fontsize=18)
plt.ylabel("Actual", fontsize=18)
start = time.time()
plot_learning_curve(classifier, classifier_name + "Learning Curve", X_train, y_train, cv=5, train_sizes = np.linspace(0.1, 1, 10))
elapsed_time = time.time() - start
print(str(elapsed_time) + " sec")
display_metrics(str(classifier_name), y_pred, y_test)
accuracy, auc, precision, recall, f1score= classification_metrics(y_pred, y_test)
return [accuracy, auc]
| [
"noreply@github.com"
] | slam36.noreply@github.com |
d778fbf760aa56ea3f8892c5ad20900286e975a4 | 87476a78cd94a98c2e018c406460d3cbdd9b7ca4 | /tools/email_preprocess.py | df912053c309e6eaa0eaf5f34dec49d3dcce5ba0 | [] | no_license | AliMehrpour/MachineLearning | ea9b6034ab2f01b2d2d8b4d2ec527f855596f174 | d6acec9048adb92815beee0da5e861c3ef13d8c0 | refs/heads/master | 2021-05-04T07:08:39.000110 | 2016-11-22T07:44:06 | 2016-11-22T07:44:06 | 70,568,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,484 | py | #!/usr/bin/python
import pickle
import cPickle
import numpy
from sklearn import cross_validation
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectPercentile, f_classif
def preprocess(words_file="../tools/word_data.pkl", authors_file="../tools/email_authors.pkl"):
    """
    Load a pre-made list of email texts (by default word_data.pkl) and the
    corresponding authors (by default email_authors.pkl) and perform a number
    of preprocessing steps:
        -- split into training/testing sets (10% testing)
        -- vectorize into a tfidf matrix
        -- select/keep the most helpful features (top 1 percentile)

    After this, the features and labels are arrays that play nice with
    sklearn functions.

    4 objects are returned:
        -- training/testing features
        -- training/testing labels
    """
    ### the words (features) and authors (labels), already largely preprocessed
    ### this preprocessing will be repeated in the text learning mini-project
    # NOTE(review): the parameter names are rebound to the open file handles
    # and the handles are never closed explicitly.
    words_file = open(words_file, "r")
    words = pickle.load(words_file)
    authors_file = open(authors_file, "r")
    authors = pickle.load(authors_file)

    ### test_size is the percentage of events assigned to the test set
    ### (remainder go into training); random_state fixed for reproducibility
    features_train, features_test, labels_train, labels_test = cross_validation.train_test_split(words, authors, test_size = 0.1, random_state = 42)

    ### text vectorization--go from strings to lists of numbers
    vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english')
    features_train_transformed = vectorizer.fit_transform(features_train)
    features_test_transformed = vectorizer.transform(features_test)

    ### feature selection, because text is super high dimensional and
    ### can be really computationally chewy as a result
    selector = SelectPercentile(f_classif, percentile=1)
    selector.fit(features_train_transformed, labels_train)
    features_train_transformed = selector.transform(features_train_transformed).toarray()
    features_test_transformed = selector.transform(features_test_transformed).toarray()

    ### info on the data (labels are 1 for Chris, 0 for Sara)
    print "no. of Chris training emails: ", sum(labels_train)
    print "no. of Sara training emails: ", len(labels_train) - sum(labels_train)

    return features_train_transformed, features_test_transformed, labels_train, labels_test
| [
"alimehrpour@gmail.com"
] | alimehrpour@gmail.com |
82fd787ff515121c96abf65456f9a4e538d43d6b | 1a074bc42f351071538c905423e75e6d37bc20c8 | /practise/fn.py | 072fb379cdb164adf623d7e2ef7b716b2ea451a5 | [] | no_license | zhouatie/python | 587dd20938045a81fcb44522a587a210af16f9f5 | 56632e1c66e4ba89ee8431cce493f63eb15e9c0f | refs/heads/master | 2020-03-17T00:45:06.260191 | 2018-05-26T17:52:39 | 2018-05-26T17:52:39 | 133,128,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | def printStr(a):
print(a)
arr = [1,2,6] | [
"zhoust@guahao.com"
] | zhoust@guahao.com |
d807c711dfd494e037aa22535ff88a7ef1670443 | 7b2f54bb106acf31f0c3b08e5cc7c55f999b8f55 | /dojo/tools/qualys_infrascan_webgui/parser.py | 29c16742e649b7ac9759d63db48f557904a7fedd | [
"BSD-2-Clause",
"BSD-3-Clause",
"MIT-open-group",
"libtiff",
"GCC-exception-2.0",
"EPL-2.0",
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-free-unknown",
"GPL-1.0-or-later",
"MPL-2.0",
"MIT-Modern-Variant",
"GCC-exception-3.1",
"BSD-3-Clause-Modification",
"... | permissive | devGregA/django-DefectDojo | 546ad06beeba14a105ff8390544f8975fe3dbc77 | 33d387e7682a410b336f528078a0ade38c42c00e | refs/heads/master | 2023-04-09T18:08:23.844584 | 2022-12-14T02:19:46 | 2022-12-14T02:19:46 | 487,615,561 | 1 | 0 | BSD-3-Clause | 2022-05-01T18:44:47 | 2022-05-01T18:44:46 | null | UTF-8 | Python | false | false | 4,808 | py | import logging
from datetime import datetime
import html2text
from dateutil import parser
from defusedxml import ElementTree
from dojo.models import Endpoint, Finding
logger = logging.getLogger(__name__)
def htmltext(blob):
    """Convert an HTML fragment to plain text, keeping hyperlink targets."""
    h = html2text.HTML2Text()
    # Keep links in the rendered text so references survive the conversion.
    h.ignore_links = False
    return h.handle(blob)
def issue_r(raw_row, vuln, scan_date):
    """Convert one Qualys <IP> element into a list of DefectDojo Findings.

    One Finding is created per <VULN> entry under VULNS/CAT, all attached to
    a single Endpoint built from the host's FQDN (or IP when unregistered).
    """
    ret_rows = []
    issue_row = {}
    # IP ADDRESS
    issue_row['ip_address'] = raw_row.get('value')
    # FQDN
    issue_row['fqdn'] = raw_row.get('name')
    if issue_row['fqdn'] == "No registered hostname":
        issue_row['fqdn'] = None
    # port
    _port = raw_row.get('port')
    # Create Endpoint -- prefer the FQDN, fall back to the raw IP address
    if issue_row['fqdn']:
        ep = Endpoint(host=issue_row['fqdn'])
    else:
        ep = Endpoint(host=issue_row['ip_address'])
    # OS NAME
    issue_row['os'] = raw_row.findtext('OS')
    # Scan details - VULNS//VULN indicates we only care about confirmed vulnerabilities
    for vuln_cat in raw_row.findall('VULNS/CAT'):
        _category = str(vuln_cat.get('value'))
        for vuln_details in vuln_cat.findall('VULN'):
            # NOTE(review): _temp aliases issue_row (no copy), so every
            # iteration mutates the same shared dict -- confirm intended.
            _temp = issue_row
            _gid = vuln_details.get('number')
            _temp['port_status'] = _port
            _result = str(vuln_details.findtext('RESULT'))
            # Vuln name
            _temp['vuln_name'] = vuln_details.findtext('TITLE')
            # Vuln Description
            _description = str(vuln_details.findtext('DIAGNOSIS'))
            # Solution Strips Heading Workaround(s)
            _temp['solution'] = htmltext(str(vuln_details.findtext('SOLUTION')))
            # Vuln_description: markdown-style metadata appended to diagnosis
            _temp['vuln_description'] = "\n".join([htmltext(_description),
                                                  htmltext("**Category:** " + _category),
                                                  htmltext("**QID:** " + str(_gid)),
                                                  htmltext("**Port:** " + str(_port)),
                                                  htmltext("**Result Evidence:** " + _result),
                                                  ])
            # Impact description
            _temp['IMPACT'] = htmltext(str(vuln_details.findtext('CONSEQUENCE')))
            # CVE and LINKS
            _cl = []
            _temp_cve_details = vuln_details.iterfind('CVE_ID_LIST/CVE_ID')
            # NOTE(review): iterfind returns an iterator, which is always
            # truthy, so this branch runs even when there are no CVE ids
            # (the comprehension then just produces an empty dict).
            if _temp_cve_details:
                _cl = {cve_detail.findtext('ID'): cve_detail.findtext('URL') for cve_detail in _temp_cve_details}
                _temp['cve'] = "\n".join(list(_cl.keys()))
                _temp['links'] = "\n".join(list(_cl.values()))
            # The CVE in Qualys report might not have a CVSS score, so findings are informational by default
            # unless we can find map to a Severity OR a CVSS score from the findings detail.
            sev = qualys_convert_severity(vuln_details.get('severity'))
            refs = "\n".join(list(_cl.values()))
            finding = Finding(title=_temp['vuln_name'],
                              mitigation=_temp['solution'],
                              description=_temp['vuln_description'],
                              severity=sev,
                              references=refs,
                              impact=_temp['IMPACT'],
                              vuln_id_from_tool=_gid,
                              date=scan_date,
                              )
            finding.unsaved_endpoints = list()
            finding.unsaved_endpoints.append(ep)
            ret_rows.append(finding)
    return ret_rows
def qualys_convert_severity(raw_val):
    """Map a Qualys numeric severity ('1'-'5') to a DefectDojo severity label.

    The raw value is stringified and stripped first; anything outside the
    known '1'..'5' range (including None) defaults to 'Info'.
    """
    # Dict lookup replaces the repetitive if/elif chain; behavior unchanged.
    severity_map = {
        '1': 'Info',
        '2': 'Low',
        '3': 'Medium',
        '4': 'High',
        '5': 'Critical',
    }
    return severity_map.get(str(raw_val).strip(), 'Info')
class QualysInfrascanWebguiParser(object):
    """Parser for Qualys infrastructure scans exported from the WebGUI as XML."""

    def get_scan_types(self):
        return ["Qualys Infrastructure Scan (WebGUI XML)"]

    def get_label_for_scan_types(self, scan_type):
        return scan_type  # no custom label for now

    def get_description_for_scan_types(self, scan_type):
        return "Qualys WebGUI output files can be imported in XML format."

    def get_findings(self, file, test):
        """Parse the uploaded XML report and return a flat list of Findings."""
        data = ElementTree.parse(file).getroot()
        # fetch scan date e.g.: <KEY value="DATE">2020-01-30T09:45:41Z</KEY>
        # Falls back to "now" if the report carries no DATE key.
        scan_date = datetime.now()
        for i in data.findall('HEADER/KEY'):
            if i.get('value') == 'DATE':
                scan_date = parser.isoparse(i.text)
        # Each <IP> element yields zero or more findings via issue_r().
        master_list = []
        for issue in data.findall('IP'):
            master_list += issue_r(issue, data, scan_date)
        return master_list
| [
"noreply@github.com"
] | devGregA.noreply@github.com |
06d53d27cf6030de730e01cc8bee684b5fbf80e9 | 527a91b2f697cbca425872ae5f167181ed6191cc | /Shopify_API_Chall2019/urls.py | f51aca57d10cf00f78326aa586fea74c9605e788 | [] | no_license | MoeBazziGIT/Shopify-Backend-Challenge-2019 | 42629e139b5d5c711563dc39b571e05a26bb2f3b | bf5bf61bda77ebf3c2da34a39ad8e8236fcf7aef | refs/heads/master | 2020-04-16T21:55:22.003337 | 2019-01-19T18:30:51 | 2019-01-19T18:30:51 | 165,945,407 | 0 | 0 | null | 2019-01-19T18:30:52 | 2019-01-16T00:31:30 | Python | UTF-8 | Python | false | false | 1,110 | py | from django.contrib import admin
from django.urls import path, include
from rest_framework.urlpatterns import format_suffix_patterns
from products import views as product_views
from cart import views as cart_views
from rest_framework import routers
# setting up the DRF router that maps viewsets to URL patterns
router = routers.DefaultRouter()
# these two registrations provide url mappings to the main views of the API
# /products/ --> will get all products in the database
# /products/<product_id>/ --> will get a specific product
router.register('products', product_views.ProductView, base_name='products')
# /carts/ --> will get all carts in the database
# /carts/<cart_id>/ --> will get a specific cart
router.register('carts', cart_views.CartView, base_name='carts')

urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include(router.urls)),
    path('carts/', include('cart.urls', namespace='carts'))
]
# cart.urls is included because the cart app provides some extra functionality
# for carts (e.g. carts/checkout, add, remove). These are function-based views
# that aren't registered with the routers above.
| [
"bazzimoe3@gmail.com"
] | bazzimoe3@gmail.com |
6153f9fc3ea1bfc715efc6a24611642f39237bba | e32e321f9f8a6b4e2fc5ab75ed3d6391e3701eb6 | /1 - Sequencial/3.py | 034c8cbabfd440c231061a889dde49ae3b58b206 | [] | no_license | AlineFreitas/ProgrammingExercises | 383b6a81928ed12e8e1eeeff5ceeae97fce8b750 | 8667d294c091580d249a26b0fe34c6705919ebd9 | refs/heads/master | 2016-09-01T14:21:12.822245 | 2015-12-02T15:54:00 | 2015-12-02T15:54:00 | 47,188,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | #!/usr/bin/env python
#coding:utf-8
# Read two numbers from standard input (Python 2 input() evaluates the entry).
num1 = input("")
num2 = input("")
# Print the sum in the form "a + b = c".
print "%i + %i = %i" %(num1, num2, num1+num2)
"aline.do.freitas@gmail.com"
] | aline.do.freitas@gmail.com |
4afee5f7a96fc247d59062cad5213d2fbf817ec8 | b8eb2203b74631944071ecfeb04988a40a401b78 | /Code/Preproccesing/Crowd_TFIDF (2).py | 1b6d95c9b6169cdd01e7e60c91856aef8737bb16 | [] | no_license | SPrio/Events-in-Text | 60c80d176f860c272350035e401d108ec95c12e5 | 222d552f50232436ed80af973d3b6dfc70d2d38e | refs/heads/master | 2020-03-20T16:55:38.716815 | 2016-05-20T10:18:05 | 2016-05-20T10:18:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,089 | py | import os
import math
def create_tfidf(dataset, sentences, wordtoid):
    """Build crowd-based TF/IDF scores per (document, sentence, word) id.

    Inputs are open file handles of '|'-separated lines:
      - wordtoid:  word|document_sentence_word_id
      - sentences: document_sentence_id|sentence text
      - dataset:   document_sentence_word_id|crowd annotation count
    Writes a CSV of positive/negative TF-IDF values and returns the TF maps.
    """
    words_to_document_sentence_id_word = {}
    words_to_document_sentence_id = {}
    document_sentence_id_to_word_id_dict = {}
    # Index every (word, word-id) pair under its sentence id.
    for line in wordtoid:
        line = line.strip()
        info = line.split('|')
        word = info[0]
        document_sentence_word_id = info[1]
        document_sentence_id = "_".join(document_sentence_word_id.split("_")[:-1])
        if not document_sentence_id in document_sentence_id_to_word_id_dict:
            document_sentence_id_to_word_id_dict[document_sentence_id] = [(word, document_sentence_word_id)]
        else:
            temp = document_sentence_id_to_word_id_dict[document_sentence_id]
            temp.append((word, document_sentence_word_id))
            document_sentence_id_to_word_id_dict[document_sentence_id] = temp
    # Map each word to the sentences it occurs in, and to its word-ids.
    for line in sentences:
        line = line.strip()
        info = line.split('|')
        document_sentence_id = info[0]
        sentence = info[1]
        words = sentence.split(' ')
        for word in words:
            if word not in words_to_document_sentence_id:
                words_to_document_sentence_id[word] = [document_sentence_id]
                for obj in document_sentence_id_to_word_id_dict[document_sentence_id]:
                    wordtocomp = obj[0]
                    if word == wordtocomp:
                        #print word, wordtocomp, obj
                        document_sentence_word_id = obj[1]
                        words_to_document_sentence_id_word[word] = [document_sentence_word_id]
            else:
                temp = words_to_document_sentence_id[word]
                temp.append(document_sentence_id)
                words_to_document_sentence_id[word] = temp
                temp = words_to_document_sentence_id_word[word]
                for obj in document_sentence_id_to_word_id_dict[document_sentence_id]:
                    wordtocomp = obj[0]
                    if word == wordtocomp:
                        document_sentence_word_id = obj[1]
                        if not document_sentence_word_id in words_to_document_sentence_id_word[word]:
                            # NOTE(review): the list is reset to a singleton
                            # and then immediately overwritten with temp --
                            # the first assignment looks redundant; confirm.
                            words_to_document_sentence_id_word[word] = [document_sentence_word_id]
                            temp.append(document_sentence_word_id)
                            words_to_document_sentence_id_word[word] = temp
    writer = open('D:\\Dropbox\\241 Software Solutions\\Studie\\MA-Thesis\\Maurits\\Data\\EventTask\\Preproccesing\\word_Crowd_TF_IDFVALUES.csv', 'w+')
    document_sentence_wordid_TF_CROWD_pos = {}
    document_sentence_wordid_IDF_CROWD_pos ={}
    document_sentence_wordid_TF_CROWD_neg = {}
    document_sentence_wordid_IDF_CROWD_neg ={}
    document_sentence_wordid_TFDIDF_CROWD_neg = {}
    sentence_to_annotation_counts = {}
    document_sentence_word_id_counts = {}
    document_sentence_wordid_TFDIDF_CROWD_pos = {}
    # Accumulate crowd annotation counts per word-id and per sentence.
    for line in dataset:
        info = line.split("|")
        document_sentence_word_id = info[0]
        crowdCount = int(info[1])
        document_sentence_word_id_counts[document_sentence_word_id] = crowdCount
        document_sentence_id = "_".join(document_sentence_word_id.split("_")[:-1])
        if not document_sentence_id in sentence_to_annotation_counts:
            sentence_to_annotation_counts[document_sentence_id] = crowdCount
        else:
            sentence_to_annotation_counts[document_sentence_id] += crowdCount
    # TF: fraction of the 15 crowd workers who annotated (pos) / did not (neg).
    for document_sentence_word_id in document_sentence_word_id_counts:
        document_sentence_id = "_".join(document_sentence_word_id.split("_")[:-1])
        total_annotation_count = sentence_to_annotation_counts[document_sentence_id]
        word_annotation_count = document_sentence_word_id_counts[document_sentence_word_id]
        document_sentence_wordid_TF_CROWD_pos[document_sentence_word_id] = float(word_annotation_count)/float(15)
        document_sentence_wordid_TF_CROWD_neg[document_sentence_word_id] = float(15.0-word_annotation_count)/float(15)
    # IDF per word; ',' is special-cased to zero, errors fall back to 0.0.
    for word in words_to_document_sentence_id:
        if word == ',':
            document_sentence_wordid_IDF_CROWD_pos[word] = 0.0
            document_sentence_wordid_IDF_CROWD_neg[word] = 0.0
        else:
            # Debug tracing for the word 'believe'.
            if word == 'believe':
                print len(words_to_document_sentence_id[word])
            totalsentences = len(words_to_document_sentence_id[word])
            totalsum= 0.0
            totalsumneg =0.0
            for document_sentence_word_id in words_to_document_sentence_id_word[word]:
                #print document_sentence_wordid_TF_CROWD[document_sentence_word_id]
                totalsum+=(document_sentence_wordid_TF_CROWD_pos[document_sentence_word_id]*15.0)
                totalsumneg+= (document_sentence_wordid_TF_CROWD_neg[document_sentence_word_id]*15.0)
            #print '*******'
            if word == 'believe':
                print totalsum
            #print word, totalsentences, totalsum, words_to_document_sentence_id[word],math.log((totalsentences*15.0)), math.log((totalsentences*15.0)/ totalsum)
            try:
                #document_sentence_wordid_IDF_CROWD_pos[word] =1/math.log((totalsentences*15.0)/ totalsum, 10)
                document_sentence_wordid_IDF_CROWD_pos[word] =math.log(1+(totalsentences*15.0)/ totalsum, 10)
            except :
                document_sentence_wordid_IDF_CROWD_pos[word] = 0.0
            #print 1.0/math.log((totalsentences*15.0)/ totalsumneg)
            try:
                document_sentence_wordid_IDF_CROWD_neg[word] =math.log(1+(totalsentences*15.0)/ totalsumneg, 10)
            except :
                document_sentence_wordid_IDF_CROWD_neg[word] = 0.0
    # TF-IDF = TF * IDF, positive and negative variants.
    for word in document_sentence_wordid_IDF_CROWD_pos:
        for document_sentence_word_id in words_to_document_sentence_id_word[word]:
            #print word, document_sentence_wordid_TF_CROWD[document_sentence_word_id], document_sentence_wordid_IDF_CROWD[word]
            document_sentence_wordid_TFDIDF_CROWD_pos[document_sentence_word_id] = document_sentence_wordid_TF_CROWD_pos[document_sentence_word_id]*document_sentence_wordid_IDF_CROWD_pos[word]
            document_sentence_wordid_TFDIDF_CROWD_neg[document_sentence_word_id] = document_sentence_wordid_TF_CROWD_neg[document_sentence_word_id]*document_sentence_wordid_IDF_CROWD_neg[word]
    print 'tf',document_sentence_wordid_TF_CROWD_pos['ABC19980108.1830.0711_20_1']
    print 'idf', document_sentence_wordid_IDF_CROWD_pos['believe']
    # NOTE(review): the normalization below is disabled (commented out);
    # normalized_pos/normalized_neg stay empty and raw values are written.
    normalized_pos = {}
    normalized_neg ={}
    posvalues = document_sentence_wordid_TFDIDF_CROWD_pos.values()
    minpos = min(posvalues)
    maxpos = max(posvalues)
    negvalues = document_sentence_wordid_TFDIDF_CROWD_neg.values()
    minneg = min(negvalues)
    maxneg = max(negvalues)
    print minpos, maxpos
    print minneg, maxneg
    for key in document_sentence_wordid_TF_CROWD_pos:
        #print document_sentence_wordid_TFDIDF_CROWD_pos[key]
        #print document_sentence_wordid_TFDIDF_CROWD_neg[key]
        #normalized_pos[key] = (document_sentence_wordid_TFDIDF_CROWD_pos[key]-min(minpos, minneg))/(max(maxpos,maxneg)-min(minpos, minneg))
        #normalized_neg[key] = (document_sentence_wordid_TFDIDF_CROWD_neg[key]-min(minpos, minneg))/(max(maxpos,maxneg)-min(minpos, minneg))
        writer.write(key+'|'+str(document_sentence_wordid_TFDIDF_CROWD_pos[key])+'|'+str(document_sentence_wordid_TFDIDF_CROWD_neg[key]))
        writer.write('\n')
    writer.flush()
    os.fsync(writer.fileno())
    writer.close()
    # NOTE(review): the same dict is returned twice; the second element was
    # possibly meant to be a different map -- confirm against callers.
    return document_sentence_wordid_TF_CROWD_pos , document_sentence_wordid_TF_CROWD_pos
# Module entry point: open the crowd-annotation and preprocessing files
# (relative to the working directory) and build the crowd TF-IDF table.
dataset = open('word_Crowd_classifications_FinalCrowdClassifications.csv', 'r')
sentences = open ('sentences.txt', 'r')
wordtoid = open ('wordtoid.txt', 'r')
create_tfidf(dataset,sentences,wordtoid)
| [
"mau_577@hotmail.com"
] | mau_577@hotmail.com |
d53d640b90ea1a5e90a03697bc99fe7c8e26c35e | 56baaf3127069ca31b6ec8a6015416d8e3f484b8 | /account/views.py | 098a9e3f16ab78c0ff0af78c6ad98d02cae74f82 | [] | no_license | payamQorbanpour/Borrow | 16ce210cfbdbc61261b6c50d241ce983e7f02796 | 82132a30345b88cfa412e58c6ba2a55eabcd051b | refs/heads/master | 2020-04-02T09:03:51.441742 | 2019-07-26T07:46:39 | 2019-07-26T07:46:39 | 154,274,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,713 | py | from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, update_session_auth_hash
from .forms import LoginForm, UserRegistrationForm, UserEditForm, ProfileEditForm
from django.contrib.auth.decorators import login_required
from .models import Profile
from django.contrib import messages
from django.contrib.auth.forms import PasswordChangeForm
# Create your views here.
@login_required
def dashboard(request):
    """Render the logged-in user's dashboard page."""
    return render(request, 'account/dashboard.html', {'section': 'dashboard'})
def register(request):
    """Handle new-user sign-up and create the matching Profile row.

    GET renders an empty registration form; a valid POST creates the User
    (with a hashed password) plus its Profile and shows the done page.
    """
    if request.method == 'POST':
        user_form = UserRegistrationForm(request.POST)
        if user_form.is_valid():
            # Create a new user object but avoid saving it yet
            new_user = user_form.save(commit=False)
            # Set the chosen password (set_password hashes it)
            new_user.set_password(
                user_form.cleaned_data['password'])
            # Save the User object
            new_user.save()
            # Create the user profile
            profile = Profile.objects.create(user=new_user)
            return render(request, 'account/register_done.html', {'new_user': new_user})
    else:
        user_form = UserRegistrationForm()
    # Invalid POSTs fall through here with the bound form and its errors.
    return render(request, 'account/register.html', {'user_form': user_form})
@login_required
def edit(request):
    """Let the authenticated user edit their User and Profile data together.

    Both forms are validated as a pair; success/error feedback is delivered
    through the Django messages framework.
    """
    if request.method == 'POST':
        user_form = UserEditForm(instance=request.user, data=request.POST)
        profile_form = ProfileEditForm(instance=request.user.profile, data=request.POST, files=request.FILES)
        if user_form.is_valid() and profile_form.is_valid():
            user_form.save()
            profile_form.save()
            messages.success(request, 'Profile updated successfully')
        else:
            messages.error(request, 'Error updating your profile')
    else:
        user_form = UserEditForm(instance=request.user)
        profile_form = ProfileEditForm(instance=request.user.profile)
    return render(request, 'account/edit.html', {'user_form': user_form, 'profile_form': profile_form})
def change_password(request):
    """Let the authenticated user change their password.

    On a valid POST, saves the new password and refreshes the session auth
    hash so the user stays logged in, then redirects back to this view.
    """
    if request.method == 'POST':
        form = PasswordChangeForm(request.user, request.POST)
        if form.is_valid():
            user = form.save()
            update_session_auth_hash(request, user)  # Important! keeps session valid
            messages.success(request, 'Your password was successfully updated!')
            return redirect('change_password')
        else:
            messages.error(request, 'Please correct the error below.')
    else:
        form = PasswordChangeForm(request.user)
    # Fixed template path: every other view in this module renders from the
    # 'account/' template directory; 'accounts/' was inconsistent.
    return render(request, 'account/change_password.html', {
        'form': form
    })
| [
"payamqorbanpour@gmail.com"
] | payamqorbanpour@gmail.com |
013d698540b83ad9614cd9791f75016949a40fe7 | 9fd2c9744d80c1f61f51ffb2fb0a2b9ef5ed3713 | /QuestionAnalysis/main.py | ada8e6bc84aa0e24d9620d4b8cbc7ec34de09a6d | [] | no_license | realentertain/WikiQA | 84a9bf688e3112d37b96ee7c3c7db2c778497e9b | cfa65f585f23a8af7ee5ed87b875712ab8fa28a2 | refs/heads/master | 2021-01-19T07:07:25.253829 | 2015-05-20T03:13:51 | 2015-05-20T03:13:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,388 | py | #!encoding=utf8
__author__ = 'user'
from process import file
import jieba.posseg as pseg
from process import question
from config.main import *
from DocsRetrieveSystem.docs_process import *
from AnswerExtraction.process.answer import *
def main_debug():
    """Run the full QA pipeline over the sample set and log a debug report.

    For every (question, answer) pair: check the answer is a known entity,
    retrieve candidate fragments by keyword search, select the top passage,
    extract an answer and count exact matches. Results are appended to
    document_retrieve_system.txt.
    """
    count = 0
    start = False
    process_data = {'QuestionSet': {'question': []}}
    e = entity()
    # NOTE(review): count is reset here, discarding the initialization above.
    count = 0
    find_answer_count = 0
    result_txt = ""
    for i in file.parse_xml(Location.question_answer_sample_file):
        result_txt += "%s:%s:" % (i["q"],i["a"])
        if question.is_q(i["q"]):
            # Only evaluate pairs whose gold answer is a known entity.
            if e.find(i["a"]) is not None:
                count += 1
                #print "%s in entity list"%i["a"]
                key_word_list = question.get_key_word_list(i["q"])
                result_txt += ",".join(key_word_list) + ":"
                #print "search by the key words list~"
                fragments_list = top_ten_docs(key_word_list)
                if len(fragments_list) == 0:
                    continue
                #print "the fragment_list is:"
                f_str = ""
                # for f in fragments_list:
                # print f
                # if f.find(i["a"].encode("utf8")) > -1:
                # print "this fragment contains the answer"
                # Concatenate numbered fragments for the report line.
                for f_i in range(len(fragments_list)):
                    f_str += "%d,%s."%(f_i+1, fragments_list[f_i])
                result_txt += f_str.decode("utf8")
                result_txt += ":"
                # Record whether the retrieved fragments contain the answer.
                if f_str.find(i["a"].encode("utf8")) > -1:
                    result_txt += "%s:" % (u"包含",)
                else:
                    result_txt += "%s:" % (u"不包含",)
                result_txt += "\n"
                top_passage = select_top_passage(i["q"], fragments_list)
                print "top passage is %s"%top_passage
                if top_passage.find(i["q"].encode("utf8")) > -1:
                    print "this passage contains the answer"
                final_answer = extract_answer(i["q"], top_passage)
                print "the final answer is %s"%final_answer
                if final_answer == i["a"]:
                    print "!!!!! found the answer"
                    find_answer_count += 1
        else:
            print "this is not a question"
            count += 1
    save_to_file(result_txt.encode("utf8"),"document_retrieve_system.txt",replace=True)
    print "the question number:%s"%count
    print "find %s answer"%find_answer_count
def test_answer_extraction():
    """Debug the answer-extraction stage in isolation.

    Like main_debug, but only logs pairs whose retrieved fragments actually
    contain the gold answer; results go to answer_extraction.txt.
    """
    count = 0
    start = False
    process_data = {'QuestionSet': {'question': []}}
    e = entity()
    # NOTE(review): count is reset here, discarding the initialization above.
    count = 0
    find_answer_count = 0
    result_txt = ""
    for i in file.parse_xml(Location.question_answer_sample_file):
        if question.is_q(i["q"]):
            # Only evaluate pairs whose gold answer is a known entity.
            if e.find(i["a"]) is not None:
                count += 1
                #print "%s in entity list"%i["a"]
                key_word_list = question.get_key_word_list(i["q"])
                #print "search by the key words list~"
                fragments_list = top_ten_docs(key_word_list)
                if len(fragments_list) == 0:
                    continue
                f_str = ""
                for f_i in range(len(fragments_list)):
                    f_str += "%d,%s."%(f_i+1, fragments_list[f_i])
                # Only analyze cases where retrieval found the answer text.
                if f_str.find(i["a"].encode("utf8")) > -1:
                    result_txt += "%s:%s:" % (i["q"],i["a"])
                    result_txt += ",".join(key_word_list) + ":"
                    t_passage = select_top_passage(i["q"], fragments_list)
                    result_txt += "%s:"%t_passage.decode("utf8")
                    if t_passage.find(i["a"].encode("utf8")) > -1:
                        result_txt += "%s:" % (u"包含",)
                    else:
                        result_txt += "%s:" % (u"不包含",)
                    final_answer = extract_answer(i["q"], t_passage)
                    result_txt += "%s:"%final_answer
                    result_txt += "\n"
        else:
            print "this is not a question"
            count += 1
    save_to_file(result_txt.encode("utf8"),"answer_extraction.txt",replace=True)
    print "the question number:%s"%count
    # NOTE(review): find_answer_count is never incremented in this function.
    print "find %s answer"%find_answer_count
def bool_search_debug():
    """Smoke-test the boolean keyword search with a fixed keyword list."""
    key_word_list = [u"外语片",u"华语",u"奥斯卡"]
    print "key word list :" + ",".join(key_word_list)
    print "search result :"
    fragment_list = top_ten_docs(key_word_list)
    for f in fragment_list:
        print f

if __name__ == '__main__':
    main_debug()
"fucusy@gmail.com"
] | fucusy@gmail.com |
7b96f80980d5394fe002ccfabf254a0d209a4eaa | f901d116a07d410f5be08b1760fcef6aa5222574 | /buns_and_cakes/main.py | 5653225de66566eec7115db9974103bcba523847 | [] | no_license | JackCX777/buns_and_cakes | 7b8e39680d92b6368398e7e5d4324be2c5855bcc | 03358e62076fb7c1310394e7c006a5e2af3aaa8a | refs/heads/main | 2023-05-22T13:01:16.067789 | 2021-05-27T08:39:02 | 2021-05-27T08:39:02 | 363,499,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | import os
from dotenv import load_dotenv
import vk_api
from vk_api.bot_longpoll import VkBotLongPoll
from db_adapter import Categories
from bot_setup import bot_init
load_dotenv()
ACCESS_TOKEN = os.getenv('ACCESS_TOKEN')
GROUP_ID = os.getenv('GROUP_ID')
API_VERSION = os.getenv('API_VERSION')
OWNER_ID = os.getenv('OWNER_ID')
def run_vk_bot_machine():
    """Run the VK group bot's long-poll loop.

    Creates a VkApi session, passes it to bot_init for initial setup,
    instantiates VkBotLongPoll for the configured group, then forwards every
    long-poll event to the bot's get_event method.

    Returns:
        None
    """
    vk_session = vk_api.VkApi(token=ACCESS_TOKEN)
    vk_bot = bot_init(session=vk_session, categories=Categories)
    longpoll = VkBotLongPoll(vk_session, GROUP_ID)
    vk_bot.owner_id = OWNER_ID
    for event in longpoll.listen():
        try:
            vk_bot.get_event(event)
        except Exception as error:
            # Swallow per-event failures so one bad event does not kill the
            # long-poll loop; use the repr() builtin instead of calling the
            # dunder directly.
            print('Error :', repr(error))
if __name__ == '__main__':
run_vk_bot_machine()
| [
"jackcx777@gmail.com"
] | jackcx777@gmail.com |
9347db929779b693fa84fc806d4c890b30ba9e6c | 5f8ab664edde2353a2b35d07c9d7fa9bb8970689 | /FDforClassification/FDforClassification/settings.py | 87b56c7640fbcae939778368485d059b05922b32 | [] | no_license | zhoulw13/Classification-FDLayout | 661c1733bbc733d7afb8521f215f1dfd264dc2aa | ac1865169d22d153cf59891bea2b10fbd570bdcd | refs/heads/master | 2021-09-02T14:59:41.639293 | 2018-01-03T08:29:58 | 2018-01-03T08:29:58 | 113,308,956 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,300 | py | """
Django settings for FDforClassification project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is hard-coded in source control; for production it
# should be loaded from the environment or a secrets store instead.
SECRET_KEY = '2h3&$3eas2$84xb_@3m0u1__s!mx4@s5fntml&*6bx-3v^g%ez'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'web_app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'FDforClassification.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'FDforClassification.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
os.path.join(BASE_DIR, "node_modules"),
)
| [
"huangrp2013@gmial.com"
] | huangrp2013@gmial.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.